// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016-2022 NXP
 */

#include <linux/net_tstamp.h>
#include <linux/nospec.h>

#include "dpni.h"	/* DPNI_LINK_OPT_* */
#include "dpaa2-eth.h"
/* To be kept in sync with DPNI statistics */
static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = {
	"[hw] rx frames",
	"[hw] rx bytes",
	"[hw] rx mcast frames",
	"[hw] rx mcast bytes",
	"[hw] rx bcast frames",
	"[hw] rx bcast bytes",
	"[hw] tx frames",
	"[hw] tx bytes",
	"[hw] tx mcast frames",
	"[hw] tx mcast bytes",
	"[hw] tx bcast frames",
	"[hw] tx bcast bytes",
	"[hw] rx filtered frames",
	"[hw] rx discarded frames",
	"[hw] rx nobuffer discards",
	"[hw] tx discarded frames",
	"[hw] tx confirmed frames",
	"[hw] tx dequeued bytes",
	"[hw] tx dequeued frames",
	"[hw] tx rejected bytes",
	"[hw] tx rejected frames",
	"[hw] tx pending frames",
};

#define DPAA2_ETH_NUM_STATS	ARRAY_SIZE(dpaa2_ethtool_stats)
static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
	/* per-cpu stats */
	"[drv] tx conf frames",
	"[drv] tx conf bytes",
	"[drv] tx sg frames",
	"[drv] tx sg bytes",
	"[drv] tx tso frames",
	"[drv] tx tso bytes",
	"[drv] rx sg frames",
	"[drv] rx sg bytes",
	"[drv] tx converted sg frames",
	"[drv] tx converted sg bytes",
	"[drv] enqueue portal busy",
	/* Channel stats */
	"[drv] dequeue portal busy",
	"[drv] channel pull errors",
	"[drv] cdan",
	"[drv] xdp drop",
	"[drv] xdp tx",
	"[drv] xdp tx errors",
	"[drv] xdp redirect",
	/* FQ stats */
	"[qbman] rx pending frames",
	"[qbman] rx pending bytes",
	"[qbman] tx conf pending frames",
	"[qbman] tx conf pending bytes",
	"[qbman] buffer count",
};

#define DPAA2_ETH_NUM_EXTRA_STATS	ARRAY_SIZE(dpaa2_ethtool_extras)
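/*
 * Note: the strings above are consumed positionally by
 * dpaa2_eth_get_ethtool_stats() below; any reordering here must be
 * mirrored in the order the driver fills in the data array.
 */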
static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
				  struct ethtool_drvinfo *drvinfo)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));

	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%u.%u", priv->dpni_ver_major, priv->dpni_ver_minor);

	strscpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
		sizeof(drvinfo->bus_info));
}
static int dpaa2_eth_nway_reset(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err = -EOPNOTSUPP;

	mutex_lock(&priv->mac_lock);

	if (dpaa2_eth_is_type_phy(priv))
		err = phylink_ethtool_nway_reset(priv->mac->phylink);

	mutex_unlock(&priv->mac_lock);

	return err;
}
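/*
 * Note: priv->mac is created and torn down at DPMAC connect/disconnect
 * time, so every handler below takes mac_lock before the
 * dpaa2_eth_is_type_phy()/dpaa2_eth_has_mac() checks that dereference it.
 */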
static int
dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
			     struct ethtool_link_ksettings *link_settings)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err;

	mutex_lock(&priv->mac_lock);

	if (dpaa2_eth_is_type_phy(priv)) {
		err = phylink_ethtool_ksettings_get(priv->mac->phylink,
						    link_settings);
		mutex_unlock(&priv->mac_lock);
		return err;
	}

	mutex_unlock(&priv->mac_lock);

	link_settings->base.autoneg = AUTONEG_DISABLE;
	if (!(priv->link_state.options & DPNI_LINK_OPT_HALF_DUPLEX))
		link_settings->base.duplex = DUPLEX_FULL;
	link_settings->base.speed = priv->link_state.rate;

	return 0;
}
static int
dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
			     const struct ethtool_link_ksettings *link_settings)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err = -EOPNOTSUPP;

	mutex_lock(&priv->mac_lock);

	if (dpaa2_eth_is_type_phy(priv))
		err = phylink_ethtool_ksettings_set(priv->mac->phylink,
						    link_settings);

	mutex_unlock(&priv->mac_lock);

	return err;
}
static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
				     struct ethtool_pauseparam *pause)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u64 link_options = priv->link_state.options;

	mutex_lock(&priv->mac_lock);

	if (dpaa2_eth_is_type_phy(priv)) {
		phylink_ethtool_get_pauseparam(priv->mac->phylink, pause);
		mutex_unlock(&priv->mac_lock);
		return;
	}

	mutex_unlock(&priv->mac_lock);

	pause->rx_pause = dpaa2_eth_rx_pause_enabled(link_options);
	pause->tx_pause = dpaa2_eth_tx_pause_enabled(link_options);
	pause->autoneg = AUTONEG_DISABLE;
}
static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
				    struct ethtool_pauseparam *pause)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpni_link_cfg cfg = {0};
	int err;

	if (!dpaa2_eth_has_pause_support(priv)) {
		netdev_info(net_dev, "No pause frame support for DPNI version < %d.%d\n",
			    DPNI_PAUSE_VER_MAJOR, DPNI_PAUSE_VER_MINOR);
		return -EOPNOTSUPP;
	}

	mutex_lock(&priv->mac_lock);

	if (dpaa2_eth_is_type_phy(priv)) {
		err = phylink_ethtool_set_pauseparam(priv->mac->phylink,
						     pause);
		mutex_unlock(&priv->mac_lock);
		return err;
	}

	mutex_unlock(&priv->mac_lock);

	if (pause->autoneg)
		return -EOPNOTSUPP;

	cfg.rate = priv->link_state.rate;
	cfg.options = priv->link_state.options;
	if (pause->rx_pause)
		cfg.options |= DPNI_LINK_OPT_PAUSE;
	else
		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
	if (!!pause->rx_pause ^ !!pause->tx_pause)
		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
	else
		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;

	if (cfg.options == priv->link_state.options)
		return 0;

	err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
	if (err) {
		netdev_err(net_dev, "dpni_set_link_cfg failed\n");
		return err;
	}

	priv->link_state.options = cfg.options;

	return 0;
}
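/*
 * Example: "ethtool -A eth0 autoneg off rx on tx off" yields rx_pause=1,
 * tx_pause=0, so the handler above sets both DPNI_LINK_OPT_PAUSE and
 * DPNI_LINK_OPT_ASYM_PAUSE; the dpaa2_eth_tx_pause_enabled() helper then
 * reads back PAUSE ^ ASYM_PAUSE == 0, i.e. Rx-only flow control.
 */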
static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
				  u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) {
			strscpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) {
			strscpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		dpaa2_mac_get_strings(p);
		break;
	}
}
static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
		return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS +
		       dpaa2_mac_get_sset_count();
	default:
		return -EOPNOTSUPP;
	}
}
/** Fill in hardware counters, as returned by MC.
 */
static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
					struct ethtool_stats *stats,
					u64 *data)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	union dpni_statistics dpni_stats;
	int dpni_stats_page_size[DPNI_STATISTICS_CNT] = {
		sizeof(dpni_stats.page_0),
		sizeof(dpni_stats.page_1),
		sizeof(dpni_stats.page_2),
		sizeof(dpni_stats.page_3),
		sizeof(dpni_stats.page_4),
		sizeof(dpni_stats.page_5),
		sizeof(dpni_stats.page_6),
	};
	u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
	u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
	struct dpaa2_eth_ch_stats *ch_stats;
	struct dpaa2_eth_drv_stats *extras;
	u32 buf_cnt, buf_cnt_total = 0;
	int j, k, err, num_cnt, i = 0;
	u32 fcnt, bcnt;

	memset(data, 0,
	       sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));

	/* Print standard counters, from DPNI statistics */
	for (j = 0; j <= 6; j++) {
		/* We're not interested in pages 4 & 5 for now */
		if (j == 4 || j == 5)
			continue;
		err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
					  j, &dpni_stats);
		if (err == -EINVAL)
			/* Older firmware versions don't support all pages */
			memset(&dpni_stats, 0, sizeof(dpni_stats));
		else if (err)
			netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j);

		num_cnt = dpni_stats_page_size[j] / sizeof(u64);
		for (k = 0; k < num_cnt; k++)
			*(data + i++) = dpni_stats.raw.counter[k];
	}

	/* Print per-cpu extra stats */
	for_each_online_cpu(k) {
		extras = per_cpu_ptr(priv->percpu_extras, k);
		for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++)
			*((__u64 *)data + i + j) += *((__u64 *)extras + j);
	}
	i += j;

	/* Per-channel stats */
	for (k = 0; k < priv->num_channels; k++) {
		ch_stats = &priv->channel[k]->stats;
		for (j = 0; j < DPAA2_ETH_CH_STATS; j++)
			*((__u64 *)data + i + j) += *((__u64 *)ch_stats + j);
	}
	i += j;

	for (j = 0; j < priv->num_fqs; j++) {
		/* Print FQ instantaneous counts */
		err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
					      &fcnt, &bcnt);
		if (err) {
			netdev_warn(net_dev, "FQ query error %d", err);
			return;
		}

		if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
			fcnt_tx_total += fcnt;
			bcnt_tx_total += bcnt;
		} else {
			fcnt_rx_total += fcnt;
			bcnt_rx_total += bcnt;
		}
	}

	*(data + i++) = fcnt_rx_total;
	*(data + i++) = bcnt_rx_total;
	*(data + i++) = fcnt_tx_total;
	*(data + i++) = bcnt_tx_total;

	for (j = 0; j < priv->num_bps; j++) {
		err = dpaa2_io_query_bp_count(NULL, priv->bp[j]->bpid, &buf_cnt);
		if (err) {
			netdev_warn(net_dev, "Buffer count query error %d\n", err);
			return;
		}
		buf_cnt_total += buf_cnt;
	}
	*(data + i++) = buf_cnt_total;

	mutex_lock(&priv->mac_lock);

	if (dpaa2_eth_has_mac(priv))
		dpaa2_mac_get_ethtool_stats(priv->mac, data + i);

	mutex_unlock(&priv->mac_lock);
}
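/*
 * The resulting data array layout is, in order: DPNI pages 0-3 and 6,
 * the accumulated per-cpu driver extras, the accumulated channel stats,
 * the four FQ totals, the buffer count and finally the MAC counters,
 * matching the string arrays at the top of this file.
 */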
static int dpaa2_eth_prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
				   void *key, void *mask, u64 *fields)
{
	int off;

	if (eth_mask->h_proto) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
		*(__be16 *)(key + off) = eth_value->h_proto;
		*(__be16 *)(mask + off) = eth_mask->h_proto;
		*fields |= DPAA2_ETH_DIST_ETHTYPE;
	}

	if (!is_zero_ether_addr(eth_mask->h_source)) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA);
		ether_addr_copy(key + off, eth_value->h_source);
		ether_addr_copy(mask + off, eth_mask->h_source);
		*fields |= DPAA2_ETH_DIST_ETHSRC;
	}

	if (!is_zero_ether_addr(eth_mask->h_dest)) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
		ether_addr_copy(key + off, eth_value->h_dest);
		ether_addr_copy(mask + off, eth_mask->h_dest);
		*fields |= DPAA2_ETH_DIST_ETHDST;
	}

	return 0;
}
static int dpaa2_eth_prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
				   struct ethtool_usrip4_spec *uip_mask,
				   void *key, void *mask, u64 *fields)
{
	int off;
	u32 tmp_value, tmp_mask;

	if (uip_mask->tos || uip_mask->ip_ver)
		return -EOPNOTSUPP;

	if (uip_mask->ip4src) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
		*(__be32 *)(key + off) = uip_value->ip4src;
		*(__be32 *)(mask + off) = uip_mask->ip4src;
		*fields |= DPAA2_ETH_DIST_IPSRC;
	}

	if (uip_mask->ip4dst) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
		*(__be32 *)(key + off) = uip_value->ip4dst;
		*(__be32 *)(mask + off) = uip_mask->ip4dst;
		*fields |= DPAA2_ETH_DIST_IPDST;
	}

	if (uip_mask->proto) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
		*(u8 *)(key + off) = uip_value->proto;
		*(u8 *)(mask + off) = uip_mask->proto;
		*fields |= DPAA2_ETH_DIST_IPPROTO;
	}

	if (uip_mask->l4_4_bytes) {
		tmp_value = be32_to_cpu(uip_value->l4_4_bytes);
		tmp_mask = be32_to_cpu(uip_mask->l4_4_bytes);

		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
		*(__be16 *)(key + off) = htons(tmp_value >> 16);
		*(__be16 *)(mask + off) = htons(tmp_mask >> 16);
		*fields |= DPAA2_ETH_DIST_L4SRC;

		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
		*(__be16 *)(key + off) = htons(tmp_value & 0xFFFF);
		*(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF);
		*fields |= DPAA2_ETH_DIST_L4DST;
	}

	/* Only apply the rule for IPv4 frames */
	off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
	*(__be16 *)(key + off) = htons(ETH_P_IP);
	*(__be16 *)(mask + off) = htons(0xFFFF);
	*fields |= DPAA2_ETH_DIST_ETHTYPE;

	return 0;
}
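/*
 * Example (hypothetical values): a user-defined IPv4 rule such as
 *	ethtool -N eth0 flow-type ip4 src-ip 192.168.1.1 l4proto 17 action 2
 * is prepared by the helper above, with any L4 ports packed in l4_4_bytes
 * (source port in the upper 16 bits, destination port in the lower 16).
 */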
static int dpaa2_eth_prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
				  struct ethtool_tcpip4_spec *l4_mask,
				  void *key, void *mask, u8 l4_proto, u64 *fields)
{
	int off;

	if (l4_mask->tos)
		return -EOPNOTSUPP;

	if (l4_mask->ip4src) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
		*(__be32 *)(key + off) = l4_value->ip4src;
		*(__be32 *)(mask + off) = l4_mask->ip4src;
		*fields |= DPAA2_ETH_DIST_IPSRC;
	}

	if (l4_mask->ip4dst) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
		*(__be32 *)(key + off) = l4_value->ip4dst;
		*(__be32 *)(mask + off) = l4_mask->ip4dst;
		*fields |= DPAA2_ETH_DIST_IPDST;
	}

	if (l4_mask->psrc) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
		*(__be16 *)(key + off) = l4_value->psrc;
		*(__be16 *)(mask + off) = l4_mask->psrc;
		*fields |= DPAA2_ETH_DIST_L4SRC;
	}

	if (l4_mask->pdst) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
		*(__be16 *)(key + off) = l4_value->pdst;
		*(__be16 *)(mask + off) = l4_mask->pdst;
		*fields |= DPAA2_ETH_DIST_L4DST;
	}

	/* Only apply the rule for IPv4 frames with the specified L4 proto */
	off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
	*(__be16 *)(key + off) = htons(ETH_P_IP);
	*(__be16 *)(mask + off) = htons(0xFFFF);
	*fields |= DPAA2_ETH_DIST_ETHTYPE;

	off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
	*(u8 *)(key + off) = l4_proto;
	*(u8 *)(mask + off) = 0xFF;
	*fields |= DPAA2_ETH_DIST_IPPROTO;

	return 0;
}
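/*
 * Example (hypothetical values): "ethtool -N eth0 flow-type tcp4
 * dst-port 80 action 1" reaches this helper with l4_proto == IPPROTO_TCP
 * and steers HTTP traffic to Rx queue 1.
 */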
static int dpaa2_eth_prep_ext_rule(struct ethtool_flow_ext *ext_value,
				   struct ethtool_flow_ext *ext_mask,
				   void *key, void *mask, u64 *fields)
{
	int off;

	if (ext_mask->vlan_etype)
		return -EOPNOTSUPP;

	if (ext_mask->vlan_tci) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI);
		*(__be16 *)(key + off) = ext_value->vlan_tci;
		*(__be16 *)(mask + off) = ext_mask->vlan_tci;
		*fields |= DPAA2_ETH_DIST_VLAN;
	}

	return 0;
}
static int dpaa2_eth_prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
				       struct ethtool_flow_ext *ext_mask,
				       void *key, void *mask, u64 *fields)
{
	int off;

	if (!is_zero_ether_addr(ext_mask->h_dest)) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
		ether_addr_copy(key + off, ext_value->h_dest);
		ether_addr_copy(mask + off, ext_mask->h_dest);
		*fields |= DPAA2_ETH_DIST_ETHDST;
	}

	return 0;
}
static int dpaa2_eth_prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key,
				   void *mask, u64 *fields)
{
	int err;

	switch (fs->flow_type & 0xFF) {
	case ETHER_FLOW:
		err = dpaa2_eth_prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
					      key, mask, fields);
		break;
	case IP_USER_FLOW:
		err = dpaa2_eth_prep_uip_rule(&fs->h_u.usr_ip4_spec,
					      &fs->m_u.usr_ip4_spec, key, mask, fields);
		break;
	case TCP_V4_FLOW:
		err = dpaa2_eth_prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
					     key, mask, IPPROTO_TCP, fields);
		break;
	case UDP_V4_FLOW:
		err = dpaa2_eth_prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
					     key, mask, IPPROTO_UDP, fields);
		break;
	case SCTP_V4_FLOW:
		err = dpaa2_eth_prep_l4_rule(&fs->h_u.sctp_ip4_spec,
					     &fs->m_u.sctp_ip4_spec, key, mask,
					     IPPROTO_SCTP, fields);
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (err)
		return err;

	if (fs->flow_type & FLOW_EXT) {
		err = dpaa2_eth_prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, fields);
		if (err)
			return err;
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		err = dpaa2_eth_prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key,
						  mask, fields);
		if (err)
			return err;
	}

	return 0;
}
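/*
 * Note: the FLOW_EXT (VLAN TCI) and FLOW_MAC_EXT (destination MAC)
 * extensions are applied on top of the base flow type, so they can be
 * combined with any of the specs handled in the switch above.
 */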
static int dpaa2_eth_do_cls_rule(struct net_device *net_dev,
				 struct ethtool_rx_flow_spec *fs,
				 bool add)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct device *dev = net_dev->dev.parent;
	struct dpni_rule_cfg rule_cfg = { 0 };
	struct dpni_fs_action_cfg fs_act = { 0 };
	dma_addr_t key_iova;
	u64 fields = 0;
	void *key_buf;
	int i, err;

	if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
	    fs->ring_cookie >= dpaa2_eth_queue_count(priv))
		return -EINVAL;

	rule_cfg.key_size = dpaa2_eth_cls_key_size(DPAA2_ETH_DIST_ALL);

	/* allocate twice the key size, for the actual key and for mask */
	key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL);
	if (!key_buf)
		return -ENOMEM;

	/* Fill the key and mask memory areas */
	err = dpaa2_eth_prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size, &fields);
	if (err)
		goto free_mem;

	if (!dpaa2_eth_fs_mask_enabled(priv)) {
		/* Masking allows us to configure a maximal key during init and
		 * use it for all flow steering rules. Without it, we include
		 * in the key only the fields actually used, so we need to
		 * extract the others from the final key buffer.
		 *
		 * Program the FS key if needed, or return error if previously
		 * set key can't be used for the current rule. User needs to
		 * delete existing rules in this case to allow for the new one.
		 */
		if (!priv->rx_cls_fields) {
			err = dpaa2_eth_set_cls(net_dev, fields);
			if (err)
				goto free_mem;

			priv->rx_cls_fields = fields;
		} else if (priv->rx_cls_fields != fields) {
			netdev_err(net_dev, "No support for multiple FS keys, need to delete existing rules\n");
			err = -EOPNOTSUPP;
			goto free_mem;
		}

		dpaa2_eth_cls_trim_rule(key_buf, fields);
		rule_cfg.key_size = dpaa2_eth_cls_key_size(fields);
	}

	key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, key_iova)) {
		err = -ENOMEM;
		goto free_mem;
	}

	rule_cfg.key_iova = key_iova;
	if (dpaa2_eth_fs_mask_enabled(priv))
		rule_cfg.mask_iova = key_iova + rule_cfg.key_size;

	if (add) {
		if (fs->ring_cookie == RX_CLS_FLOW_DISC)
			fs_act.options |= DPNI_FS_OPT_DISCARD;
		else
			fs_act.flow_id = fs->ring_cookie;
	}
	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
		if (add)
			err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
						i, fs->location, &rule_cfg,
						&fs_act);
		else
			err = dpni_remove_fs_entry(priv->mc_io, 0,
						   priv->mc_token, i,
						   &rule_cfg);
		if (err || priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
			break;
	}

	dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE);

free_mem:
	kfree(key_buf);

	return err;
}
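/*
 * Note: the key and the mask travel to the MC firmware in a single
 * DMA-mapped buffer, with the mask stored at key_iova + key_size; when
 * masking is not supported, only the trimmed key half is passed on.
 */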
static int dpaa2_eth_num_cls_rules(struct dpaa2_eth_priv *priv)
{
	int i, rules = 0;

	for (i = 0; i < dpaa2_eth_fs_count(priv); i++)
		if (priv->cls_rules[i].in_use)
			rules++;

	return rules;
}
static int dpaa2_eth_update_cls_rule(struct net_device *net_dev,
				     struct ethtool_rx_flow_spec *new_fs,
				     unsigned int location)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_eth_cls_rule *rule;
	int err = -EINVAL;

	if (!priv->rx_cls_enabled)
		return -EOPNOTSUPP;

	if (location >= dpaa2_eth_fs_count(priv))
		return -EINVAL;

	rule = &priv->cls_rules[location];

	/* If a rule is present at the specified location, delete it. */
	if (rule->in_use) {
		err = dpaa2_eth_do_cls_rule(net_dev, &rule->fs, false);
		if (err)
			return err;

		rule->in_use = 0;

		if (!dpaa2_eth_fs_mask_enabled(priv) &&
		    !dpaa2_eth_num_cls_rules(priv))
			priv->rx_cls_fields = 0;
	}

	/* If no new entry to add, return here */
	if (!new_fs)
		return err;

	err = dpaa2_eth_do_cls_rule(net_dev, new_fs, true);
	if (err)
		return err;

	rule->in_use = 1;
	rule->fs = *new_fs;

	return 0;
}
static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
			       struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int max_rules = dpaa2_eth_fs_count(priv);
	int i, j = 0;

	switch (rxnfc->cmd) {
	case ETHTOOL_GRXFH:
		/* we purposely ignore cmd->flow_type for now, because the
		 * classifier only supports a single set of fields for all
		 * protocols
		 */
		rxnfc->data = priv->rx_hash_fields;
		break;
	case ETHTOOL_GRXRINGS:
		rxnfc->data = dpaa2_eth_queue_count(priv);
		break;
	case ETHTOOL_GRXCLSRLCNT:
		rxnfc->rule_cnt = dpaa2_eth_num_cls_rules(priv);
		rxnfc->data = max_rules;
		break;
	case ETHTOOL_GRXCLSRULE:
		if (rxnfc->fs.location >= max_rules)
			return -EINVAL;
		rxnfc->fs.location = array_index_nospec(rxnfc->fs.location,
							max_rules);
		if (!priv->cls_rules[rxnfc->fs.location].in_use)
			return -EINVAL;
		rxnfc->fs = priv->cls_rules[rxnfc->fs.location].fs;
		break;
	case ETHTOOL_GRXCLSRLALL:
		for (i = 0; i < max_rules; i++) {
			if (!priv->cls_rules[i].in_use)
				continue;
			if (j == rxnfc->rule_cnt)
				return -EMSGSIZE;
			rule_locs[j++] = i;
		}
		rxnfc->rule_cnt = j;
		rxnfc->data = max_rules;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
			       struct ethtool_rxnfc *rxnfc)
{
	int err = 0;

	switch (rxnfc->cmd) {
	case ETHTOOL_SRXFH:
		if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data)
			return -EOPNOTSUPP;
		err = dpaa2_eth_set_hash(net_dev, rxnfc->data);
		break;
	case ETHTOOL_SRXCLSRLINS:
		err = dpaa2_eth_update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		err = dpaa2_eth_update_cls_rule(net_dev, NULL, rxnfc->fs.location);
		break;
	default:
		err = -EOPNOTSUPP;
	}

	return err;
}
int dpaa2_phc_index = -1;
EXPORT_SYMBOL(dpaa2_phc_index);
static int dpaa2_eth_get_ts_info(struct net_device *dev,
				 struct ethtool_ts_info *info)
{
	if (dpaa2_phc_index == -1)
		return ethtool_op_get_ts_info(dev, info);

	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	info->phc_index = dpaa2_phc_index;

	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
			 (1 << HWTSTAMP_TX_ON) |
			 (1 << HWTSTAMP_TX_ONESTEP_SYNC);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_ALL);
	return 0;
}
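/*
 * Note: dpaa2_phc_index stays -1 until the dpaa2-ptp driver probes the
 * DPRTC object and publishes its PTP clock index; until then the handler
 * above falls back to ethtool_op_get_ts_info(), reporting no PHC.
 */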
static int dpaa2_eth_get_tunable(struct net_device *net_dev,
				 const struct ethtool_tunable *tuna,
				 void *data)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = priv->rx_copybreak;
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
static int dpaa2_eth_set_tunable(struct net_device *net_dev,
				 const struct ethtool_tunable *tuna,
				 const void *data)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		priv->rx_copybreak = *(u32 *)data;
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
static int dpaa2_eth_get_coalesce(struct net_device *dev,
				  struct ethtool_coalesce *ic,
				  struct kernel_ethtool_coalesce *kernel_coal,
				  struct netlink_ext_ack *extack)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);
	struct dpaa2_io *dpio = priv->channel[0]->dpio;

	dpaa2_io_get_irq_coalescing(dpio, &ic->rx_coalesce_usecs);
	ic->use_adaptive_rx_coalesce = dpaa2_io_get_adaptive_coalescing(dpio);

	return 0;
}
static int dpaa2_eth_set_coalesce(struct net_device *dev,
				  struct ethtool_coalesce *ic,
				  struct kernel_ethtool_coalesce *kernel_coal,
				  struct netlink_ext_ack *extack)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);
	struct dpaa2_io *dpio;
	int prev_adaptive;
	u32 prev_rx_usecs;
	int i, j, err;

	/* Keep track of the previous value, just in case we fail */
	dpio = priv->channel[0]->dpio;
	dpaa2_io_get_irq_coalescing(dpio, &prev_rx_usecs);
	prev_adaptive = dpaa2_io_get_adaptive_coalescing(dpio);

	/* Setup new value for rx coalescing */
	for (i = 0; i < priv->num_channels; i++) {
		dpio = priv->channel[i]->dpio;

		dpaa2_io_set_adaptive_coalescing(dpio,
						 ic->use_adaptive_rx_coalesce);
		err = dpaa2_io_set_irq_coalescing(dpio, ic->rx_coalesce_usecs);
		if (err)
			goto restore_rx_usecs;
	}

	return 0;

restore_rx_usecs:
	for (j = 0; j < i; j++) {
		dpio = priv->channel[j]->dpio;

		dpaa2_io_set_irq_coalescing(dpio, prev_rx_usecs);
		dpaa2_io_set_adaptive_coalescing(dpio, prev_adaptive);
	}

	return err;
}
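/*
 * Note: coalescing is configured per channel; if any DPIO update fails,
 * the restore_rx_usecs path above rolls back the channels already
 * touched, keeping the settings consistent across all of them.
 */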
static void dpaa2_eth_get_channels(struct net_device *net_dev,
				   struct ethtool_channels *channels)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int queue_count = dpaa2_eth_queue_count(priv);

	channels->max_rx = queue_count;
	channels->max_tx = queue_count;
	channels->rx_count = queue_count;
	channels->tx_count = queue_count;

	/* Tx confirmation and Rx error */
	channels->max_other = queue_count + 1;
	channels->max_combined = channels->max_rx +
				 channels->max_tx +
				 channels->max_other;
	/* Tx conf and Rx err */
	channels->other_count = queue_count + 1;
	channels->combined_count = channels->rx_count +
				   channels->tx_count +
				   channels->other_count;
}
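/*
 * Note: the "other" channels account for the Tx confirmation queues plus
 * the single Rx error queue, hence queue_count + 1.
 */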
const struct ethtool_ops dpaa2_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.get_drvinfo = dpaa2_eth_get_drvinfo,
	.nway_reset = dpaa2_eth_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_link_ksettings = dpaa2_eth_get_link_ksettings,
	.set_link_ksettings = dpaa2_eth_set_link_ksettings,
	.get_pauseparam = dpaa2_eth_get_pauseparam,
	.set_pauseparam = dpaa2_eth_set_pauseparam,
	.get_sset_count = dpaa2_eth_get_sset_count,
	.get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
	.get_strings = dpaa2_eth_get_strings,
	.get_rxnfc = dpaa2_eth_get_rxnfc,
	.set_rxnfc = dpaa2_eth_set_rxnfc,
	.get_ts_info = dpaa2_eth_get_ts_info,
	.get_tunable = dpaa2_eth_get_tunable,
	.set_tunable = dpaa2_eth_set_tunable,
	.get_coalesce = dpaa2_eth_get_coalesce,
	.set_coalesce = dpaa2_eth_set_coalesce,
	.get_channels = dpaa2_eth_get_channels,
};