1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
4 /* ethtool support for ice */
	/* name of the stat as printed by "ethtool -S" */
	char stat_string[ETH_GSTRING_LEN];

/* Describe one exported counter: display name plus the size and byte
 * offset of member _stat within structure type _type. The offset/size
 * pair lets ice_get_ethtool_stats() read counters generically.
 */
#define ICE_STAT(_type, _name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = FIELD_SIZEOF(_type, _stat), \
	.stat_offset = offsetof(_type, _stat) \

/* convenience wrappers binding ICE_STAT to the two structures stats
 * are read from: the VSI (per-netdev) and the PF (per-port)
 */
#define ICE_VSI_STAT(_name, _stat) \
	ICE_STAT(struct ice_vsi, _name, _stat)
#define ICE_PF_STAT(_name, _stat) \
	ICE_STAT(struct ice_pf, _name, _stat)
/**
 * ice_q_stats_len - number of per-queue u64 stat values for a netdev
 * @netdev: network interface device structure
 *
 * Counts allocated (not just active) queues so the string set stays
 * constant regardless of runtime queue configuration.
 */
static int ice_q_stats_len(struct net_device *netdev)
	struct ice_netdev_priv *np = netdev_priv(netdev);

	/* each allocated Tx and Rx queue exports one ice_q_stats of u64s */
	return ((np->vsi->alloc_txq + np->vsi->alloc_rxq) *
		(sizeof(struct ice_q_stats) / sizeof(u64)));
/* entry counts of the fixed stat tables below */
#define ICE_PF_STATS_LEN ARRAY_SIZE(ice_gstrings_pf_stats)
#define ICE_VSI_STATS_LEN ARRAY_SIZE(ice_gstrings_vsi_stats)

/* total stat count for netdev n: PF table + VSI table + per-queue stats */
#define ICE_ALL_STATS_LEN(n) (ICE_PF_STATS_LEN + ICE_VSI_STATS_LEN + \
/* per-VSI statistics: HW eth_stats counters plus software counters
 * (tx_linearize, rx buffer/page allocation failures)
 */
static const struct ice_stats ice_gstrings_vsi_stats[] = {
	ICE_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
	ICE_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
	ICE_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
	ICE_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
	ICE_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
	ICE_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
	ICE_VSI_STAT("tx_bytes", eth_stats.tx_bytes),
	ICE_VSI_STAT("rx_bytes", eth_stats.rx_bytes),
	ICE_VSI_STAT("rx_discards", eth_stats.rx_discards),
	ICE_VSI_STAT("tx_errors", eth_stats.tx_errors),
	ICE_VSI_STAT("tx_linearize", tx_linearize),
	ICE_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
	ICE_VSI_STAT("rx_alloc_fail", rx_buf_failed),
	ICE_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
56 /* These PF_STATs might look like duplicates of some NETDEV_STATs,
57 * but they aren't. This device is capable of supporting multiple
58 * VSIs/netdevs on a single PF. The NETDEV_STATs are for individual
59 * netdevs whereas the PF_STATs are for the physical function that's
60 * hosting these netdevs.
62 * The PF_STATs are appended to the netdev stats only when ethtool -S
63 * is queried on the base PF netdev.
65 static struct ice_stats ice_gstrings_pf_stats[] = {
66 ICE_PF_STAT("tx_bytes", stats.eth.tx_bytes),
67 ICE_PF_STAT("rx_bytes", stats.eth.rx_bytes),
68 ICE_PF_STAT("tx_unicast", stats.eth.tx_unicast),
69 ICE_PF_STAT("rx_unicast", stats.eth.rx_unicast),
70 ICE_PF_STAT("tx_multicast", stats.eth.tx_multicast),
71 ICE_PF_STAT("rx_multicast", stats.eth.rx_multicast),
72 ICE_PF_STAT("tx_broadcast", stats.eth.tx_broadcast),
73 ICE_PF_STAT("rx_broadcast", stats.eth.rx_broadcast),
74 ICE_PF_STAT("tx_errors", stats.eth.tx_errors),
75 ICE_PF_STAT("tx_size_64", stats.tx_size_64),
76 ICE_PF_STAT("rx_size_64", stats.rx_size_64),
77 ICE_PF_STAT("tx_size_127", stats.tx_size_127),
78 ICE_PF_STAT("rx_size_127", stats.rx_size_127),
79 ICE_PF_STAT("tx_size_255", stats.tx_size_255),
80 ICE_PF_STAT("rx_size_255", stats.rx_size_255),
81 ICE_PF_STAT("tx_size_511", stats.tx_size_511),
82 ICE_PF_STAT("rx_size_511", stats.rx_size_511),
83 ICE_PF_STAT("tx_size_1023", stats.tx_size_1023),
84 ICE_PF_STAT("rx_size_1023", stats.rx_size_1023),
85 ICE_PF_STAT("tx_size_1522", stats.tx_size_1522),
86 ICE_PF_STAT("rx_size_1522", stats.rx_size_1522),
87 ICE_PF_STAT("tx_size_big", stats.tx_size_big),
88 ICE_PF_STAT("rx_size_big", stats.rx_size_big),
89 ICE_PF_STAT("link_xon_tx", stats.link_xon_tx),
90 ICE_PF_STAT("link_xon_rx", stats.link_xon_rx),
91 ICE_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
92 ICE_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
93 ICE_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
94 ICE_PF_STAT("rx_undersize", stats.rx_undersize),
95 ICE_PF_STAT("rx_fragments", stats.rx_fragments),
96 ICE_PF_STAT("rx_oversize", stats.rx_oversize),
97 ICE_PF_STAT("rx_jabber", stats.rx_jabber),
98 ICE_PF_STAT("rx_csum_bad", hw_csum_rx_error),
99 ICE_PF_STAT("rx_length_errors", stats.rx_len_errors),
100 ICE_PF_STAT("rx_dropped", stats.eth.rx_discards),
101 ICE_PF_STAT("rx_crc_errors", stats.crc_errors),
102 ICE_PF_STAT("illegal_bytes", stats.illegal_bytes),
103 ICE_PF_STAT("mac_local_faults", stats.mac_local_faults),
104 ICE_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
/* register offsets dumped for "ethtool -d"; read via rd32() in ice_get_regs() */
static u32 ice_regs_dump_list[] = {
/**
 * ice_nvm_version_str - format the NVM version strings
 * @hw: ptr to the hardware info
 *
 * Returns a pointer to a static buffer holding
 * "nvm_major.nvm_minor eetrack oem_major.oem_build.oem_patch".
 * NOTE(review): static buffer is not reentrant; presumably only called
 * from ethtool context -- confirm no concurrent callers.
 */
static char *ice_nvm_version_str(struct ice_hw *hw)
	static char buf[ICE_ETHTOOL_FWVER_LEN];

	/* OEM version word packs major/build/patch; unpack by mask+shift */
	full_ver = hw->nvm.oem_ver;
	ver = (u8)((full_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT);
	build = (u16)((full_ver & ICE_OEM_VER_BUILD_MASK) >>
		      ICE_OEM_VER_BUILD_SHIFT);
	patch = (u8)(full_ver & ICE_OEM_VER_PATCH_MASK);

	snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d",
		 (hw->nvm.ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT,
		 (hw->nvm.ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT,
		 hw->nvm.eetrack, ver, build, patch);
/**
 * ice_get_drvinfo - report driver/firmware identity for "ethtool -i"
 * @netdev: network interface device structure
 * @drvinfo: structure filled with driver name, version, FW version, bus info
 */
ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;

	strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version));
	/* fw_version string is formatted by ice_nvm_version_str() */
	strlcpy(drvinfo->fw_version, ice_nvm_version_str(&pf->hw),
		sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
		sizeof(drvinfo->bus_info));
/* ice_get_regs_len - size in bytes of the buffer ice_get_regs() fills */
static int ice_get_regs_len(struct net_device __always_unused *netdev)
	return sizeof(ice_regs_dump_list);
/**
 * ice_get_regs - dump the registers listed in ice_regs_dump_list into @p
 * @netdev: network interface device structure
 * @regs: ethtool register dump header
 * @p: output buffer, at least ice_get_regs_len() bytes
 */
ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 *regs_buf = (u32 *)p;

	/* one u32 register read per listed offset */
	for (i = 0; i < ARRAY_SIZE(ice_regs_dump_list); ++i)
		regs_buf[i] = rd32(hw, ice_regs_dump_list[i]);
/**
 * ice_get_msglevel - return the driver message level mask
 * @netdev: network interface device structure
 */
static u32 ice_get_msglevel(struct net_device *netdev)
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;

#ifndef CONFIG_DYNAMIC_DEBUG
	/* also surface the HW debug mask when static debug is in use */
	if (pf->hw.debug_mask)
		netdev_info(netdev, "hw debug_mask: 0x%llX\n",
#endif /* !CONFIG_DYNAMIC_DEBUG */

	return pf->msg_enable;
/**
 * ice_set_msglevel - set the driver message level from "ethtool -s msglvl"
 * @netdev: network interface device structure
 * @data: new message level mask
 */
static void ice_set_msglevel(struct net_device *netdev, u32 data)
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;

#ifndef CONFIG_DYNAMIC_DEBUG
	/* ICE_DBG_USER bits route the mask to the HW debug facility */
	if (ICE_DBG_USER & data)
		pf->hw.debug_mask = data;
		pf->msg_enable = data;
	pf->msg_enable = data;
#endif /* !CONFIG_DYNAMIC_DEBUG */
/**
 * ice_get_strings - copy the stat name set into @data
 * @netdev: network interface device structure
 * @stringset: string set ID
 * @data: buffer of ETH_GSTRING_LEN-sized slots to fill
 *
 * Order must match ice_get_ethtool_stats(): VSI stats, per-Tx-queue,
 * per-Rx-queue, then (PF netdev only) port stats.
 */
static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	char *p = (char *)data;

	/* VSI-level stat names */
	for (i = 0; i < ICE_VSI_STATS_LEN; i++) {
		snprintf(p, ETH_GSTRING_LEN, "%s",
			 ice_gstrings_vsi_stats[i].stat_string);
		p += ETH_GSTRING_LEN;

	/* two names (packets, bytes) per allocated Tx queue */
	ice_for_each_alloc_txq(vsi, i) {
		snprintf(p, ETH_GSTRING_LEN,
			 "tx-queue-%u.tx_packets", i);
		p += ETH_GSTRING_LEN;
		snprintf(p, ETH_GSTRING_LEN, "tx-queue-%u.tx_bytes", i);
		p += ETH_GSTRING_LEN;

	/* two names (packets, bytes) per allocated Rx queue */
	ice_for_each_alloc_rxq(vsi, i) {
		snprintf(p, ETH_GSTRING_LEN,
			 "rx-queue-%u.rx_packets", i);
		p += ETH_GSTRING_LEN;
		snprintf(p, ETH_GSTRING_LEN, "rx-queue-%u.rx_bytes", i);
		p += ETH_GSTRING_LEN;

	/* port-level ("port.*") names only appear on the PF VSI netdev */
	if (vsi->type != ICE_VSI_PF)

	for (i = 0; i < ICE_PF_STATS_LEN; i++) {
		snprintf(p, ETH_GSTRING_LEN, "port.%s",
			 ice_gstrings_pf_stats[i].stat_string);
		p += ETH_GSTRING_LEN;
/**
 * ice_get_sset_count - report the number of strings for a string set
 * @netdev: network interface device structure
 * @sset: string set ID
 */
static int ice_get_sset_count(struct net_device *netdev, int sset)
	/* The number (and order) of strings reported *must* remain
	 * constant for a given netdevice. This function must not
	 * report a different number based on run time parameters
	 * (such as the number of queues in use, or the setting of
	 * a private ethtool flag). This is due to the nature of the
	 * ethtool stats API.
	 *
	 * User space programs such as ethtool must make 3 separate
	 * ioctl requests, one for size, one for the strings, and
	 * finally one for the stats. Since these cross into
	 * user space, changes to the number or size could result in
	 * undefined memory access or incorrect string<->value
	 * correlations for statistics.
	 *
	 * Even if it appears to be safe, changes to the size or
	 * order of strings will suffer from race conditions and are
	 * not safe.
	 */
	return ICE_ALL_STATS_LEN(netdev);
/**
 * ice_get_ethtool_stats - copy stat values into @data
 * @netdev: network interface device structure
 * @stats: ethtool stats command header (unused)
 * @data: u64 array filled in the exact order ice_get_strings() emits names
 */
ice_get_ethtool_stats(struct net_device *netdev,
		      struct ethtool_stats __always_unused *stats, u64 *data)
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct ice_ring *ring;

	/* VSI stats read generically: offset into vsi, width from sizeof_stat */
	for (j = 0; j < ICE_VSI_STATS_LEN; j++) {
		p = (char *)vsi + ice_gstrings_vsi_stats[j].stat_offset;
		data[i++] = (ice_gstrings_vsi_stats[j].sizeof_stat ==
			     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;

	/* populate per queue stats */
	ice_for_each_alloc_txq(vsi, j) {
		ring = READ_ONCE(vsi->tx_rings[j]);
		data[i++] = ring->stats.pkts;
		data[i++] = ring->stats.bytes;

	ice_for_each_alloc_rxq(vsi, j) {
		ring = READ_ONCE(vsi->rx_rings[j]);
		data[i++] = ring->stats.pkts;
		data[i++] = ring->stats.bytes;

	/* port stats only on the PF VSI, mirroring ice_get_strings() */
	if (vsi->type != ICE_VSI_PF)

	for (j = 0; j < ICE_PF_STATS_LEN; j++) {
		p = (char *)pf + ice_gstrings_pf_stats[j].stat_offset;
		data[i++] = (ice_gstrings_pf_stats[j].sizeof_stat ==
			     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
/**
 * ice_get_link_ksettings - report link speed, duplex, media and pause modes
 * @netdev: network interface device structure
 * @ks: ethtool link settings structure to fill
 */
ice_get_link_ksettings(struct net_device *netdev,
		       struct ethtool_link_ksettings *ks)
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_link_status *hw_link_info;
	struct ice_vsi *vsi = np->vsi;

	hw_link_info = &vsi->port_info->phy.link_info;
	link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;

	ethtool_link_ksettings_add_link_mode(ks, supported,
	ethtool_link_ksettings_add_link_mode(ks, advertising,

	/* set speed and duplex */
	/* map the AQ link-speed code onto the ethtool SPEED_* value */
	switch (hw_link_info->link_speed) {
	case ICE_AQ_LINK_SPEED_100MB:
		ks->base.speed = SPEED_100;
	case ICE_AQ_LINK_SPEED_2500MB:
		ks->base.speed = SPEED_2500;
	case ICE_AQ_LINK_SPEED_5GB:
		ks->base.speed = SPEED_5000;
	case ICE_AQ_LINK_SPEED_10GB:
		ks->base.speed = SPEED_10000;
	case ICE_AQ_LINK_SPEED_25GB:
		ks->base.speed = SPEED_25000;
	case ICE_AQ_LINK_SPEED_40GB:
		ks->base.speed = SPEED_40000;
		ks->base.speed = SPEED_UNKNOWN;

	ks->base.duplex = DUPLEX_FULL;
	/* link down: speed and duplex are unknown */
	ks->base.speed = SPEED_UNKNOWN;
	ks->base.duplex = DUPLEX_UNKNOWN;

	/* set autoneg settings */
	ks->base.autoneg = ((hw_link_info->an_info & ICE_AQ_AN_COMPLETED) ?
			    AUTONEG_ENABLE : AUTONEG_DISABLE);

	/* set media type settings */
	switch (vsi->port_info->phy.media_type) {
	case ICE_MEDIA_FIBER:
		ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
		ks->base.port = PORT_FIBRE;
	case ICE_MEDIA_BASET:
		ethtool_link_ksettings_add_link_mode(ks, supported, TP);
		ethtool_link_ksettings_add_link_mode(ks, advertising, TP);
		ks->base.port = PORT_TP;
	case ICE_MEDIA_BACKPLANE:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported, Backplane);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
		ks->base.port = PORT_NONE;
		ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
		ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE);
		ks->base.port = PORT_DA;
		ks->base.port = PORT_OTHER;

	/* flow control is symmetric and always supported */
	ethtool_link_ksettings_add_link_mode(ks, supported, Pause);

	/* advertise Pause/Asym_Pause according to the requested FC mode */
	switch (vsi->port_info->fc.req_mode) {
		ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
	case ICE_FC_TX_PAUSE:
		ethtool_link_ksettings_add_link_mode(ks, advertising,
	case ICE_FC_RX_PAUSE:
		ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
		ethtool_link_ksettings_del_link_mode(ks, advertising, Pause);
		ethtool_link_ksettings_del_link_mode(ks, advertising,
/**
 * ice_get_rxnfc - command to get RX flow classification rules
 * @netdev: network interface device structure
 * @cmd: ethtool rxnfc command
 * @rule_locs: buffer to return Rx flow classification rules
 *
 * Returns Success if the command is supported.
 */
static int ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
			 u32 __always_unused *rule_locs)
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	int ret = -EOPNOTSUPP;

	case ETHTOOL_GRXRINGS:
		/* report the RSS queue count as the number of Rx rings */
		cmd->data = vsi->rss_size;
/**
 * ice_get_ringparam - report current and maximum descriptor ring sizes
 * @netdev: network interface device structure
 * @ring: ethtool ringparam structure to fill
 */
ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	ring->rx_max_pending = ICE_MAX_NUM_DESC;
	ring->tx_max_pending = ICE_MAX_NUM_DESC;
	/* all rings share one count, so ring 0 is representative */
	ring->rx_pending = vsi->rx_rings[0]->count;
	ring->tx_pending = vsi->tx_rings[0]->count;

	/* Rx mini and jumbo rings are not supported */
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
/**
 * ice_set_ringparam - change Tx/Rx descriptor ring sizes
 * @netdev: network interface device structure
 * @ring: requested ring parameters
 *
 * Validates and rounds the requested counts, allocates replacement rings
 * with the new size, then swaps them in under an interface down/up cycle.
 */
ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
	struct ice_ring *tx_rings = NULL, *rx_rings = NULL;
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	int i, timeout = 50, err = 0;
	u32 new_rx_cnt, new_tx_cnt;

	/* reject counts outside the supported descriptor range */
	if (ring->tx_pending > ICE_MAX_NUM_DESC ||
	    ring->tx_pending < ICE_MIN_NUM_DESC ||
	    ring->rx_pending > ICE_MAX_NUM_DESC ||
	    ring->rx_pending < ICE_MIN_NUM_DESC) {
		netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
			   ring->tx_pending, ring->rx_pending,
			   ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC,
			   ICE_REQ_DESC_MULTIPLE);

	/* hardware requires counts in multiples of ICE_REQ_DESC_MULTIPLE */
	new_tx_cnt = ALIGN(ring->tx_pending, ICE_REQ_DESC_MULTIPLE);
	if (new_tx_cnt != ring->tx_pending)
		"Requested Tx descriptor count rounded up to %d\n",
	new_rx_cnt = ALIGN(ring->rx_pending, ICE_REQ_DESC_MULTIPLE);
	if (new_rx_cnt != ring->rx_pending)
		"Requested Rx descriptor count rounded up to %d\n",

	/* if nothing to do return success */
	if (new_tx_cnt == vsi->tx_rings[0]->count &&
	    new_rx_cnt == vsi->rx_rings[0]->count) {
		netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n")\
;

	/* serialize against other reconfiguration via __ICE_CFG_BUSY */
	while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) {
		usleep_range(1000, 2000);

	/* set for the next time the netdev is started */
	if (!netif_running(vsi->netdev)) {
		for (i = 0; i < vsi->alloc_txq; i++)
			vsi->tx_rings[i]->count = new_tx_cnt;
		for (i = 0; i < vsi->alloc_rxq; i++)
			vsi->rx_rings[i]->count = new_rx_cnt;
		netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n");

	if (new_tx_cnt == vsi->tx_rings[0]->count)

	/* alloc updated Tx resources */
	netdev_info(netdev, "Changing Tx descriptor count from %d to %d\n",
		    vsi->tx_rings[0]->count, new_tx_cnt);

	tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
				sizeof(struct ice_ring), GFP_KERNEL);

	for (i = 0; i < vsi->alloc_txq; i++) {
		/* clone ring and setup updated count */
		tx_rings[i] = *vsi->tx_rings[i];
		tx_rings[i].count = new_tx_cnt;
		tx_rings[i].desc = NULL;
		tx_rings[i].tx_buf = NULL;
		err = ice_setup_tx_ring(&tx_rings[i]);
			/* on failure, unwind the rings set up so far */
			ice_clean_tx_ring(&tx_rings[i]);
			devm_kfree(&pf->pdev->dev, tx_rings);

	if (new_rx_cnt == vsi->rx_rings[0]->count)

	/* alloc updated Rx resources */
	netdev_info(netdev, "Changing Rx descriptor count from %d to %d\n",
		    vsi->rx_rings[0]->count, new_rx_cnt);

	rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
				sizeof(struct ice_ring), GFP_KERNEL);

	for (i = 0; i < vsi->alloc_rxq; i++) {
		/* clone ring and setup updated count */
		rx_rings[i] = *vsi->rx_rings[i];
		rx_rings[i].count = new_rx_cnt;
		rx_rings[i].desc = NULL;
		rx_rings[i].rx_buf = NULL;
		/* this is to allow wr32 to have something to write to
		 * during early allocation of Rx buffers
		 */
		rx_rings[i].tail = vsi->back->hw.hw_addr + PRTGEN_STATUS;

		err = ice_setup_rx_ring(&rx_rings[i]);

		/* allocate Rx buffers */
		err = ice_alloc_rx_bufs(&rx_rings[i],
					ICE_DESC_UNUSED(&rx_rings[i]));
			/* on failure, unwind the rings set up so far */
			ice_free_rx_ring(&rx_rings[i]);
			devm_kfree(&pf->pdev->dev, rx_rings);

	/* Bring interface down, copy in the new ring info, then restore the
	 * interface. if VSI is up, bring it down and then back up
	 */
	if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
		for (i = 0; i < vsi->alloc_txq; i++) {
			ice_free_tx_ring(vsi->tx_rings[i]);
			*vsi->tx_rings[i] = tx_rings[i];
		devm_kfree(&pf->pdev->dev, tx_rings);

		for (i = 0; i < vsi->alloc_rxq; i++) {
			ice_free_rx_ring(vsi->rx_rings[i]);
			/* copy the real tail offset */
			rx_rings[i].tail = vsi->rx_rings[i]->tail;
			/* this is to fake out the allocation routine
			 * into thinking it has to realloc everything
			 * but the recycling logic will let us re-use
			 * the buffers allocated above
			 */
			rx_rings[i].next_to_use = 0;
			rx_rings[i].next_to_clean = 0;
			rx_rings[i].next_to_alloc = 0;
			*vsi->rx_rings[i] = rx_rings[i];
		devm_kfree(&pf->pdev->dev, rx_rings);

	/* error cleanup if the Rx allocations failed after getting Tx */
	for (i = 0; i < vsi->alloc_txq; i++)
		ice_free_tx_ring(&tx_rings[i]);
	devm_kfree(&pf->pdev->dev, tx_rings);

	clear_bit(__ICE_CFG_BUSY, pf->state);
/**
 * ice_nway_reset - restart autonegotiation ("ethtool -r")
 * @netdev: network interface device structure
 */
static int ice_nway_reset(struct net_device *netdev)
	/* restart autonegotiation */
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_link_status *hw_link_info;
	struct ice_vsi *vsi = np->vsi;
	struct ice_port_info *pi;
	enum ice_status status;

	hw_link_info = &pi->phy.link_info;
	link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;

	/* ask FW to restart AN, keeping the current link-up state */
	status = ice_aq_set_link_restart_an(pi, link_up, NULL);
		netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
			    status, pi->hw->adminq.sq_last_status);
/**
 * ice_get_pauseparam - Get Flow Control status
 * @netdev: network interface device structure
 * @pause: ethernet pause (flow control) parameters
 */
ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_port_info *pi;

	pi = np->vsi->port_info;
	/* autoneg reported from the AN-completed bit of the link info */
	((pi->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) ?
	 AUTONEG_ENABLE : AUTONEG_DISABLE);

	/* translate the current FC mode into rx_pause/tx_pause flags */
	if (pi->fc.current_mode == ICE_FC_RX_PAUSE) {
	} else if (pi->fc.current_mode == ICE_FC_TX_PAUSE) {
	} else if (pi->fc.current_mode == ICE_FC_FULL) {
/**
 * ice_set_pauseparam - Set Flow Control parameter
 * @netdev: network interface device structure
 * @pause: requested Tx/Rx flow control configuration
 */
ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_link_status *hw_link_info;
	struct ice_pf *pf = np->vsi->back;
	struct ice_vsi *vsi = np->vsi;
	struct ice_hw *hw = &pf->hw;
	struct ice_port_info *pi;
	enum ice_status status;

	hw_link_info = &pi->phy.link_info;
	link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;

	/* Changing the port's flow control is not supported if this isn't the
	 * PF VSI
	 */
	if (vsi->type != ICE_VSI_PF) {
		netdev_info(netdev, "Changing flow control parameters only supported for PF VSI\n");

	if (pause->autoneg != (hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) {
		netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");

	/* If we have link and don't have autoneg */
	if (!test_bit(__ICE_DOWN, pf->state) &&
	    !(hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) {
		/* Send message that it might not necessarily work*/
		netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");

	/* translate the rx/tx pause request into an ICE_FC_* mode */
	if (pause->rx_pause && pause->tx_pause)
		pi->fc.req_mode = ICE_FC_FULL;
	else if (pause->rx_pause && !pause->tx_pause)
		pi->fc.req_mode = ICE_FC_RX_PAUSE;
	else if (!pause->rx_pause && pause->tx_pause)
		pi->fc.req_mode = ICE_FC_TX_PAUSE;
	else if (!pause->rx_pause && !pause->tx_pause)
		pi->fc.req_mode = ICE_FC_NONE;

	/* Set the FC mode and only restart AN if link is up */
	status = ice_set_fc(pi, &aq_failures, link_up);

	/* report which admin-queue stage failed, if any */
	if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) {
		netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %d\n",
			    status, hw->adminq.sq_last_status);
	} else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) {
		netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %d\n",
			    status, hw->adminq.sq_last_status);
	} else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) {
		netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %d\n",
			    status, hw->adminq.sq_last_status);

	if (!test_bit(__ICE_DOWN, pf->state)) {
		/* Give it a little more time to try to come back. If still
		 * down, restart autoneg link or reinitialize the interface.
		 */
		if (!test_bit(__ICE_DOWN, pf->state))
			return ice_nway_reset(netdev);
/**
 * ice_get_rxfh_key_size - get the RSS hash key size
 * @netdev: network interface device structure
 *
 * Returns the RSS hash key size in bytes.
 */
static u32 ice_get_rxfh_key_size(struct net_device __always_unused *netdev)
	return ICE_VSIQF_HKEY_ARRAY_SIZE;
/**
 * ice_get_rxfh_indir_size - get the Rx flow hash indirection table size
 * @netdev: network interface device structure
 *
 * Returns the table size.
 */
static u32 ice_get_rxfh_indir_size(struct net_device *netdev)
	struct ice_netdev_priv *np = netdev_priv(netdev);

	return np->vsi->rss_table_size;
/**
 * ice_get_rxfh - get the Rx flow hash indirection table
 * @netdev: network interface device structure
 * @indir: indirection table
 * @key: hash key
 * @hfunc: hash function
 *
 * Reads the indirection table directly from the hardware.
 */
ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;

	/* only the Toeplitz hash is supported */
		*hfunc = ETH_RSS_HASH_TOP;

	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		/* RSS not supported return error here */
		netdev_warn(netdev, "RSS is not configured on this VSI!\n");

	/* scratch buffer for the byte-wide LUT read back from HW */
	lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL);

	if (ice_get_rss(vsi, key, lut, vsi->rss_table_size)) {

	/* widen each LUT byte into the u32 table ethtool expects */
	for (i = 0; i < vsi->rss_table_size; i++)
		indir[i] = (u32)(lut[i]);

	devm_kfree(&pf->pdev->dev, lut);
/**
 * ice_set_rxfh - set the Rx flow hash indirection table
 * @netdev: network interface device structure
 * @indir: indirection table
 * @key: hash key
 * @hfunc: hash function
 *
 * Returns -EINVAL if the table specifies an invalid queue id, otherwise
 * returns 0 after programming the table.
 */
static int ice_set_rxfh(struct net_device *netdev, const u32 *indir,
			const u8 *key, const u8 hfunc)
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;

	/* only Toeplitz ("top") or "no change" are accepted */
	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)

	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		/* RSS not supported return error here */
		netdev_warn(netdev, "RSS is not configured on this VSI!\n");

	/* cache the user key in the VSI so later reconfig can reuse it */
	if (!vsi->rss_hkey_user) {
			devm_kzalloc(&pf->pdev->dev,
				     ICE_VSIQF_HKEY_ARRAY_SIZE,
		if (!vsi->rss_hkey_user)
	memcpy(vsi->rss_hkey_user, key, ICE_VSIQF_HKEY_ARRAY_SIZE);
	seed = vsi->rss_hkey_user;

	/* likewise cache the user LUT */
	if (!vsi->rss_lut_user) {
		vsi->rss_lut_user = devm_kzalloc(&pf->pdev->dev,
		if (!vsi->rss_lut_user)

	/* Each 32 bits pointed by 'indir' is stored with a lut entry */
	for (i = 0; i < vsi->rss_table_size; i++)
		vsi->rss_lut_user[i] = (u8)(indir[i]);

	ice_fill_rss_lut(vsi->rss_lut_user, vsi->rss_table_size,

	if (ice_set_rss(vsi, seed, vsi->rss_lut_user, vsi->rss_table_size))
/* ethtool entry points registered for every ice netdev */
static const struct ethtool_ops ice_ethtool_ops = {
	.get_link_ksettings = ice_get_link_ksettings,
	.get_drvinfo = ice_get_drvinfo,
	.get_regs_len = ice_get_regs_len,
	.get_regs = ice_get_regs,
	.get_msglevel = ice_get_msglevel,
	.set_msglevel = ice_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = ice_get_strings,
	.get_ethtool_stats = ice_get_ethtool_stats,
	.get_sset_count = ice_get_sset_count,
	.get_rxnfc = ice_get_rxnfc,
	.get_ringparam = ice_get_ringparam,
	.set_ringparam = ice_set_ringparam,
	.nway_reset = ice_nway_reset,
	.get_pauseparam = ice_get_pauseparam,
	.set_pauseparam = ice_set_pauseparam,
	.get_rxfh_key_size = ice_get_rxfh_key_size,
	.get_rxfh_indir_size = ice_get_rxfh_indir_size,
	.get_rxfh = ice_get_rxfh,
	.set_rxfh = ice_set_rxfh,
/**
 * ice_set_ethtool_ops - setup netdev ethtool ops
 * @netdev: network interface device structure
 *
 * setup netdev ethtool ops with ice specific ops
 */
void ice_set_ethtool_ops(struct net_device *netdev)
	netdev->ethtool_ops = &ice_ethtool_ops;