1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2018 Intel Corporation. */
4 /* ethtool support for ixgbe */
6 #include <linux/interrupt.h>
7 #include <linux/types.h>
8 #include <linux/module.h>
9 #include <linux/slab.h>
10 #include <linux/pci.h>
11 #include <linux/netdevice.h>
12 #include <linux/ethtool.h>
13 #include <linux/vmalloc.h>
14 #include <linux/highmem.h>
15 #include <linux/uaccess.h>
18 #include "ixgbe_phy.h"
21 #define IXGBE_ALL_RAR_ENTRIES 16
23 enum {NETDEV_STATS, IXGBE_STATS};
26 char stat_string[ETH_GSTRING_LEN];
32 #define IXGBE_STAT(m) IXGBE_STATS, \
33 sizeof(((struct ixgbe_adapter *)0)->m), \
34 offsetof(struct ixgbe_adapter, m)
35 #define IXGBE_NETDEV_STAT(m) NETDEV_STATS, \
36 sizeof(((struct rtnl_link_stats64 *)0)->m), \
37 offsetof(struct rtnl_link_stats64, m)
39 static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
40 {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
41 {"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
42 {"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
43 {"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
44 {"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
45 {"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
46 {"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
47 {"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
48 {"lsc_int", IXGBE_STAT(lsc_int)},
49 {"tx_busy", IXGBE_STAT(tx_busy)},
50 {"non_eop_descs", IXGBE_STAT(non_eop_descs)},
51 {"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
52 {"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
53 {"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
54 {"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
55 {"multicast", IXGBE_NETDEV_STAT(multicast)},
56 {"broadcast", IXGBE_STAT(stats.bprc)},
57 {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
58 {"collisions", IXGBE_NETDEV_STAT(collisions)},
59 {"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
60 {"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
61 {"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
62 {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
63 {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
64 {"fdir_match", IXGBE_STAT(stats.fdirmatch)},
65 {"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
66 {"fdir_overflow", IXGBE_STAT(fdir_overflow)},
67 {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
68 {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
69 {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
70 {"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
71 {"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
72 {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
73 {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
74 {"tx_restart_queue", IXGBE_STAT(restart_queue)},
75 {"rx_length_errors", IXGBE_STAT(stats.rlec)},
76 {"rx_long_length_errors", IXGBE_STAT(stats.roc)},
77 {"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
78 {"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
79 {"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
80 {"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
81 {"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
82 {"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
83 {"alloc_rx_page", IXGBE_STAT(alloc_rx_page)},
84 {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
85 {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
86 {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
87 {"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
88 {"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
89 {"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
90 {"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
91 {"tx_hwtstamp_timeouts", IXGBE_STAT(tx_hwtstamp_timeouts)},
92 {"tx_hwtstamp_skipped", IXGBE_STAT(tx_hwtstamp_skipped)},
93 {"rx_hwtstamp_cleared", IXGBE_STAT(rx_hwtstamp_cleared)},
94 {"tx_ipsec", IXGBE_STAT(tx_ipsec)},
95 {"rx_ipsec", IXGBE_STAT(rx_ipsec)},
97 {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
98 {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
99 {"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
100 {"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
101 {"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
102 {"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
103 {"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
104 {"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
105 #endif /* IXGBE_FCOE */
108 /* ixgbe allocates num_tx_queues and num_rx_queues symmetrically, so
109  * we define IXGBE_NUM_RX_QUEUES to evaluate to num_tx_queues. This is
110  * used because we do not have a good way to get the maximum number of
111  * Rx queues with CONFIG_RPS disabled.
113 #define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues
115 #define IXGBE_QUEUE_STATS_LEN ( \
116 (netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
117 (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
118 #define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
119 #define IXGBE_PB_STATS_LEN ( \
120 (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
121 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
122 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
123 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
125 #define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
126 IXGBE_PB_STATS_LEN + \
127 IXGBE_QUEUE_STATS_LEN)
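/* Total number of u64 stats reported to ethtool: the global table above,
 * plus the per-packet-buffer XON/XOFF counters, plus packets/bytes for
 * every Tx and Rx queue.  This count must stay in sync with what
 * ixgbe_get_ethtool_stats() and ixgbe_get_strings() emit.
 */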
129 static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
130 "Register test (offline)", "Eeprom test (offline)",
131 "Interrupt test (offline)", "Loopback test (offline)",
132 "Link test (on/offline)"
134 #define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
136 static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = {
137 #define IXGBE_PRIV_FLAGS_LEGACY_RX BIT(0)
139 #define IXGBE_PRIV_FLAGS_VF_IPSEC_EN BIT(1)
143 #define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings)
145 #define ixgbe_isbackplane(type) ((type) == ixgbe_media_type_backplane)
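/* Backplane parts report the 10G KX4/KR link modes that match their device
 * ID; non-backplane media is reported as 10GBASE-T.  The same mapping is
 * applied to both the supported and the advertised masks below.
 */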
147 static void ixgbe_set_supported_10gtypes(struct ixgbe_hw *hw,
148 struct ethtool_link_ksettings *cmd)
150 if (!ixgbe_isbackplane(hw->phy.media_type)) {
151 ethtool_link_ksettings_add_link_mode(cmd, supported,
156 switch (hw->device_id) {
157 case IXGBE_DEV_ID_82598:
158 case IXGBE_DEV_ID_82599_KX4:
159 case IXGBE_DEV_ID_82599_KX4_MEZZ:
160 case IXGBE_DEV_ID_X550EM_X_KX4:
161 ethtool_link_ksettings_add_link_mode
162 (cmd, supported, 10000baseKX4_Full);
164 case IXGBE_DEV_ID_82598_BX:
165 case IXGBE_DEV_ID_82599_KR:
166 case IXGBE_DEV_ID_X550EM_X_KR:
167 case IXGBE_DEV_ID_X550EM_X_XFI:
168 ethtool_link_ksettings_add_link_mode
169 (cmd, supported, 10000baseKR_Full);
172 ethtool_link_ksettings_add_link_mode
173 (cmd, supported, 10000baseKX4_Full);
174 ethtool_link_ksettings_add_link_mode
175 (cmd, supported, 10000baseKR_Full);
180 static void ixgbe_set_advertising_10gtypes(struct ixgbe_hw *hw,
181 struct ethtool_link_ksettings *cmd)
183 if (!ixgbe_isbackplane(hw->phy.media_type)) {
184 ethtool_link_ksettings_add_link_mode(cmd, advertising,
189 switch (hw->device_id) {
190 case IXGBE_DEV_ID_82598:
191 case IXGBE_DEV_ID_82599_KX4:
192 case IXGBE_DEV_ID_82599_KX4_MEZZ:
193 case IXGBE_DEV_ID_X550EM_X_KX4:
194 ethtool_link_ksettings_add_link_mode
195 (cmd, advertising, 10000baseKX4_Full);
197 case IXGBE_DEV_ID_82598_BX:
198 case IXGBE_DEV_ID_82599_KR:
199 case IXGBE_DEV_ID_X550EM_X_KR:
200 case IXGBE_DEV_ID_X550EM_X_XFI:
201 ethtool_link_ksettings_add_link_mode
202 (cmd, advertising, 10000baseKR_Full);
205 ethtool_link_ksettings_add_link_mode
206 (cmd, advertising, 10000baseKX4_Full);
207 ethtool_link_ksettings_add_link_mode
208 (cmd, advertising, 10000baseKR_Full);
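/* ethtool get_link_ksettings: report the supported and advertised link
 * modes based on the MAC/PHY capabilities, pick the port type from the
 * PHY/SFP module, advertise the currently requested flow control, and
 * fill in the active speed/duplex when the link is up.
 */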
213 static int ixgbe_get_link_ksettings(struct net_device *netdev,
214 struct ethtool_link_ksettings *cmd)
216 struct ixgbe_adapter *adapter = netdev_priv(netdev);
217 struct ixgbe_hw *hw = &adapter->hw;
218 ixgbe_link_speed supported_link;
219 bool autoneg = false;
221 ethtool_link_ksettings_zero_link_mode(cmd, supported);
222 ethtool_link_ksettings_zero_link_mode(cmd, advertising);
224 hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
226 /* set the supported link speeds */
227 if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) {
228 ixgbe_set_supported_10gtypes(hw, cmd);
229 ixgbe_set_advertising_10gtypes(hw, cmd);
231 if (supported_link & IXGBE_LINK_SPEED_5GB_FULL)
232 ethtool_link_ksettings_add_link_mode(cmd, supported,
235 if (supported_link & IXGBE_LINK_SPEED_2_5GB_FULL)
236 ethtool_link_ksettings_add_link_mode(cmd, supported,
239 if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) {
240 if (ixgbe_isbackplane(hw->phy.media_type)) {
241 ethtool_link_ksettings_add_link_mode(cmd, supported,
243 ethtool_link_ksettings_add_link_mode(cmd, advertising,
246 ethtool_link_ksettings_add_link_mode(cmd, supported,
248 ethtool_link_ksettings_add_link_mode(cmd, advertising,
252 if (supported_link & IXGBE_LINK_SPEED_100_FULL) {
253 ethtool_link_ksettings_add_link_mode(cmd, supported,
255 ethtool_link_ksettings_add_link_mode(cmd, advertising,
258 if (supported_link & IXGBE_LINK_SPEED_10_FULL) {
259 ethtool_link_ksettings_add_link_mode(cmd, supported,
261 ethtool_link_ksettings_add_link_mode(cmd, advertising,
265 /* set the advertised speeds */
266 if (hw->phy.autoneg_advertised) {
267 ethtool_link_ksettings_zero_link_mode(cmd, advertising);
268 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
269 ethtool_link_ksettings_add_link_mode(cmd, advertising,
271 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
272 ethtool_link_ksettings_add_link_mode(cmd, advertising,
274 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
275 ixgbe_set_advertising_10gtypes(hw, cmd);
276 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
277 if (ethtool_link_ksettings_test_link_mode
278 (cmd, supported, 1000baseKX_Full))
279 ethtool_link_ksettings_add_link_mode
280 (cmd, advertising, 1000baseKX_Full);
282 ethtool_link_ksettings_add_link_mode
283 (cmd, advertising, 1000baseT_Full);
285 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL)
286 ethtool_link_ksettings_add_link_mode(cmd, advertising,
288 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
289 ethtool_link_ksettings_add_link_mode(cmd, advertising,
292 if (hw->phy.multispeed_fiber && !autoneg) {
293 if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
294 ethtool_link_ksettings_add_link_mode
295 (cmd, advertising, 10000baseT_Full);
300 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
301 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
302 cmd->base.autoneg = AUTONEG_ENABLE;
304 cmd->base.autoneg = AUTONEG_DISABLE;
306 /* Determine the remaining settings based on the PHY type. */
307 switch (adapter->hw.phy.type) {
310 case ixgbe_phy_x550em_ext_t:
312 case ixgbe_phy_cu_unknown:
313 ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
314 ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
315 cmd->base.port = PORT_TP;
318 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
319 ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
320 cmd->base.port = PORT_FIBRE;
323 case ixgbe_phy_sfp_passive_tyco:
324 case ixgbe_phy_sfp_passive_unknown:
325 case ixgbe_phy_sfp_ftl:
326 case ixgbe_phy_sfp_avago:
327 case ixgbe_phy_sfp_intel:
328 case ixgbe_phy_sfp_unknown:
329 case ixgbe_phy_qsfp_passive_unknown:
330 case ixgbe_phy_qsfp_active_unknown:
331 case ixgbe_phy_qsfp_intel:
332 case ixgbe_phy_qsfp_unknown:
333 /* SFP+ devices, further checking needed */
334 switch (adapter->hw.phy.sfp_type) {
335 case ixgbe_sfp_type_da_cu:
336 case ixgbe_sfp_type_da_cu_core0:
337 case ixgbe_sfp_type_da_cu_core1:
338 ethtool_link_ksettings_add_link_mode(cmd, supported,
340 ethtool_link_ksettings_add_link_mode(cmd, advertising,
342 cmd->base.port = PORT_DA;
344 case ixgbe_sfp_type_sr:
345 case ixgbe_sfp_type_lr:
346 case ixgbe_sfp_type_srlr_core0:
347 case ixgbe_sfp_type_srlr_core1:
348 case ixgbe_sfp_type_1g_sx_core0:
349 case ixgbe_sfp_type_1g_sx_core1:
350 case ixgbe_sfp_type_1g_lx_core0:
351 case ixgbe_sfp_type_1g_lx_core1:
352 ethtool_link_ksettings_add_link_mode(cmd, supported,
354 ethtool_link_ksettings_add_link_mode(cmd, advertising,
356 cmd->base.port = PORT_FIBRE;
358 case ixgbe_sfp_type_not_present:
359 ethtool_link_ksettings_add_link_mode(cmd, supported,
361 ethtool_link_ksettings_add_link_mode(cmd, advertising,
363 cmd->base.port = PORT_NONE;
365 case ixgbe_sfp_type_1g_cu_core0:
366 case ixgbe_sfp_type_1g_cu_core1:
367 ethtool_link_ksettings_add_link_mode(cmd, supported,
369 ethtool_link_ksettings_add_link_mode(cmd, advertising,
371 cmd->base.port = PORT_TP;
373 case ixgbe_sfp_type_unknown:
375 ethtool_link_ksettings_add_link_mode(cmd, supported,
377 ethtool_link_ksettings_add_link_mode(cmd, advertising,
379 cmd->base.port = PORT_OTHER;
384 ethtool_link_ksettings_add_link_mode(cmd, supported,
386 ethtool_link_ksettings_add_link_mode(cmd, advertising,
388 cmd->base.port = PORT_NONE;
390 case ixgbe_phy_unknown:
391 case ixgbe_phy_generic:
392 case ixgbe_phy_sfp_unsupported:
394 ethtool_link_ksettings_add_link_mode(cmd, supported,
396 ethtool_link_ksettings_add_link_mode(cmd, advertising,
398 cmd->base.port = PORT_OTHER;
402 /* Indicate pause support */
403 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
405 switch (hw->fc.requested_mode) {
407 ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
409 case ixgbe_fc_rx_pause:
410 ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
411 ethtool_link_ksettings_add_link_mode(cmd, advertising,
414 case ixgbe_fc_tx_pause:
415 ethtool_link_ksettings_add_link_mode(cmd, advertising,
419 ethtool_link_ksettings_del_link_mode(cmd, advertising, Pause);
420 ethtool_link_ksettings_del_link_mode(cmd, advertising,
424 if (netif_carrier_ok(netdev)) {
425 switch (adapter->link_speed) {
426 case IXGBE_LINK_SPEED_10GB_FULL:
427 cmd->base.speed = SPEED_10000;
429 case IXGBE_LINK_SPEED_5GB_FULL:
430 cmd->base.speed = SPEED_5000;
432 case IXGBE_LINK_SPEED_2_5GB_FULL:
433 cmd->base.speed = SPEED_2500;
435 case IXGBE_LINK_SPEED_1GB_FULL:
436 cmd->base.speed = SPEED_1000;
438 case IXGBE_LINK_SPEED_100_FULL:
439 cmd->base.speed = SPEED_100;
441 case IXGBE_LINK_SPEED_10_FULL:
442 cmd->base.speed = SPEED_10;
447 cmd->base.duplex = DUPLEX_FULL;
449 cmd->base.speed = SPEED_UNKNOWN;
450 cmd->base.duplex = DUPLEX_UNKNOWN;
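/* ethtool set_link_ksettings: for copper and multispeed-fiber ports this
 * only restricts the advertised speeds (duplex forcing is not supported);
 * for all other media the only accepted setting is a fixed 10Gb/s full
 * duplex link.
 */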
456 static int ixgbe_set_link_ksettings(struct net_device *netdev,
457 const struct ethtool_link_ksettings *cmd)
459 struct ixgbe_adapter *adapter = netdev_priv(netdev);
460 struct ixgbe_hw *hw = &adapter->hw;
464 if ((hw->phy.media_type == ixgbe_media_type_copper) ||
465 (hw->phy.multispeed_fiber)) {
467 * this function does not support duplex forcing, but can
468 * limit the advertising of the adapter to the specified speed
470 if (!bitmap_subset(cmd->link_modes.advertising,
471 cmd->link_modes.supported,
472 __ETHTOOL_LINK_MODE_MASK_NBITS))
475 /* only allow one speed at a time if no autoneg */
476 if (!cmd->base.autoneg && hw->phy.multispeed_fiber) {
477 if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
479 ethtool_link_ksettings_test_link_mode(cmd, advertising,
484 old = hw->phy.autoneg_advertised;
486 if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
488 advertised |= IXGBE_LINK_SPEED_10GB_FULL;
489 if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
491 advertised |= IXGBE_LINK_SPEED_5GB_FULL;
492 if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
494 advertised |= IXGBE_LINK_SPEED_2_5GB_FULL;
495 if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
497 advertised |= IXGBE_LINK_SPEED_1GB_FULL;
499 if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
501 advertised |= IXGBE_LINK_SPEED_100_FULL;
503 if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
505 advertised |= IXGBE_LINK_SPEED_10_FULL;
507 if (old == advertised)
509 /* this sets the link speed and restarts auto-neg */
510 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
511 usleep_range(1000, 2000);
513 hw->mac.autotry_restart = true;
514 err = hw->mac.ops.setup_link(hw, advertised, true);
516 e_info(probe, "setup link failed with code %d\n", err);
517 hw->mac.ops.setup_link(hw, old, true);
519 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
521 /* in this case we currently only support 10Gb/FULL */
522 u32 speed = cmd->base.speed;
524 if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
525 (!ethtool_link_ksettings_test_link_mode(cmd, advertising,
527 (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL))
534 static void ixgbe_get_pause_stats(struct net_device *netdev,
535 struct ethtool_pause_stats *stats)
537 struct ixgbe_adapter *adapter = netdev_priv(netdev);
538 struct ixgbe_hw_stats *hwstats = &adapter->stats;
540 stats->tx_pause_frames = hwstats->lxontxc + hwstats->lxofftxc;
541 stats->rx_pause_frames = hwstats->lxonrxc + hwstats->lxoffrxc;
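/* Report the current flow control configuration: autoneg is reported as
 * enabled only when the device supports flow control autoneg and it has
 * not been explicitly disabled, and rx/tx pause reflect fc.current_mode.
 */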
544 static void ixgbe_get_pauseparam(struct net_device *netdev,
545 struct ethtool_pauseparam *pause)
547 struct ixgbe_adapter *adapter = netdev_priv(netdev);
548 struct ixgbe_hw *hw = &adapter->hw;
550 if (ixgbe_device_supports_autoneg_fc(hw) &&
551 !hw->fc.disable_fc_autoneg)
556 if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
558 } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
560 } else if (hw->fc.current_mode == ixgbe_fc_full) {
566 static int ixgbe_set_pauseparam(struct net_device *netdev,
567 struct ethtool_pauseparam *pause)
569 struct ixgbe_adapter *adapter = netdev_priv(netdev);
570 struct ixgbe_hw *hw = &adapter->hw;
571 struct ixgbe_fc_info fc = hw->fc;
573 /* 82598 does not support link flow control with DCB enabled */
574 if ((hw->mac.type == ixgbe_mac_82598EB) &&
575 (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
578 /* some devices do not support autoneg of link flow control */
579 if ((pause->autoneg == AUTONEG_ENABLE) &&
580 !ixgbe_device_supports_autoneg_fc(hw))
583 fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);
585 if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
586 fc.requested_mode = ixgbe_fc_full;
587 else if (pause->rx_pause && !pause->tx_pause)
588 fc.requested_mode = ixgbe_fc_rx_pause;
589 else if (!pause->rx_pause && pause->tx_pause)
590 fc.requested_mode = ixgbe_fc_tx_pause;
592 fc.requested_mode = ixgbe_fc_none;
594 /* if the requested flow control changed, apply it and use the new autoneg setting */
595 if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
597 if (netif_running(netdev))
598 ixgbe_reinit_locked(adapter);
600 ixgbe_reset(adapter);
606 static u32 ixgbe_get_msglevel(struct net_device *netdev)
608 struct ixgbe_adapter *adapter = netdev_priv(netdev);
609 return adapter->msg_enable;
612 static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
614 struct ixgbe_adapter *adapter = netdev_priv(netdev);
615 adapter->msg_enable = data;
618 static int ixgbe_get_regs_len(struct net_device *netdev)
620 #define IXGBE_REGS_LEN 1145
621 return IXGBE_REGS_LEN * sizeof(u32);
624 #define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
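/* Snapshot a fixed layout of MAC registers and selected statistics into
 * the user buffer.  The buffer holds IXGBE_REGS_LEN u32 words; each index
 * below corresponds to a fixed slot so the dump layout stays consistent
 * across MAC generations.
 */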
626 static void ixgbe_get_regs(struct net_device *netdev,
627 struct ethtool_regs *regs, void *p)
629 struct ixgbe_adapter *adapter = netdev_priv(netdev);
630 struct ixgbe_hw *hw = &adapter->hw;
634 memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));
636 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
639 /* General Registers */
640 regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
641 regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
642 regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
643 regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
644 regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
645 regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
646 regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
647 regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
650 regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
651 regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
652 regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA(hw));
653 regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
654 regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
655 regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
656 regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
657 regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
658 regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
659 regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC(hw));
662 /* don't read EICR because it can clear interrupt causes, instead
663 * read EICS which is a shadow but doesn't clear EICR */
664 regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
665 regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
666 regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
667 regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
668 regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
669 regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
670 regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
671 regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
672 regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
673 regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
674 regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
675 regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
678 regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
679 for (i = 0; i < 4; i++)
680 regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i));
681 for (i = 0; i < 8; i++) {
682 switch (hw->mac.type) {
683 case ixgbe_mac_82598EB:
684 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
685 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
687 case ixgbe_mac_82599EB:
690 case ixgbe_mac_X550EM_x:
691 case ixgbe_mac_x550em_a:
692 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
693 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
699 regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
700 regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
703 for (i = 0; i < 64; i++)
704 regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
705 for (i = 0; i < 64; i++)
706 regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
707 for (i = 0; i < 64; i++)
708 regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
709 for (i = 0; i < 64; i++)
710 regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
711 for (i = 0; i < 64; i++)
712 regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
713 for (i = 0; i < 64; i++)
714 regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
715 for (i = 0; i < 16; i++)
716 regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
717 for (i = 0; i < 16; i++)
718 regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
719 regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
720 for (i = 0; i < 8; i++)
721 regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
722 regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
723 regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);
726 regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
727 regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
728 for (i = 0; i < 16; i++)
729 regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
730 for (i = 0; i < 16; i++)
731 regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
732 regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
733 regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
734 regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
735 regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
736 regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
737 regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
738 for (i = 0; i < 8; i++)
739 regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
740 for (i = 0; i < 8; i++)
741 regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
742 regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);
745 for (i = 0; i < 32; i++)
746 regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
747 for (i = 0; i < 32; i++)
748 regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
749 for (i = 0; i < 32; i++)
750 regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
751 for (i = 0; i < 32; i++)
752 regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
753 for (i = 0; i < 32; i++)
754 regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
755 for (i = 0; i < 32; i++)
756 regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
757 for (i = 0; i < 32; i++)
758 regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
759 for (i = 0; i < 32; i++)
760 regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
761 regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
762 for (i = 0; i < 16; i++)
763 regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
764 regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
765 for (i = 0; i < 8; i++)
766 regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
767 regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);
770 regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
771 regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
772 regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
773 regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
774 regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
775 regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
776 regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
777 regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
778 regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
781 regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); /* same as FCCFG */
782 regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */
784 switch (hw->mac.type) {
785 case ixgbe_mac_82598EB:
786 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
787 regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
788 for (i = 0; i < 8; i++)
790 IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
791 for (i = 0; i < 8; i++)
793 IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
794 for (i = 0; i < 8; i++)
796 IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
797 for (i = 0; i < 8; i++)
799 IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
801 case ixgbe_mac_82599EB:
804 case ixgbe_mac_X550EM_x:
805 case ixgbe_mac_x550em_a:
806 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
807 regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
808 for (i = 0; i < 8; i++)
810 IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i));
811 for (i = 0; i < 8; i++)
813 IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i));
814 for (i = 0; i < 8; i++)
816 IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i));
817 for (i = 0; i < 8; i++)
819 IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i));
825 for (i = 0; i < 8; i++)
827 IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */
828 for (i = 0; i < 8; i++)
830 IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */
833 regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
834 regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
835 regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
836 regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
837 for (i = 0; i < 8; i++)
838 regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
839 regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
840 regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
841 regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
842 regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
843 regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
844 regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
845 regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
846 for (i = 0; i < 8; i++)
847 regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
848 for (i = 0; i < 8; i++)
849 regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
850 for (i = 0; i < 8; i++)
851 regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
852 for (i = 0; i < 8; i++)
853 regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
854 regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
855 regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
856 regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
857 regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
858 regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
859 regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
860 regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
861 regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
862 regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
863 regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
864 regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc);
865 regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32);
866 regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc);
867 regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32);
868 for (i = 0; i < 8; i++)
869 regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
870 regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
871 regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
872 regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
873 regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
874 regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
875 regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
876 regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
877 regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor);
878 regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32);
879 regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
880 regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
881 regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
882 regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
883 regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
884 regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
885 regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
886 regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
887 regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
888 regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
889 regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
890 for (i = 0; i < 16; i++)
891 regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
892 for (i = 0; i < 16; i++)
893 regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
894 for (i = 0; i < 16; i++)
895 regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
896 for (i = 0; i < 16; i++)
897 regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);
900 regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
901 regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
902 regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
903 regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
904 regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
905 regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
906 regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
907 regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
908 regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
909 regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
910 regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
911 regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
912 regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
913 regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
914 regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
915 regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
916 regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
917 regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
918 regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
919 regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
920 regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
921 regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
922 regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
923 regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
924 regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
925 regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
926 regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
927 regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
928 regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
929 regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
930 regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
931 regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
932 regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
935 regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
936 for (i = 0; i < 8; i++)
937 regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
938 regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
939 for (i = 0; i < 4; i++)
940 regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
941 regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
942 regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
943 for (i = 0; i < 8; i++)
944 regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
945 regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
946 for (i = 0; i < 4; i++)
947 regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
948 regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
949 regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
950 for (i = 0; i < 4; i++)
951 regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i));
952 regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
953 for (i = 0; i < 4; i++)
954 regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i));
955 for (i = 0; i < 8; i++)
956 regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
957 regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
958 regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
959 regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
960 regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
961 regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
962 regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
963 regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
964 regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
965 regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);
967 /* 82599 X540 specific registers */
968 regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
970 /* 82599 X540 specific DCB registers */
971 regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
972 regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC);
973 for (i = 0; i < 4; i++)
974 regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i));
975 regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM);
976 /* same as RTTQCNRM */
977 regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD);
978 /* same as RTTQCNRR */
980 /* X540 specific DCB registers */
981 regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
982 regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);
984 /* Security config registers */
985 regs_buff[1139] = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
986 regs_buff[1140] = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
987 regs_buff[1141] = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
988 regs_buff[1142] = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
989 regs_buff[1143] = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
990 regs_buff[1144] = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
993 static int ixgbe_get_eeprom_len(struct net_device *netdev)
995 struct ixgbe_adapter *adapter = netdev_priv(netdev);
996 return adapter->hw.eeprom.word_size * 2;
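/* Read an arbitrary byte range from the EEPROM.  Reads are done in 16-bit
 * words, so the range is widened to whole words and the copy back to the
 * caller is offset by one byte when the request starts on an odd address.
 */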
999 static int ixgbe_get_eeprom(struct net_device *netdev,
1000 struct ethtool_eeprom *eeprom, u8 *bytes)
1002 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1003 struct ixgbe_hw *hw = &adapter->hw;
1005 int first_word, last_word, eeprom_len;
1009 if (eeprom->len == 0)
1012 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
1014 first_word = eeprom->offset >> 1;
1015 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
1016 eeprom_len = last_word - first_word + 1;
1018 eeprom_buff = kmalloc_array(eeprom_len, sizeof(u16), GFP_KERNEL);
1022 ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
1025 /* Device's eeprom is always little-endian, word addressable */
1026 for (i = 0; i < eeprom_len; i++)
1027 le16_to_cpus(&eeprom_buff[i]);
1029 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
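/* Write an arbitrary byte range to the EEPROM.  Partial words at the start
 * and end of the range are handled with a read/modify/write, the buffer is
 * byteswapped to the device's little-endian word format, and the EEPROM
 * checksum is updated afterwards.
 */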
1035 static int ixgbe_set_eeprom(struct net_device *netdev,
1036 struct ethtool_eeprom *eeprom, u8 *bytes)
1038 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1039 struct ixgbe_hw *hw = &adapter->hw;
1042 int max_len, first_word, last_word, ret_val = 0;
1045 if (eeprom->len == 0)
1048 if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
1051 max_len = hw->eeprom.word_size * 2;
1053 first_word = eeprom->offset >> 1;
1054 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
1055 eeprom_buff = kmalloc(max_len, GFP_KERNEL);
1061 if (eeprom->offset & 1) {
1063 * need read/modify/write of first changed EEPROM word
1064 * only the second byte of the word is being modified
1066 ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
1072 if ((eeprom->offset + eeprom->len) & 1) {
1074 * need read/modify/write of last changed EEPROM word
1075 * only the first byte of the word is being modified
1077 ret_val = hw->eeprom.ops.read(hw, last_word,
1078 &eeprom_buff[last_word - first_word]);
1083 /* Device's eeprom is always little-endian, word addressable */
1084 for (i = 0; i < last_word - first_word + 1; i++)
1085 le16_to_cpus(&eeprom_buff[i]);
1087 memcpy(ptr, bytes, eeprom->len);
1089 for (i = 0; i < last_word - first_word + 1; i++)
1090 cpu_to_le16s(&eeprom_buff[i]);
1092 ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
1093 last_word - first_word + 1,
1096 /* Update the checksum */
1098 hw->eeprom.ops.update_checksum(hw);
1105 static void ixgbe_get_drvinfo(struct net_device *netdev,
1106 struct ethtool_drvinfo *drvinfo)
1108 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1110 strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
1112 strlcpy(drvinfo->fw_version, adapter->eeprom_id,
1113 sizeof(drvinfo->fw_version));
1115 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
1116 sizeof(drvinfo->bus_info));
1118 drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN;
1121 static void ixgbe_get_ringparam(struct net_device *netdev,
1122 struct ethtool_ringparam *ring)
1124 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1125 struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
1126 struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
1128 ring->rx_max_pending = IXGBE_MAX_RXD;
1129 ring->tx_max_pending = IXGBE_MAX_TXD;
1130 ring->rx_pending = rx_ring->count;
1131 ring->tx_pending = tx_ring->count;
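/* Resize the Tx/XDP/Rx descriptor rings.  The requested counts are clamped
 * to the supported range and aligned to the descriptor multiple.  If the
 * interface is down only the counts are updated; otherwise new rings are
 * allocated first and swapped in, so an allocation failure leaves the
 * original rings untouched.
 */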
1134 static int ixgbe_set_ringparam(struct net_device *netdev,
1135 struct ethtool_ringparam *ring)
1137 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1138 struct ixgbe_ring *temp_ring;
1140 u32 new_rx_count, new_tx_count;
1142 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
1145 new_tx_count = clamp_t(u32, ring->tx_pending,
1146 IXGBE_MIN_TXD, IXGBE_MAX_TXD);
1147 new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
1149 new_rx_count = clamp_t(u32, ring->rx_pending,
1150 IXGBE_MIN_RXD, IXGBE_MAX_RXD);
1151 new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
1153 if ((new_tx_count == adapter->tx_ring_count) &&
1154 (new_rx_count == adapter->rx_ring_count)) {
1159 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
1160 usleep_range(1000, 2000);
1162 if (!netif_running(adapter->netdev)) {
1163 for (i = 0; i < adapter->num_tx_queues; i++)
1164 adapter->tx_ring[i]->count = new_tx_count;
1165 for (i = 0; i < adapter->num_xdp_queues; i++)
1166 adapter->xdp_ring[i]->count = new_tx_count;
1167 for (i = 0; i < adapter->num_rx_queues; i++)
1168 adapter->rx_ring[i]->count = new_rx_count;
1169 adapter->tx_ring_count = new_tx_count;
1170 adapter->xdp_ring_count = new_tx_count;
1171 adapter->rx_ring_count = new_rx_count;
1175 /* allocate temporary buffer to store rings in */
1176 i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
1177 adapter->num_rx_queues);
1178 temp_ring = vmalloc(array_size(i, sizeof(struct ixgbe_ring)));
1185 ixgbe_down(adapter);
1188 * Setup new Tx resources and free the old Tx resources in that order.
1189 * We can then assign the new resources to the rings via a memcpy.
1190 * The advantage to this approach is that we are guaranteed to still
1191 * have resources even in the case of an allocation failure.
1193 if (new_tx_count != adapter->tx_ring_count) {
1194 for (i = 0; i < adapter->num_tx_queues; i++) {
1195 memcpy(&temp_ring[i], adapter->tx_ring[i],
1196 sizeof(struct ixgbe_ring));
1198 temp_ring[i].count = new_tx_count;
1199 err = ixgbe_setup_tx_resources(&temp_ring[i]);
1203 ixgbe_free_tx_resources(&temp_ring[i]);
1209 for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
1210 memcpy(&temp_ring[i], adapter->xdp_ring[j],
1211 sizeof(struct ixgbe_ring));
1213 temp_ring[i].count = new_tx_count;
1214 err = ixgbe_setup_tx_resources(&temp_ring[i]);
1218 ixgbe_free_tx_resources(&temp_ring[i]);
1224 for (i = 0; i < adapter->num_tx_queues; i++) {
1225 ixgbe_free_tx_resources(adapter->tx_ring[i]);
1227 memcpy(adapter->tx_ring[i], &temp_ring[i],
1228 sizeof(struct ixgbe_ring));
1230 for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
1231 ixgbe_free_tx_resources(adapter->xdp_ring[j]);
1233 memcpy(adapter->xdp_ring[j], &temp_ring[i],
1234 sizeof(struct ixgbe_ring));
1237 adapter->tx_ring_count = new_tx_count;
1240 /* Repeat the process for the Rx rings if needed */
1241 if (new_rx_count != adapter->rx_ring_count) {
1242 for (i = 0; i < adapter->num_rx_queues; i++) {
1243 memcpy(&temp_ring[i], adapter->rx_ring[i],
1244 sizeof(struct ixgbe_ring));
1246 /* Clear copied XDP RX-queue info */
1247 memset(&temp_ring[i].xdp_rxq, 0,
1248 sizeof(temp_ring[i].xdp_rxq));
1250 temp_ring[i].count = new_rx_count;
1251 err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
1255 ixgbe_free_rx_resources(&temp_ring[i]);
1262 for (i = 0; i < adapter->num_rx_queues; i++) {
1263 ixgbe_free_rx_resources(adapter->rx_ring[i]);
1265 memcpy(adapter->rx_ring[i], &temp_ring[i],
1266 sizeof(struct ixgbe_ring));
1269 adapter->rx_ring_count = new_rx_count;
1276 clear_bit(__IXGBE_RESETTING, &adapter->state);
1280 static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
1284 return IXGBE_TEST_LEN;
1286 return IXGBE_STATS_LEN;
1287 case ETH_SS_PRIV_FLAGS:
1288 return IXGBE_PRIV_FLAGS_STR_LEN;
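/* Fill the ethtool stats array: global stats from ixgbe_gstrings_stats,
 * then packets/bytes per Tx and Rx queue (read under the u64_stats
 * seqcount so 32-bit readers see consistent values), then per-packet-buffer
 * XON/XOFF counters.  The order must match ixgbe_get_strings().
 */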
1294 static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1295 struct ethtool_stats *stats, u64 *data)
1297 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1298 struct rtnl_link_stats64 temp;
1299 const struct rtnl_link_stats64 *net_stats;
1301 struct ixgbe_ring *ring;
1305 ixgbe_update_stats(adapter);
1306 net_stats = dev_get_stats(netdev, &temp);
1307 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
1308 switch (ixgbe_gstrings_stats[i].type) {
1310 p = (char *) net_stats +
1311 ixgbe_gstrings_stats[i].stat_offset;
1314 p = (char *) adapter +
1315 ixgbe_gstrings_stats[i].stat_offset;
1322 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
1323 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1325 for (j = 0; j < netdev->num_tx_queues; j++) {
1326 ring = adapter->tx_ring[j];
1335 start = u64_stats_fetch_begin_irq(&ring->syncp);
1336 data[i] = ring->stats.packets;
1337 data[i+1] = ring->stats.bytes;
1338 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1341 for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
1342 ring = adapter->rx_ring[j];
1351 start = u64_stats_fetch_begin_irq(&ring->syncp);
1352 data[i] = ring->stats.packets;
1353 data[i+1] = ring->stats.bytes;
1354 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1358 for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
1359 data[i++] = adapter->stats.pxontxc[j];
1360 data[i++] = adapter->stats.pxofftxc[j];
1362 for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
1363 data[i++] = adapter->stats.pxonrxc[j];
1364 data[i++] = adapter->stats.pxoffrxc[j];
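/* Emit the string tables for the self-test names, the statistics keys
 * (in the same order as ixgbe_get_ethtool_stats() fills the data) and
 * the private flag names.
 */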
1368 static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
1371 char *p = (char *)data;
1374 switch (stringset) {
1376 for (i = 0; i < IXGBE_TEST_LEN; i++) {
1377 memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN);
1378 data += ETH_GSTRING_LEN;
1382 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
1383 memcpy(p, ixgbe_gstrings_stats[i].stat_string,
1385 p += ETH_GSTRING_LEN;
1387 for (i = 0; i < netdev->num_tx_queues; i++) {
1388 sprintf(p, "tx_queue_%u_packets", i);
1389 p += ETH_GSTRING_LEN;
1390 sprintf(p, "tx_queue_%u_bytes", i);
1391 p += ETH_GSTRING_LEN;
1393 for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
1394 sprintf(p, "rx_queue_%u_packets", i);
1395 p += ETH_GSTRING_LEN;
1396 sprintf(p, "rx_queue_%u_bytes", i);
1397 p += ETH_GSTRING_LEN;
1399 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
1400 sprintf(p, "tx_pb_%u_pxon", i);
1401 p += ETH_GSTRING_LEN;
1402 sprintf(p, "tx_pb_%u_pxoff", i);
1403 p += ETH_GSTRING_LEN;
1405 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
1406 sprintf(p, "rx_pb_%u_pxon", i);
1407 p += ETH_GSTRING_LEN;
1408 sprintf(p, "rx_pb_%u_pxoff", i);
1409 p += ETH_GSTRING_LEN;
1411 /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
1413 case ETH_SS_PRIV_FLAGS:
1414 memcpy(data, ixgbe_priv_flags_strings,
1415 IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
1419 static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
1421 struct ixgbe_hw *hw = &adapter->hw;
1425 if (ixgbe_removed(hw->hw_addr)) {
1431 hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
1439 /* ethtool register test data */
1440 struct ixgbe_reg_test {
1448 /* In the hardware, registers are laid out either singly, in arrays
1449 * spaced 0x40 bytes apart, or in contiguous tables. We assume
1450 * most tests take place on arrays or single registers (handled
1451 * as a single-element array) and special-case the tables.
1452 * Table tests are always pattern tests.
1454 * We also make provision for some required setup steps by specifying
1455 * registers to be written without any read-back testing.
1458 #define PATTERN_TEST 1
1459 #define SET_READ_TEST 2
1460 #define WRITE_NO_TEST 3
1461 #define TABLE32_TEST 4
1462 #define TABLE64_TEST_LO 5
1463 #define TABLE64_TEST_HI 6
1465 /* default 82599 register test */
1466 static const struct ixgbe_reg_test reg_test_82599[] = {
1467 { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1468 { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1469 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1470 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
1471 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
1472 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1473 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1474 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
1475 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1476 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
1477 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1478 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1479 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1480 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1481 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
1482 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
1483 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1484 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
1485 { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1489 /* default 82598 register test */
1490 static const struct ixgbe_reg_test reg_test_82598[] = {
1491 { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1492 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1493 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1494 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
1495 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1496 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1497 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1498 /* Enable all four RX queues before testing. */
1499 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
1500 /* RDH is read-only for 82598, only test RDT. */
1501 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1502 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
1503 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1504 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1505 { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
1506 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1507 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1508 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1509 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
1510 { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
1511 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1512 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
1513 { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
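/* Write each test pattern (masked by 'write') to the register, read it
 * back and compare against 'mask'.  The original register value is saved
 * beforehand and restored afterwards; a mismatch logs the failing register
 * and flags the test as failed.
 */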
1517 static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
1518 u32 mask, u32 write)
1520 u32 pat, val, before;
1521 static const u32 test_pattern[] = {
1522 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
1524 if (ixgbe_removed(adapter->hw.hw_addr)) {
1528 for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
1529 before = ixgbe_read_reg(&adapter->hw, reg);
1530 ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
1531 val = ixgbe_read_reg(&adapter->hw, reg);
1532 if (val != (test_pattern[pat] & write & mask)) {
1533 e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
1534 reg, val, (test_pattern[pat] & write & mask));
1536 ixgbe_write_reg(&adapter->hw, reg, before);
1539 ixgbe_write_reg(&adapter->hw, reg, before);
1544 static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
1545 u32 mask, u32 write)
1549 if (ixgbe_removed(adapter->hw.hw_addr)) {
1553 before = ixgbe_read_reg(&adapter->hw, reg);
1554 ixgbe_write_reg(&adapter->hw, reg, write & mask);
1555 val = ixgbe_read_reg(&adapter->hw, reg);
1556 if ((write & mask) != (val & mask)) {
1557 e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
1558 reg, (val & mask), (write & mask));
1560 ixgbe_write_reg(&adapter->hw, reg, before);
1563 ixgbe_write_reg(&adapter->hw, reg, before);
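/* Run the MAC-specific register test table: the STATUS register is checked
 * separately with a toggle mask, then each table entry is run as a pattern,
 * set/check or write-only test over the given array of register instances.
 */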
1567 static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
1569 const struct ixgbe_reg_test *test;
1570 u32 value, before, after;
1573 if (ixgbe_removed(adapter->hw.hw_addr)) {
1574 e_err(drv, "Adapter removed - register test blocked\n");
1578 switch (adapter->hw.mac.type) {
1579 case ixgbe_mac_82598EB:
1580 toggle = 0x7FFFF3FF;
1581 test = reg_test_82598;
1583 case ixgbe_mac_82599EB:
1584 case ixgbe_mac_X540:
1585 case ixgbe_mac_X550:
1586 case ixgbe_mac_X550EM_x:
1587 case ixgbe_mac_x550em_a:
1588 toggle = 0x7FFFF30F;
1589 test = reg_test_82599;
1597 * Because the status register is such a special case,
1598 * we handle it separately from the rest of the register
1599 * tests. Some bits are read-only, some toggle, and some
1600 * are writeable on newer MACs.
1602 before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS);
1603 value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle);
1604 ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
1605 after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
1606 if (value != after) {
1607 e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
1612 /* restore previous status */
1613 ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before);
1616 * Perform the remainder of the register test, looping through
1617 * the test table until we either fail or reach the null entry.
1620 for (i = 0; i < test->array_len; i++) {
1623 switch (test->test_type) {
1625 b = reg_pattern_test(adapter, data,
1626 test->reg + (i * 0x40),
1631 b = reg_set_and_check(adapter, data,
1632 test->reg + (i * 0x40),
1637 ixgbe_write_reg(&adapter->hw,
1638 test->reg + (i * 0x40),
1642 b = reg_pattern_test(adapter, data,
1643 test->reg + (i * 4),
1647 case TABLE64_TEST_LO:
1648 b = reg_pattern_test(adapter, data,
1649 test->reg + (i * 8),
1653 case TABLE64_TEST_HI:
1654 b = reg_pattern_test(adapter, data,
1655 (test->reg + 4) + (i * 8),
1670 static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
1672 struct ixgbe_hw *hw = &adapter->hw;
1673 if (hw->eeprom.ops.validate_checksum(hw, NULL))
1680 static irqreturn_t ixgbe_test_intr(int irq, void *data)
1682 struct net_device *netdev = (struct net_device *) data;
1683 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1685 adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
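/* Legacy/MSI interrupt self-test: hook ixgbe_test_intr() on the device
 * vector, then walk the first ten cause bits, forcing masked and unmasked
 * combinations and checking via test_icr that only the expected interrupts
 * get posted.  MSI-X vectors are not exercised here.
 */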
1690 static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1692 struct net_device *netdev = adapter->netdev;
1693 u32 mask, i = 0, shared_int = true;
1694 u32 irq = adapter->pdev->irq;
1698 /* Hook up test interrupt handler just for this test */
1699 if (adapter->msix_entries) {
1700 /* NOTE: we don't test MSI-X interrupts here, yet */
1702 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1704 if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
1709 } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
1710 netdev->name, netdev)) {
1712 } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
1713 netdev->name, netdev)) {
1717 e_info(hw, "testing %s interrupt\n", shared_int ?
1718 "shared" : "unshared");
1720 /* Disable all the interrupts */
1721 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1722 IXGBE_WRITE_FLUSH(&adapter->hw);
1723 usleep_range(10000, 20000);
1725 /* Test each interrupt */
1726 for (; i < 10; i++) {
1727 /* Interrupt to test */
1732 * Disable the interrupts to be reported in
1733 * the cause register and then force the same
1734 * interrupt and see if one gets posted. If
1735 * an interrupt was posted to the bus, the test failed.
1738 adapter->test_icr = 0;
1739 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1740 ~mask & 0x00007FFF);
1741 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1742 ~mask & 0x00007FFF);
1743 IXGBE_WRITE_FLUSH(&adapter->hw);
1744 usleep_range(10000, 20000);
1746 if (adapter->test_icr & mask) {
1753 * Enable the interrupt to be reported in the cause
1754 * register and then force the same interrupt and see
1755 * if one gets posted. If an interrupt was not posted
1756 * to the bus, the test failed.
1758 adapter->test_icr = 0;
1759 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1760 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
1761 IXGBE_WRITE_FLUSH(&adapter->hw);
1762 usleep_range(10000, 20000);
1764 if (!(adapter->test_icr & mask)) {
1771 * Disable the other interrupts to be reported in
1772 * the cause register and then force the other
1773 * interrupts and see if any get posted. If
1774 * an interrupt was posted to the bus, the test failed.
1777 adapter->test_icr = 0;
1778 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1779 ~mask & 0x00007FFF);
1780 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1781 ~mask & 0x00007FFF);
1782 IXGBE_WRITE_FLUSH(&adapter->hw);
1783 usleep_range(10000, 20000);
1785 if (adapter->test_icr) {
1792 /* Disable all the interrupts */
1793 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1794 IXGBE_WRITE_FLUSH(&adapter->hw);
1795 usleep_range(10000, 20000);
1797 /* Unhook test interrupt handler */
1798 free_irq(irq, netdev);
1803 static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1805 /* Shut down the DMA engines now so they can be reinitialized later,
1806 * since the test rings and normally used rings should overlap on
1807 * queue 0, we can just use the standard disable Rx/Tx calls and they
1808 * will take care of disabling the test rings for us.
1812 ixgbe_disable_rx(adapter);
1815 ixgbe_disable_tx(adapter);
1817 ixgbe_reset(adapter);
1819 ixgbe_free_tx_resources(&adapter->test_tx_ring);
1820 ixgbe_free_rx_resources(&adapter->test_rx_ring);
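/* Allocate and configure a single Tx and Rx test ring on queue 0 for the
 * loopback test.  The Tx DMA engine is enabled on 82599 and newer MACs,
 * and the RXCTRL DMBYPS bit is set while Rx is briefly disabled around
 * the ring configuration.
 */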
1823 static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1825 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1826 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1827 struct ixgbe_hw *hw = &adapter->hw;
1832 /* Setup Tx descriptor ring and Tx buffers */
1833 tx_ring->count = IXGBE_DEFAULT_TXD;
1834 tx_ring->queue_index = 0;
1835 tx_ring->dev = &adapter->pdev->dev;
1836 tx_ring->netdev = adapter->netdev;
1837 tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
1839 err = ixgbe_setup_tx_resources(tx_ring);
1843 switch (adapter->hw.mac.type) {
1844 case ixgbe_mac_82599EB:
1845 case ixgbe_mac_X540:
1846 case ixgbe_mac_X550:
1847 case ixgbe_mac_X550EM_x:
1848 case ixgbe_mac_x550em_a:
1849 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
1850 reg_data |= IXGBE_DMATXCTL_TE;
1851 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
1857 ixgbe_configure_tx_ring(adapter, tx_ring);
1859 /* Setup Rx Descriptor ring and Rx buffers */
1860 rx_ring->count = IXGBE_DEFAULT_RXD;
1861 rx_ring->queue_index = 0;
1862 rx_ring->dev = &adapter->pdev->dev;
1863 rx_ring->netdev = adapter->netdev;
1864 rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
1866 err = ixgbe_setup_rx_resources(adapter, rx_ring);
1872 hw->mac.ops.disable_rx(hw);
1874 ixgbe_configure_rx_ring(adapter, rx_ring);
1876 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
1877 rctl |= IXGBE_RXCTRL_DMBYPS;
1878 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
1880 hw->mac.ops.enable_rx(hw);
1885 ixgbe_free_desc_rings(adapter);
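/* Put the MAC into loopback for the self-test: set HLREG0.LPBK, set the
 * BAM, SBP and MPE bits in FCTRL, force link up (MACC.FLU on X540/X550,
 * AUTOC.FLU when orig_autoc is valid), and on 82598 power down the Atlas
 * Tx lanes (restored by the subsequent reset).
 */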
1889 static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
1891 struct ixgbe_hw *hw = &adapter->hw;
1895 /* Setup MAC loopback */
1896 reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
1897 reg_data |= IXGBE_HLREG0_LPBK;
1898 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);
1900 reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1901 reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
1902 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);
1904 /* X540 and X550 need to set the MACC.FLU bit to force link up */
1905 switch (adapter->hw.mac.type) {
1906 case ixgbe_mac_X540:
1907 case ixgbe_mac_X550:
1908 case ixgbe_mac_X550EM_x:
1909 case ixgbe_mac_x550em_a:
1910 reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
1911 reg_data |= IXGBE_MACC_FLU;
1912 IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
1915 if (hw->mac.orig_autoc) {
1916 reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU;
1917 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
1922 IXGBE_WRITE_FLUSH(hw);
1923 usleep_range(10000, 20000);
1925 /* Disable Atlas Tx lanes; re-enabled in reset path */
1926 if (hw->mac.type == ixgbe_mac_82598EB) {
1929 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
1930 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
1931 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);
1933 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
1934 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
1935 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);
1937 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
1938 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
1939 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);
1941 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
1942 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
1943 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
1949 static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
1953 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1954 reg_data &= ~IXGBE_HLREG0_LPBK;
1955 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
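/* ixgbe_create_lbtest_frame - fill the test skb with a known pattern: a run
 * of 0xFF followed by 0xAA filler, plus 0xBE/0xAF marker bytes that
 * ixgbe_check_lbtest_frame() looks for on the receive side.
 */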
1958 static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
1959 unsigned int frame_size)
1961 memset(skb->data, 0xFF, frame_size);
1963 memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
1964 skb->data[frame_size + 10] = 0xBE;
1965 skb->data[frame_size + 12] = 0xAF;
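/* ixgbe_check_lbtest_frame - map the receive buffer page and verify the
 * 0xFF/0xBE/0xAF marker bytes written by ixgbe_create_lbtest_frame();
 * returns true only when the looped-back frame matches.
 */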
1968 static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
1969 unsigned int frame_size)
1971 unsigned char *data;
1976 data = kmap(rx_buffer->page) + rx_buffer->page_offset;
1978 if (data[3] != 0xFF ||
1979 data[frame_size + 10] != 0xBE ||
1980 data[frame_size + 12] != 0xAF)
1983 kunmap(rx_buffer->page);
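/* ixgbe_clean_test_rings - reclaim completed test buffers.  Walks the Tx
 * ring freeing and unmapping every descriptor the hardware has marked done,
 * then walks the Rx ring counting frames that pass the loopback frame check.
 * The Rx buffers are handed back to hardware and the number of good frames
 * is returned to the caller.
 */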
1988 static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
1989 struct ixgbe_ring *tx_ring,
1992 union ixgbe_adv_rx_desc *rx_desc;
1993 u16 rx_ntc, tx_ntc, count = 0;
1995 /* initialize next to clean and descriptor values */
1996 rx_ntc = rx_ring->next_to_clean;
1997 tx_ntc = tx_ring->next_to_clean;
1998 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
2000 while (tx_ntc != tx_ring->next_to_use) {
2001 union ixgbe_adv_tx_desc *tx_desc;
2002 struct ixgbe_tx_buffer *tx_buffer;
2004 tx_desc = IXGBE_TX_DESC(tx_ring, tx_ntc);
2006 /* if DD is not set, transmit has not completed */
2007 if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
2010 /* unmap buffer on Tx side */
2011 tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
2013 /* Free all the Tx ring sk_buffs */
2014 dev_kfree_skb_any(tx_buffer->skb);
2016 /* unmap skb header data */
2017 dma_unmap_single(tx_ring->dev,
2018 dma_unmap_addr(tx_buffer, dma),
2019 dma_unmap_len(tx_buffer, len),
2021 dma_unmap_len_set(tx_buffer, len, 0);
2023 /* increment Tx next to clean counter */
2025 if (tx_ntc == tx_ring->count)
2029 while (rx_desc->wb.upper.length) {
2030 struct ixgbe_rx_buffer *rx_buffer;
2032 /* check Rx buffer */
2033 rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];
2035 /* sync Rx buffer for CPU read */
2036 dma_sync_single_for_cpu(rx_ring->dev,
2038 ixgbe_rx_bufsz(rx_ring),
2041 /* verify contents of skb */
2042 if (ixgbe_check_lbtest_frame(rx_buffer, size))
2047 /* sync Rx buffer for device write */
2048 dma_sync_single_for_device(rx_ring->dev,
2050 ixgbe_rx_bufsz(rx_ring),
2053 /* increment Rx next to clean counter */
2055 if (rx_ntc == rx_ring->count)
2058 /* fetch next descriptor */
2059 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
2062 netdev_tx_reset_queue(txring_txq(tx_ring));
2064 /* re-map buffers to ring, store next to clean values */
2065 ixgbe_alloc_rx_buffers(rx_ring, count);
2066 rx_ring->next_to_clean = rx_ntc;
2067 tx_ring->next_to_clean = tx_ntc;
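/* ixgbe_run_loopback_test - transmit bursts of 64 copies of the test frame
 * and verify that every one of them comes back through the MAC loopback.
 * The loop count is sized so the larger of the two rings wraps a few times;
 * DCB is temporarily disabled so it cannot rewrite the test frames.
 * Returns non-zero on the first failed burst.
 */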
2072 static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
2074 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
2075 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
2076 int i, j, lc, good_cnt, ret_val = 0;
2077 unsigned int size = 1024;
2078 netdev_tx_t tx_ret_val;
2079 struct sk_buff *skb;
2080 u32 flags_orig = adapter->flags;
2082 /* DCB can modify the frames on Tx */
2083 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
2085 /* allocate test skb */
2086 skb = alloc_skb(size, GFP_KERNEL);
2090 /* place data into test skb */
2091 ixgbe_create_lbtest_frame(skb, size);
2095 * Calculate the loop count based on the largest descriptor ring.
2096 * The idea is to wrap the largest ring a number of times using 64
2097 * send/receive pairs during each loop.
2100 if (rx_ring->count <= tx_ring->count)
2101 lc = ((tx_ring->count / 64) * 2) + 1;
2103 lc = ((rx_ring->count / 64) * 2) + 1;
2105 for (j = 0; j <= lc; j++) {
2106 /* reset count of good packets */
2109 /* place 64 packets on the transmit queue */
2110 for (i = 0; i < 64; i++) {
2112 tx_ret_val = ixgbe_xmit_frame_ring(skb,
2115 if (tx_ret_val == NETDEV_TX_OK)
2119 if (good_cnt != 64) {
2124 /* allow 200 milliseconds for packets to go from Tx to Rx */
2127 good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
2128 if (good_cnt != 64) {
2134 /* free the original skb */
2136 adapter->flags = flags_orig;
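/* ixgbe_loopback_test - glue for the loopback stage of the self-test: set up
 * the test descriptor rings, enable MAC loopback, run the frame exchange,
 * then undo the loopback setting and free the rings.  *data is left non-zero
 * if any stage failed.
 */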
2141 static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
2143 *data = ixgbe_setup_desc_rings(adapter);
2146 *data = ixgbe_setup_loopback_test(adapter);
2149 *data = ixgbe_run_loopback_test(adapter);
2150 ixgbe_loopback_cleanup(adapter);
2153 ixgbe_free_desc_rings(adapter);
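/* ixgbe_diag_test - ethtool self-test entry point.  The five result slots
 * are data[0] = register test, data[1] = eeprom, data[2] = interrupt,
 * data[3] = loopback and data[4] = link.  Offline mode closes the interface,
 * runs the full sequence with resets in between and restores the previous
 * state; online mode only runs the link test.  For example, something like
 * "ethtool -t eth0 offline" from user space ends up here.
 */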
2158 static void ixgbe_diag_test(struct net_device *netdev,
2159 struct ethtool_test *eth_test, u64 *data)
2161 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2162 bool if_running = netif_running(netdev);
2164 if (ixgbe_removed(adapter->hw.hw_addr)) {
2165 e_err(hw, "Adapter removed - test blocked\n");
2171 eth_test->flags |= ETH_TEST_FL_FAILED;
2174 set_bit(__IXGBE_TESTING, &adapter->state);
2175 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
2176 struct ixgbe_hw *hw = &adapter->hw;
2178 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
2180 for (i = 0; i < adapter->num_vfs; i++) {
2181 if (adapter->vfinfo[i].clear_to_send) {
2182 netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n");
2188 eth_test->flags |= ETH_TEST_FL_FAILED;
2189 clear_bit(__IXGBE_TESTING,
2197 e_info(hw, "offline testing starting\n");
2199 /* Link test performed before hardware reset so autoneg doesn't
2200 * interfere with test result
2202 if (ixgbe_link_test(adapter, &data[4]))
2203 eth_test->flags |= ETH_TEST_FL_FAILED;
2206 /* indicate we're in test mode */
2207 ixgbe_close(netdev);
2209 ixgbe_reset(adapter);
2211 e_info(hw, "register testing starting\n");
2212 if (ixgbe_reg_test(adapter, &data[0]))
2213 eth_test->flags |= ETH_TEST_FL_FAILED;
2215 ixgbe_reset(adapter);
2216 e_info(hw, "eeprom testing starting\n");
2217 if (ixgbe_eeprom_test(adapter, &data[1]))
2218 eth_test->flags |= ETH_TEST_FL_FAILED;
2220 ixgbe_reset(adapter);
2221 e_info(hw, "interrupt testing starting\n");
2222 if (ixgbe_intr_test(adapter, &data[2]))
2223 eth_test->flags |= ETH_TEST_FL_FAILED;
2225 /* If SRIOV or VMDq is enabled then skip MAC
2226 * loopback diagnostic. */
2227 if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
2228 IXGBE_FLAG_VMDQ_ENABLED)) {
2229 e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
2234 ixgbe_reset(adapter);
2235 e_info(hw, "loopback testing starting\n");
2236 if (ixgbe_loopback_test(adapter, &data[3]))
2237 eth_test->flags |= ETH_TEST_FL_FAILED;
2240 ixgbe_reset(adapter);
2242 /* clear testing bit and return adapter to previous state */
2243 clear_bit(__IXGBE_TESTING, &adapter->state);
2246 else if (hw->mac.ops.disable_tx_laser)
2247 hw->mac.ops.disable_tx_laser(hw);
2249 e_info(hw, "online testing starting\n");
2252 if (ixgbe_link_test(adapter, &data[4]))
2253 eth_test->flags |= ETH_TEST_FL_FAILED;
2255 /* Offline tests aren't run; pass by default */
2261 clear_bit(__IXGBE_TESTING, &adapter->state);
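/* ixgbe_wol_exclusion - helper for the WoL ethtool ops: returns non-zero
 * when this device/subsystem ID combination has no Wake-on-LAN support, so
 * the callers can bail out early.
 */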
2265 static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
2266 struct ethtool_wolinfo *wol)
2268 struct ixgbe_hw *hw = &adapter->hw;
2271 /* not all devices support Wake-on-LAN */
2272 if (!ixgbe_wol_supported(adapter, hw->device_id,
2273 hw->subsystem_device_id)) {
2281 static void ixgbe_get_wol(struct net_device *netdev,
2282 struct ethtool_wolinfo *wol)
2284 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2286 wol->supported = WAKE_UCAST | WAKE_MCAST |
2287 WAKE_BCAST | WAKE_MAGIC;
2290 if (ixgbe_wol_exclusion(adapter, wol) ||
2291 !device_can_wakeup(&adapter->pdev->dev))
2294 if (adapter->wol & IXGBE_WUFC_EX)
2295 wol->wolopts |= WAKE_UCAST;
2296 if (adapter->wol & IXGBE_WUFC_MC)
2297 wol->wolopts |= WAKE_MCAST;
2298 if (adapter->wol & IXGBE_WUFC_BC)
2299 wol->wolopts |= WAKE_BCAST;
2300 if (adapter->wol & IXGBE_WUFC_MAG)
2301 wol->wolopts |= WAKE_MAGIC;
2304 static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2306 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2308 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE |
2312 if (ixgbe_wol_exclusion(adapter, wol))
2313 return wol->wolopts ? -EOPNOTSUPP : 0;
2317 if (wol->wolopts & WAKE_UCAST)
2318 adapter->wol |= IXGBE_WUFC_EX;
2319 if (wol->wolopts & WAKE_MCAST)
2320 adapter->wol |= IXGBE_WUFC_MC;
2321 if (wol->wolopts & WAKE_BCAST)
2322 adapter->wol |= IXGBE_WUFC_BC;
2323 if (wol->wolopts & WAKE_MAGIC)
2324 adapter->wol |= IXGBE_WUFC_MAG;
2326 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
2331 static int ixgbe_nway_reset(struct net_device *netdev)
2333 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2335 if (netif_running(netdev))
2336 ixgbe_reinit_locked(adapter);
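/* ixgbe_set_phys_id - identify the port by blinking its LED.
 * ETHTOOL_ID_ACTIVE saves the current LEDCTL value, ID_ON/ID_OFF drive the
 * link/activity LED, and ID_INACTIVE restores the saved register.  For
 * example, "ethtool -p eth0 5" would blink the LED for roughly five seconds.
 */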
2341 static int ixgbe_set_phys_id(struct net_device *netdev,
2342 enum ethtool_phys_id_state state)
2344 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2345 struct ixgbe_hw *hw = &adapter->hw;
2347 if (!hw->mac.ops.led_on || !hw->mac.ops.led_off)
2351 case ETHTOOL_ID_ACTIVE:
2352 adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2356 hw->mac.ops.led_on(hw, hw->mac.led_link_act);
2359 case ETHTOOL_ID_OFF:
2360 hw->mac.ops.led_off(hw, hw->mac.led_link_act);
2363 case ETHTOOL_ID_INACTIVE:
2364 /* Restore LED settings */
2365 IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
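/* ixgbe_get_coalesce - report interrupt coalescing.  The driver stores ITR
 * settings left-shifted by two, so a stored value of 200 is reported as
 * 50 usecs; values of 0 or 1 are passed through unchanged.  When Tx and Rx
 * share a vector only the Rx value is reported.
 */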
2372 static int ixgbe_get_coalesce(struct net_device *netdev,
2373 struct ethtool_coalesce *ec)
2375 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2377 /* only valid if in constant ITR mode */
2378 if (adapter->rx_itr_setting <= 1)
2379 ec->rx_coalesce_usecs = adapter->rx_itr_setting;
2381 ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
2383 /* if in mixed tx/rx queues per vector mode, report only rx settings */
2384 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
2387 /* only valid if in constant ITR mode */
2388 if (adapter->tx_itr_setting <= 1)
2389 ec->tx_coalesce_usecs = adapter->tx_itr_setting;
2391 ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
2397 * this function must be called before setting the new value of
2400 static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
2402 struct net_device *netdev = adapter->netdev;
2404 /* nothing to do if LRO or RSC are not enabled */
2405 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
2406 !(netdev->features & NETIF_F_LRO))
2409 /* check the feature flag value and enable RSC if necessary */
2410 if (adapter->rx_itr_setting == 1 ||
2411 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
2412 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
2413 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
2414 e_info(probe, "rx-usecs value high enough to re-enable RSC\n");
2417 /* if interrupt rate is too high then disable RSC */
2418 } else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2419 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
2420 e_info(probe, "rx-usecs set too low, disabling RSC\n");
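/* ixgbe_set_coalesce - apply new rx-usecs/tx-usecs values.  The requested
 * usecs are bounded by IXGBE_MAX_EITR >> 2, stored left-shifted by two and
 * written to every queue vector's EITR register.  Transitions that affect
 * TXDCTL.WTHRESH handling, or that toggle RSC via the new Rx rate, force a
 * full device reset at the end.
 */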
2426 static int ixgbe_set_coalesce(struct net_device *netdev,
2427 struct ethtool_coalesce *ec)
2429 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2430 struct ixgbe_q_vector *q_vector;
2432 u16 tx_itr_param, rx_itr_param, tx_itr_prev;
2433 bool need_reset = false;
2435 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
2436 /* reject Tx specific changes in case of mixed RxTx vectors */
2437 if (ec->tx_coalesce_usecs)
2439 tx_itr_prev = adapter->rx_itr_setting;
2441 tx_itr_prev = adapter->tx_itr_setting;
2444 if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
2445 (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
2448 if (ec->rx_coalesce_usecs > 1)
2449 adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
2451 adapter->rx_itr_setting = ec->rx_coalesce_usecs;
2453 if (adapter->rx_itr_setting == 1)
2454 rx_itr_param = IXGBE_20K_ITR;
2456 rx_itr_param = adapter->rx_itr_setting;
2458 if (ec->tx_coalesce_usecs > 1)
2459 adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
2461 adapter->tx_itr_setting = ec->tx_coalesce_usecs;
2463 if (adapter->tx_itr_setting == 1)
2464 tx_itr_param = IXGBE_12K_ITR;
2466 tx_itr_param = adapter->tx_itr_setting;
2469 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
2470 adapter->tx_itr_setting = adapter->rx_itr_setting;
2472 /* detect ITR changes that require update of TXDCTL.WTHRESH */
2473 if ((adapter->tx_itr_setting != 1) &&
2474 (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
2475 if ((tx_itr_prev == 1) ||
2476 (tx_itr_prev >= IXGBE_100K_ITR))
2479 if ((tx_itr_prev != 1) &&
2480 (tx_itr_prev < IXGBE_100K_ITR))
2484 /* check the old value and enable RSC if necessary */
2485 need_reset |= ixgbe_update_rsc(adapter);
2487 for (i = 0; i < adapter->num_q_vectors; i++) {
2488 q_vector = adapter->q_vector[i];
2489 if (q_vector->tx.count && !q_vector->rx.count)
2491 q_vector->itr = tx_itr_param;
2493 /* rx only or mixed */
2494 q_vector->itr = rx_itr_param;
2495 ixgbe_write_eitr(q_vector);
2499 * do the reset here at the end to make sure the EITR==0 case is handled
2500 * correctly w.r.t. stopping Tx and changing TXDCTL.WTHRESH settings;
2501 * this also locks in the RSC enable/disable, which requires a reset
2504 ixgbe_do_reset(netdev);
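/* ixgbe_get_ethtool_fdir_entry - ETHTOOL_GRXCLSRULE: look up the Flow
 * Director filter stored at fsp->location and translate it back into an
 * ethtool_rx_flow_spec, including the shared per-port mask.  cmd->data
 * carries the table capacity of (1024 << fdir_pballoc) - 2 entries; for
 * example, with fdir_pballoc == 1 that is (1024 << 1) - 2 = 2046 rules.
 */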
2509 static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2510 struct ethtool_rxnfc *cmd)
2512 union ixgbe_atr_input *mask = &adapter->fdir_mask;
2513 struct ethtool_rx_flow_spec *fsp =
2514 (struct ethtool_rx_flow_spec *)&cmd->fs;
2515 struct hlist_node *node2;
2516 struct ixgbe_fdir_filter *rule = NULL;
2518 /* report total rule count */
2519 cmd->data = (1024 << adapter->fdir_pballoc) - 2;
2521 hlist_for_each_entry_safe(rule, node2,
2522 &adapter->fdir_filter_list, fdir_node) {
2523 if (fsp->location <= rule->sw_idx)
2527 if (!rule || fsp->location != rule->sw_idx)
2530 /* fill out the flow spec entry */
2532 /* set flow type field */
2533 switch (rule->filter.formatted.flow_type) {
2534 case IXGBE_ATR_FLOW_TYPE_TCPV4:
2535 fsp->flow_type = TCP_V4_FLOW;
2537 case IXGBE_ATR_FLOW_TYPE_UDPV4:
2538 fsp->flow_type = UDP_V4_FLOW;
2540 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
2541 fsp->flow_type = SCTP_V4_FLOW;
2543 case IXGBE_ATR_FLOW_TYPE_IPV4:
2544 fsp->flow_type = IP_USER_FLOW;
2545 fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
2546 fsp->h_u.usr_ip4_spec.proto = 0;
2547 fsp->m_u.usr_ip4_spec.proto = 0;
2553 fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
2554 fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
2555 fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
2556 fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
2557 fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
2558 fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
2559 fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
2560 fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
2561 fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
2562 fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
2563 fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
2564 fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
2565 fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
2566 fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
2567 fsp->flow_type |= FLOW_EXT;
2570 if (rule->action == IXGBE_FDIR_DROP_QUEUE)
2571 fsp->ring_cookie = RX_CLS_FLOW_DISC;
2573 fsp->ring_cookie = rule->action;
2578 static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
2579 struct ethtool_rxnfc *cmd,
2582 struct hlist_node *node2;
2583 struct ixgbe_fdir_filter *rule;
2586 /* report total rule count */
2587 cmd->data = (1024 << adapter->fdir_pballoc) - 2;
2589 hlist_for_each_entry_safe(rule, node2,
2590 &adapter->fdir_filter_list, fdir_node) {
2591 if (cnt == cmd->rule_cnt)
2593 rule_locs[cnt] = rule->sw_idx;
2597 cmd->rule_cnt = cnt;
2602 static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
2603 struct ethtool_rxnfc *cmd)
2607 /* Report default options for RSS on ixgbe */
2608 switch (cmd->flow_type) {
2610 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2613 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
2614 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2617 case AH_ESP_V4_FLOW:
2621 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
2624 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2627 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2628 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2631 case AH_ESP_V6_FLOW:
2635 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
2644 static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
2646 if (adapter->hw.mac.type < ixgbe_mac_X550)
2652 static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
2655 struct ixgbe_adapter *adapter = netdev_priv(dev);
2656 int ret = -EOPNOTSUPP;
2659 case ETHTOOL_GRXRINGS:
2660 cmd->data = min_t(int, adapter->num_rx_queues,
2661 ixgbe_rss_indir_tbl_max(adapter));
2664 case ETHTOOL_GRXCLSRLCNT:
2665 cmd->rule_cnt = adapter->fdir_filter_count;
2668 case ETHTOOL_GRXCLSRULE:
2669 ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
2671 case ETHTOOL_GRXCLSRLALL:
2672 ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
2675 ret = ixgbe_get_rss_hash_opts(adapter, cmd);
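/* ixgbe_update_ethtool_fdir_entry - insert, replace or delete the software
 * copy of a Flow Director filter.  The list is kept sorted by sw_idx; an
 * existing rule at the same index is erased from hardware and dropped, and
 * a NULL @input turns the call into a pure delete.
 */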
2684 int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2685 struct ixgbe_fdir_filter *input,
2688 struct ixgbe_hw *hw = &adapter->hw;
2689 struct hlist_node *node2;
2690 struct ixgbe_fdir_filter *rule, *parent;
2696 hlist_for_each_entry_safe(rule, node2,
2697 &adapter->fdir_filter_list, fdir_node) {
2698 /* hash found, or no matching entry */
2699 if (rule->sw_idx >= sw_idx)
2704 /* if there is an old rule occupying our place, remove it */
2705 if (rule && (rule->sw_idx == sw_idx)) {
2706 if (!input || (rule->filter.formatted.bkt_hash !=
2707 input->filter.formatted.bkt_hash)) {
2708 err = ixgbe_fdir_erase_perfect_filter_82599(hw,
2713 hlist_del(&rule->fdir_node);
2715 adapter->fdir_filter_count--;
2719 * If there was no input this was a delete; err should be 0 if a rule
2720 * was successfully found and removed from the list, else -EINVAL
2725 /* initialize node and set software index */
2726 INIT_HLIST_NODE(&input->fdir_node);
2728 /* add filter to the list */
2730 hlist_add_behind(&input->fdir_node, &parent->fdir_node);
2732 hlist_add_head(&input->fdir_node,
2733 &adapter->fdir_filter_list);
2736 adapter->fdir_filter_count++;
2741 static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
2744 switch (fsp->flow_type & ~FLOW_EXT) {
2746 *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
2749 *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
2752 *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
2755 switch (fsp->h_u.usr_ip4_spec.proto) {
2757 *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
2760 *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
2763 *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
2766 if (!fsp->m_u.usr_ip4_spec.proto) {
2767 *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
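/* ixgbe_add_ethtool_fdir_entry - ETHTOOL_SRXCLSRLINS: validate the target
 * queue (or VF pool), convert the ethtool flow spec into an ixgbe_atr_input
 * filter and mask, program the single per-port input mask on first use, and
 * write the perfect filter to hardware before recording it in the software
 * list.  A rule such as "ethtool -N eth0 flow-type tcp4 dst-port 80
 * action 2" would typically end up here.
 */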
2782 static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2783 struct ethtool_rxnfc *cmd)
2785 struct ethtool_rx_flow_spec *fsp =
2786 (struct ethtool_rx_flow_spec *)&cmd->fs;
2787 struct ixgbe_hw *hw = &adapter->hw;
2788 struct ixgbe_fdir_filter *input;
2789 union ixgbe_atr_input mask;
2793 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
2796 /* ring_cookie is masked into a set of queues and ixgbe pools, or
2797 * we use the drop index.
2799 if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
2800 queue = IXGBE_FDIR_DROP_QUEUE;
2802 u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
2803 u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
2805 if (!vf && (ring >= adapter->num_rx_queues))
2808 ((vf > adapter->num_vfs) ||
2809 ring >= adapter->num_rx_queues_per_pool))
2812 /* Map the ring onto the absolute queue index */
2814 queue = adapter->rx_ring[ring]->reg_idx;
2817 adapter->num_rx_queues_per_pool) + ring;
2820 /* Don't allow indexes to exist outside of available space */
2821 if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
2822 e_err(drv, "Location out of range\n");
2826 input = kzalloc(sizeof(*input), GFP_ATOMIC);
2830 memset(&mask, 0, sizeof(union ixgbe_atr_input));
2833 input->sw_idx = fsp->location;
2835 /* record flow type */
2836 if (!ixgbe_flowspec_to_flow_type(fsp,
2837 &input->filter.formatted.flow_type)) {
2838 e_err(drv, "Unrecognized flow type\n");
2842 mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
2843 IXGBE_ATR_L4TYPE_MASK;
2845 if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
2846 mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
2848 /* Copy input into formatted structures */
2849 input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
2850 mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
2851 input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
2852 mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
2853 input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
2854 mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
2855 input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
2856 mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
2858 if (fsp->flow_type & FLOW_EXT) {
2859 input->filter.formatted.vm_pool =
2860 (unsigned char)ntohl(fsp->h_ext.data[1]);
2861 mask.formatted.vm_pool =
2862 (unsigned char)ntohl(fsp->m_ext.data[1]);
2863 input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
2864 mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
2865 input->filter.formatted.flex_bytes =
2866 fsp->h_ext.vlan_etype;
2867 mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
2870 /* determine if we need to drop or route the packet */
2871 if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
2872 input->action = IXGBE_FDIR_DROP_QUEUE;
2874 input->action = fsp->ring_cookie;
2876 spin_lock(&adapter->fdir_perfect_lock);
2878 if (hlist_empty(&adapter->fdir_filter_list)) {
2879 /* save mask and program input mask into HW */
2880 memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
2881 err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
2883 e_err(drv, "Error writing mask\n");
2884 goto err_out_w_lock;
2886 } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
2887 e_err(drv, "Only one mask supported per port\n");
2888 goto err_out_w_lock;
2891 /* apply mask and compute/store hash */
2892 ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);
2894 /* program filters to filter memory */
2895 err = ixgbe_fdir_write_perfect_filter_82599(hw,
2896 &input->filter, input->sw_idx, queue);
2898 goto err_out_w_lock;
2900 ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
2902 spin_unlock(&adapter->fdir_perfect_lock);
2906 spin_unlock(&adapter->fdir_perfect_lock);
2912 static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2913 struct ethtool_rxnfc *cmd)
2915 struct ethtool_rx_flow_spec *fsp =
2916 (struct ethtool_rx_flow_spec *)&cmd->fs;
2919 spin_lock(&adapter->fdir_perfect_lock);
2920 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
2921 spin_unlock(&adapter->fdir_perfect_lock);
2926 #define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
2927 IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
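/* ixgbe_set_rss_hash_opt - ETHTOOL_SRXFH: adjust which header fields feed
 * the RSS hash.  IP src/dst hashing is mandatory for all supported flow
 * types and L4 port hashing can only be toggled for UDP; the result is
 * written to MRQC, or to the PF pool's PFVFMRQC on X550 with SR-IOV.
 */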
2928 static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
2929 struct ethtool_rxnfc *nfc)
2931 u32 flags2 = adapter->flags2;
2934 * RSS does not support anything other than hashing
2935 * to queues on src and dst IPs and ports
2937 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
2938 RXH_L4_B_0_1 | RXH_L4_B_2_3))
2941 switch (nfc->flow_type) {
2944 if (!(nfc->data & RXH_IP_SRC) ||
2945 !(nfc->data & RXH_IP_DST) ||
2946 !(nfc->data & RXH_L4_B_0_1) ||
2947 !(nfc->data & RXH_L4_B_2_3))
2951 if (!(nfc->data & RXH_IP_SRC) ||
2952 !(nfc->data & RXH_IP_DST))
2954 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2956 flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
2958 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2959 flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
2966 if (!(nfc->data & RXH_IP_SRC) ||
2967 !(nfc->data & RXH_IP_DST))
2969 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2971 flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
2973 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2974 flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
2980 case AH_ESP_V4_FLOW:
2984 case AH_ESP_V6_FLOW:
2988 if (!(nfc->data & RXH_IP_SRC) ||
2989 !(nfc->data & RXH_IP_DST) ||
2990 (nfc->data & RXH_L4_B_0_1) ||
2991 (nfc->data & RXH_L4_B_2_3))
2998 /* if we changed something we need to update flags */
2999 if (flags2 != adapter->flags2) {
3000 struct ixgbe_hw *hw = &adapter->hw;
3002 unsigned int pf_pool = adapter->num_vfs;
3004 if ((hw->mac.type >= ixgbe_mac_X550) &&
3005 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
3006 mrqc = IXGBE_READ_REG(hw, IXGBE_PFVFMRQC(pf_pool));
3008 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
3010 if ((flags2 & UDP_RSS_FLAGS) &&
3011 !(adapter->flags2 & UDP_RSS_FLAGS))
3012 e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
3014 adapter->flags2 = flags2;
3016 /* Perform hash on these packet types */
3017 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
3018 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
3019 | IXGBE_MRQC_RSS_FIELD_IPV6
3020 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3022 mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
3023 IXGBE_MRQC_RSS_FIELD_IPV6_UDP);
3025 if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
3026 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3028 if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
3029 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3031 if ((hw->mac.type >= ixgbe_mac_X550) &&
3032 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
3033 IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), mrqc);
3035 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3041 static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
3043 struct ixgbe_adapter *adapter = netdev_priv(dev);
3044 int ret = -EOPNOTSUPP;
3047 case ETHTOOL_SRXCLSRLINS:
3048 ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
3050 case ETHTOOL_SRXCLSRLDEL:
3051 ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
3054 ret = ixgbe_set_rss_hash_opt(adapter, cmd);
3063 static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
3065 return IXGBE_RSS_KEY_SIZE;
3068 static u32 ixgbe_rss_indir_size(struct net_device *netdev)
3070 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3072 return ixgbe_rss_indir_tbl_entries(adapter);
3075 static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
3077 int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter);
3078 u16 rss_m = adapter->ring_feature[RING_F_RSS].mask;
3080 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3081 rss_m = adapter->ring_feature[RING_F_RSS].indices - 1;
3083 for (i = 0; i < reta_size; i++)
3084 indir[i] = adapter->rss_indir_tbl[i] & rss_m;
3087 static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
3090 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3093 *hfunc = ETH_RSS_HASH_TOP;
3096 ixgbe_get_reta(adapter, indir);
3099 memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev));
3104 static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
3105 const u8 *key, const u8 hfunc)
3107 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3109 u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3111 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
3114 /* Fill out the redirection table */
3116 int max_queues = min_t(int, adapter->num_rx_queues,
3117 ixgbe_rss_indir_tbl_max(adapter));
3119 /* Allow at least 2 queues w/ SR-IOV. */
3120 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
3124 /* Verify user input. */
3125 for (i = 0; i < reta_entries; i++)
3126 if (indir[i] >= max_queues)
3129 for (i = 0; i < reta_entries; i++)
3130 adapter->rss_indir_tbl[i] = indir[i];
3132 ixgbe_store_reta(adapter);
3135 /* Fill out the rss hash key */
3137 memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev));
3138 ixgbe_store_key(adapter);
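/* ixgbe_get_ts_info - report hardware timestamping capabilities.  X550
 * family MACs can timestamp all received packets, 82599/X540 only PTP event
 * packets; anything older falls back to software timestamps via
 * ethtool_op_get_ts_info().  The PHC index is filled in when a PTP clock
 * has been registered.
 */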
3144 static int ixgbe_get_ts_info(struct net_device *dev,
3145 struct ethtool_ts_info *info)
3147 struct ixgbe_adapter *adapter = netdev_priv(dev);
3149 /* we always support having timestamping disabled (HWTSTAMP_FILTER_NONE) */
3150 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
3152 switch (adapter->hw.mac.type) {
3153 case ixgbe_mac_X550:
3154 case ixgbe_mac_X550EM_x:
3155 case ixgbe_mac_x550em_a:
3156 info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
3158 case ixgbe_mac_X540:
3159 case ixgbe_mac_82599EB:
3161 BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
3162 BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
3163 BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
3166 return ethtool_op_get_ts_info(dev, info);
3169 info->so_timestamping =
3170 SOF_TIMESTAMPING_TX_SOFTWARE |
3171 SOF_TIMESTAMPING_RX_SOFTWARE |
3172 SOF_TIMESTAMPING_SOFTWARE |
3173 SOF_TIMESTAMPING_TX_HARDWARE |
3174 SOF_TIMESTAMPING_RX_HARDWARE |
3175 SOF_TIMESTAMPING_RAW_HARDWARE;
3177 if (adapter->ptp_clock)
3178 info->phc_index = ptp_clock_index(adapter->ptp_clock);
3180 info->phc_index = -1;
3183 BIT(HWTSTAMP_TX_OFF) |
3184 BIT(HWTSTAMP_TX_ON);
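/* ixgbe_max_channels - upper bound for combined channels: one without
 * MSI-X, the RSS mask size under SR-IOV, per-TC limits when DCB is active,
 * up to IXGBE_MAX_FDIR_INDICES when ATR sampling is on, otherwise the RSS
 * maximum; the result is also capped at the number of online CPUs.
 */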
3189 static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
3191 unsigned int max_combined;
3192 u8 tcs = adapter->hw_tcs;
3194 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
3195 /* We only support one q_vector without MSI-X */
3197 } else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3198 /* Limit value based on the queue mask */
3199 max_combined = adapter->ring_feature[RING_F_RSS].mask + 1;
3200 } else if (tcs > 1) {
3201 /* For DCB report channels per traffic class */
3202 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3203 /* 8 TC w/ 4 queues per TC */
3205 } else if (tcs > 4) {
3206 /* 8 TC w/ 8 queues per TC */
3209 /* 4 TC w/ 16 queues per TC */
3212 } else if (adapter->atr_sample_rate) {
3213 /* support up to 64 queues with ATR */
3214 max_combined = IXGBE_MAX_FDIR_INDICES;
3216 /* support up to 16 queues with RSS */
3217 max_combined = ixgbe_max_rss_indices(adapter);
3220 return min_t(int, max_combined, num_online_cpus());
3223 static void ixgbe_get_channels(struct net_device *dev,
3224 struct ethtool_channels *ch)
3226 struct ixgbe_adapter *adapter = netdev_priv(dev);
3228 /* report maximum channels */
3229 ch->max_combined = ixgbe_max_channels(adapter);
3231 /* report info for other vector */
3232 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3233 ch->max_other = NON_Q_VECTORS;
3234 ch->other_count = NON_Q_VECTORS;
3237 /* record RSS queues */
3238 ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;
3240 /* nothing else to report if RSS is disabled */
3241 if (ch->combined_count == 1)
3244 /* we do not support ATR queueing if SR-IOV is enabled */
3245 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3248 /* the same applies when DCB is enabled */
3249 if (adapter->hw_tcs > 1)
3252 /* if ATR is disabled we can exit */
3253 if (!adapter->atr_sample_rate)
3256 /* report flow director queues as maximum channels */
3257 ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
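/* ixgbe_set_channels - ETHTOOL_SCHANNELS handler.  Only combined channels
 * may be changed and the other (non-queue) vector count must stay fixed;
 * the new count updates the FDIR, RSS and FCoE feature limits and the
 * queue layout is rebuilt through ixgbe_setup_tc().
 */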
3260 static int ixgbe_set_channels(struct net_device *dev,
3261 struct ethtool_channels *ch)
3263 struct ixgbe_adapter *adapter = netdev_priv(dev);
3264 unsigned int count = ch->combined_count;
3265 u8 max_rss_indices = ixgbe_max_rss_indices(adapter);
3267 /* verify they are not requesting separate vectors */
3268 if (!count || ch->rx_count || ch->tx_count)
3271 /* verify other_count has not changed */
3272 if (ch->other_count != NON_Q_VECTORS)
3275 /* verify the number of channels does not exceed hardware limits */
3276 if (count > ixgbe_max_channels(adapter))
3279 /* update feature limits from largest to smallest supported values */
3280 adapter->ring_feature[RING_F_FDIR].limit = count;
3283 if (count > max_rss_indices)
3284 count = max_rss_indices;
3285 adapter->ring_feature[RING_F_RSS].limit = count;
3288 /* cap FCoE limit at 8 */
3289 if (count > IXGBE_FCRETA_SIZE)
3290 count = IXGBE_FCRETA_SIZE;
3291 adapter->ring_feature[RING_F_FCOE].limit = count;
3294 /* use setup TC to update any traffic class queue mapping */
3295 return ixgbe_setup_tc(dev, adapter->hw_tcs);
3298 static int ixgbe_get_module_info(struct net_device *dev,
3299 struct ethtool_modinfo *modinfo)
3301 struct ixgbe_adapter *adapter = netdev_priv(dev);
3302 struct ixgbe_hw *hw = &adapter->hw;
3304 u8 sff8472_rev, addr_mode;
3305 bool page_swap = false;
3307 if (hw->phy.type == ixgbe_phy_fw)
3310 /* Check whether we support SFF-8472 or not */
3311 status = hw->phy.ops.read_i2c_eeprom(hw,
3312 IXGBE_SFF_SFF_8472_COMP,
3317 /* check the addressing mode; an address change (page swap) is not supported */
3318 status = hw->phy.ops.read_i2c_eeprom(hw,
3319 IXGBE_SFF_SFF_8472_SWAP,
3324 if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
3325 e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
3329 if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap ||
3330 !(addr_mode & IXGBE_SFF_DDM_IMPLEMENTED)) {
3331 /* We have an SFP, but it does not support SFF-8472 */
3332 modinfo->type = ETH_MODULE_SFF_8079;
3333 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
3335 /* We have an SFP which supports a revision of SFF-8472. */
3336 modinfo->type = ETH_MODULE_SFF_8472;
3337 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
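/* ixgbe_get_module_eeprom - dump the SFP module EEPROM one byte at a time
 * over I2C: offsets below ETH_MODULE_SFF_8079_LEN come from the standard
 * A0h page, higher offsets from the SFF-8472 diagnostic page.  The loop
 * bails out if an SFP (re)initialization is in progress since the bus
 * would be busy.
 */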
3343 static int ixgbe_get_module_eeprom(struct net_device *dev,
3344 struct ethtool_eeprom *ee,
3347 struct ixgbe_adapter *adapter = netdev_priv(dev);
3348 struct ixgbe_hw *hw = &adapter->hw;
3349 s32 status = -EFAULT;
3356 if (hw->phy.type == ixgbe_phy_fw)
3359 for (i = ee->offset; i < ee->offset + ee->len; i++) {
3360 /* I2C reads can take a long time */
3361 if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
3364 if (i < ETH_MODULE_SFF_8079_LEN)
3365 status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
3367 status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);
3372 data[i - ee->offset] = databyte;
3378 static const struct {
3379 ixgbe_link_speed mac_speed;
3381 } ixgbe_ls_map[] = {
3382 { IXGBE_LINK_SPEED_10_FULL, SUPPORTED_10baseT_Full },
3383 { IXGBE_LINK_SPEED_100_FULL, SUPPORTED_100baseT_Full },
3384 { IXGBE_LINK_SPEED_1GB_FULL, SUPPORTED_1000baseT_Full },
3385 { IXGBE_LINK_SPEED_2_5GB_FULL, SUPPORTED_2500baseX_Full },
3386 { IXGBE_LINK_SPEED_10GB_FULL, SUPPORTED_10000baseT_Full },
3389 static const struct {
3392 } ixgbe_lp_map[] = {
3393 { FW_PHY_ACT_UD_2_100M_TX_EEE, SUPPORTED_100baseT_Full },
3394 { FW_PHY_ACT_UD_2_1G_T_EEE, SUPPORTED_1000baseT_Full },
3395 { FW_PHY_ACT_UD_2_10G_T_EEE, SUPPORTED_10000baseT_Full },
3396 { FW_PHY_ACT_UD_2_1G_KX_EEE, SUPPORTED_1000baseKX_Full },
3397 { FW_PHY_ACT_UD_2_10G_KX4_EEE, SUPPORTED_10000baseKX4_Full },
3398 { FW_PHY_ACT_UD_2_10G_KR_EEE, SUPPORTED_10000baseKR_Full},
3402 ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_eee *edata)
3404 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
3405 struct ixgbe_hw *hw = &adapter->hw;
3409 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, &info);
3413 edata->lp_advertised = 0;
3414 for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) {
3415 if (info[0] & ixgbe_lp_map[i].lp_advertised)
3416 edata->lp_advertised |= ixgbe_lp_map[i].mac_speed;
3419 edata->supported = 0;
3420 for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
3421 if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed)
3422 edata->supported |= ixgbe_ls_map[i].supported;
3425 edata->advertised = 0;
3426 for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
3427 if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed)
3428 edata->advertised |= ixgbe_ls_map[i].supported;
3431 edata->eee_enabled = !!edata->advertised;
3432 edata->tx_lpi_enabled = edata->eee_enabled;
3433 if (edata->advertised & edata->lp_advertised)
3434 edata->eee_active = true;
3439 static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
3441 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3442 struct ixgbe_hw *hw = &adapter->hw;
3444 if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
3447 if (hw->phy.eee_speeds_supported && hw->phy.type == ixgbe_phy_fw)
3448 return ixgbe_get_eee_fw(adapter, edata);
3453 static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
3455 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3456 struct ixgbe_hw *hw = &adapter->hw;
3457 struct ethtool_eee eee_data;
3460 if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
3463 memset(&eee_data, 0, sizeof(struct ethtool_eee));
3465 ret_val = ixgbe_get_eee(netdev, &eee_data);
3469 if (eee_data.eee_enabled && !edata->eee_enabled) {
3470 if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) {
3471 e_err(drv, "Setting EEE tx-lpi is not supported\n");
3475 if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) {
3477 "Setting EEE Tx LPI timer is not supported\n");
3481 if (eee_data.advertised != edata->advertised) {
3483 "Setting EEE advertised speeds is not supported\n");
3488 if (eee_data.eee_enabled != edata->eee_enabled) {
3489 if (edata->eee_enabled) {
3490 adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
3491 hw->phy.eee_speeds_advertised =
3492 hw->phy.eee_speeds_supported;
3494 adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
3495 hw->phy.eee_speeds_advertised = 0;
3499 if (netif_running(netdev))
3500 ixgbe_reinit_locked(adapter);
3502 ixgbe_reset(adapter);
3508 static u32 ixgbe_get_priv_flags(struct net_device *netdev)
3510 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3513 if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
3514 priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX;
3516 if (adapter->flags2 & IXGBE_FLAG2_VF_IPSEC_ENABLED)
3517 priv_flags |= IXGBE_PRIV_FLAGS_VF_IPSEC_EN;
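/* ixgbe_set_priv_flags - apply ethtool private flags.  The bits map onto
 * IXGBE_FLAG2_RX_LEGACY and IXGBE_FLAG2_VF_IPSEC_ENABLED in adapter->flags2;
 * if anything actually changed the interface is reinitialized so the new
 * Rx path / IPsec offload setting takes effect.
 */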
3522 static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
3524 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3525 unsigned int flags2 = adapter->flags2;
3527 flags2 &= ~IXGBE_FLAG2_RX_LEGACY;
3528 if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX)
3529 flags2 |= IXGBE_FLAG2_RX_LEGACY;
3531 flags2 &= ~IXGBE_FLAG2_VF_IPSEC_ENABLED;
3532 if (priv_flags & IXGBE_PRIV_FLAGS_VF_IPSEC_EN)
3533 flags2 |= IXGBE_FLAG2_VF_IPSEC_ENABLED;
3535 if (flags2 != adapter->flags2) {
3536 adapter->flags2 = flags2;
3538 /* reset interface to repopulate queues */
3539 if (netif_running(netdev))
3540 ixgbe_reinit_locked(adapter);
3546 static const struct ethtool_ops ixgbe_ethtool_ops = {
3547 .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
3548 .get_drvinfo = ixgbe_get_drvinfo,
3549 .get_regs_len = ixgbe_get_regs_len,
3550 .get_regs = ixgbe_get_regs,
3551 .get_wol = ixgbe_get_wol,
3552 .set_wol = ixgbe_set_wol,
3553 .nway_reset = ixgbe_nway_reset,
3554 .get_link = ethtool_op_get_link,
3555 .get_eeprom_len = ixgbe_get_eeprom_len,
3556 .get_eeprom = ixgbe_get_eeprom,
3557 .set_eeprom = ixgbe_set_eeprom,
3558 .get_ringparam = ixgbe_get_ringparam,
3559 .set_ringparam = ixgbe_set_ringparam,
3560 .get_pause_stats = ixgbe_get_pause_stats,
3561 .get_pauseparam = ixgbe_get_pauseparam,
3562 .set_pauseparam = ixgbe_set_pauseparam,
3563 .get_msglevel = ixgbe_get_msglevel,
3564 .set_msglevel = ixgbe_set_msglevel,
3565 .self_test = ixgbe_diag_test,
3566 .get_strings = ixgbe_get_strings,
3567 .set_phys_id = ixgbe_set_phys_id,
3568 .get_sset_count = ixgbe_get_sset_count,
3569 .get_ethtool_stats = ixgbe_get_ethtool_stats,
3570 .get_coalesce = ixgbe_get_coalesce,
3571 .set_coalesce = ixgbe_set_coalesce,
3572 .get_rxnfc = ixgbe_get_rxnfc,
3573 .set_rxnfc = ixgbe_set_rxnfc,
3574 .get_rxfh_indir_size = ixgbe_rss_indir_size,
3575 .get_rxfh_key_size = ixgbe_get_rxfh_key_size,
3576 .get_rxfh = ixgbe_get_rxfh,
3577 .set_rxfh = ixgbe_set_rxfh,
3578 .get_eee = ixgbe_get_eee,
3579 .set_eee = ixgbe_set_eee,
3580 .get_channels = ixgbe_get_channels,
3581 .set_channels = ixgbe_set_channels,
3582 .get_priv_flags = ixgbe_get_priv_flags,
3583 .set_priv_flags = ixgbe_set_priv_flags,
3584 .get_ts_info = ixgbe_get_ts_info,
3585 .get_module_info = ixgbe_get_module_info,
3586 .get_module_eeprom = ixgbe_get_module_eeprom,
3587 .get_link_ksettings = ixgbe_get_link_ksettings,
3588 .set_link_ksettings = ixgbe_set_link_ksettings,
3591 void ixgbe_set_ethtool_ops(struct net_device *netdev)
3593 netdev->ethtool_ops = &ixgbe_ethtool_ops;