2 * Copyright (C) 2005 - 2016 Broadcom
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
10 * Contact Information:
11 * linux-drivers@emulex.com
15 * Costa Mesa, CA 92626
20 #include <linux/ethtool.h>
/*
 * Descriptor for one ethtool statistic: the display name shown by
 * "ethtool -S" plus (in the full struct) the category/size/offset used to
 * locate the value inside the driver's stats structures.
 * NOTE(review): this listing is truncated — the type/size/offset members
 * and the closing brace are missing here; reconcile with upstream.
 */
22 struct be_ethtool_stat {
23 char desc[ETH_GSTRING_LEN];
/* Stat categories: per-TX-queue, per-RX-queue, and adapter-wide driver stats */
29 enum {DRVSTAT_TX, DRVSTAT_RX, DRVSTAT};
/* Expand to the size and offset of @field within @_struct */
30 #define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
31 offsetof(_struct, field)
/* Table-initializer helpers: stat name string, category, and field location */
32 #define DRVSTAT_TX_INFO(field) #field, DRVSTAT_TX,\
33 FIELDINFO(struct be_tx_stats, field)
34 #define DRVSTAT_RX_INFO(field) #field, DRVSTAT_RX,\
35 FIELDINFO(struct be_rx_stats, field)
36 #define DRVSTAT_INFO(field) #field, DRVSTAT,\
37 FIELDINFO(struct be_drv_stats, field)
/*
 * Adapter-wide driver statistics reported by "ethtool -S"; each entry names
 * a field of struct be_drv_stats (see DRVSTAT_INFO).
 * NOTE(review): several multi-line comments below lost their closing
 * comment-terminator lines in this listing, and the array's closing brace
 * is missing — restore before compiling.
 */
39 static const struct be_ethtool_stat et_stats[] = {
40 {DRVSTAT_INFO(rx_crc_errors)},
41 {DRVSTAT_INFO(rx_alignment_symbol_errors)},
42 {DRVSTAT_INFO(rx_pause_frames)},
43 {DRVSTAT_INFO(rx_control_frames)},
44 /* Received packets dropped when the Ethernet length field
45 * is not equal to the actual Ethernet data length.
47 {DRVSTAT_INFO(rx_in_range_errors)},
48 /* Received packets dropped when their length field is >= 1501 bytes
51 {DRVSTAT_INFO(rx_out_range_errors)},
52 /* Received packets dropped when they are longer than 9216 bytes */
53 {DRVSTAT_INFO(rx_frame_too_long)},
54 /* Received packets dropped when they don't pass the unicast or
55 * multicast address filtering.
57 {DRVSTAT_INFO(rx_address_filtered)},
58 /* Received packets dropped when IP packet length field is less than
59 * the IP header length field.
61 {DRVSTAT_INFO(rx_dropped_too_small)},
62 /* Received packets dropped when IP length field is greater than
63 * the actual packet length.
65 {DRVSTAT_INFO(rx_dropped_too_short)},
66 /* Received packets dropped when the IP header length field is less
69 {DRVSTAT_INFO(rx_dropped_header_too_small)},
70 /* Received packets dropped when the TCP header length field is less
71 * than 5 or the TCP header length + IP header length is more
72 * than IP packet length.
74 {DRVSTAT_INFO(rx_dropped_tcp_length)},
75 {DRVSTAT_INFO(rx_dropped_runt)},
76 /* Number of received packets dropped when a fifo for descriptors going
77 * into the packet demux block overflows. In normal operation, this
78 * fifo must never overflow.
80 {DRVSTAT_INFO(rxpp_fifo_overflow_drop)},
81 /* Received packets dropped when the RX block runs out of space in
82 * one of its input FIFOs. This could happen due a long burst of
83 * minimum-sized (64b) frames in the receive path.
84 * This counter may also be erroneously incremented rarely.
86 {DRVSTAT_INFO(rx_input_fifo_overflow_drop)},
87 {DRVSTAT_INFO(rx_ip_checksum_errs)},
88 {DRVSTAT_INFO(rx_tcp_checksum_errs)},
89 {DRVSTAT_INFO(rx_udp_checksum_errs)},
90 {DRVSTAT_INFO(tx_pauseframes)},
91 {DRVSTAT_INFO(tx_controlframes)},
92 {DRVSTAT_INFO(rx_priority_pause_frames)},
93 {DRVSTAT_INFO(tx_priority_pauseframes)},
94 /* Received packets dropped when an internal fifo going into
95 * main packet buffer tank (PMEM) overflows.
97 {DRVSTAT_INFO(pmem_fifo_overflow_drop)},
98 {DRVSTAT_INFO(jabber_events)},
99 /* Received packets dropped due to lack of available HW packet buffers
100 * used to temporarily hold the received packets.
102 {DRVSTAT_INFO(rx_drops_no_pbuf)},
103 /* Received packets dropped due to input receive buffer
104 * descriptor fifo overflowing.
106 {DRVSTAT_INFO(rx_drops_no_erx_descr)},
107 /* Packets dropped because the internal FIFO to the offloaded TCP
108 * receive processing block is full. This could happen only for
109 * offloaded iSCSI or FCoE trarffic.
111 {DRVSTAT_INFO(rx_drops_no_tpre_descr)},
112 /* Received packets dropped when they need more than 8
113 * receive buffers. This cannot happen as the driver configures
114 * 2048 byte receive buffers.
116 {DRVSTAT_INFO(rx_drops_too_many_frags)},
117 {DRVSTAT_INFO(forwarded_packets)},
118 /* Received packets dropped when the frame length
119 * is more than 9018 bytes
121 {DRVSTAT_INFO(rx_drops_mtu)},
122 /* Number of dma mapping errors */
123 {DRVSTAT_INFO(dma_map_errors)},
124 /* Number of packets dropped due to random early drop function */
125 {DRVSTAT_INFO(eth_red_drops)},
126 {DRVSTAT_INFO(rx_roce_bytes_lsd)},
127 {DRVSTAT_INFO(rx_roce_bytes_msd)},
128 {DRVSTAT_INFO(rx_roce_frames)},
129 {DRVSTAT_INFO(roce_drops_payload_len)},
130 {DRVSTAT_INFO(roce_drops_crc)}
133 #define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
/*
 * Per-RX-queue statistics (fields of struct be_rx_stats). The first two
 * entries must remain rx_bytes and rx_pkts: be_get_ethtool_stats() reads
 * them positionally under the u64_stats sync.
 */
135 /* Stats related to multi RX queues: get_stats routine assumes bytes, pkts
136 * are first and second members respectively.
138 static const struct be_ethtool_stat et_rx_stats[] = {
139 {DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */
140 {DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
141 {DRVSTAT_RX_INFO(rx_vxlan_offload_pkts)},
142 {DRVSTAT_RX_INFO(rx_compl)},
143 {DRVSTAT_RX_INFO(rx_compl_err)},
144 {DRVSTAT_RX_INFO(rx_mcast_pkts)},
145 /* Number of page allocation failures while posting receive buffers
148 {DRVSTAT_RX_INFO(rx_post_fail)},
149 /* Recevied packets dropped due to skb allocation failure */
150 {DRVSTAT_RX_INFO(rx_drops_no_skbs)},
151 /* Received packets dropped due to lack of available fetched buffers
152 * posted by the driver.
154 {DRVSTAT_RX_INFO(rx_drops_no_frags)}
157 #define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats))
/*
 * Per-TX-queue statistics (fields of struct be_tx_stats). The first entry
 * must remain tx_compl: be_get_ethtool_stats() reads it positionally under
 * the sync_compl u64_stats sync.
 */
159 /* Stats related to multi TX queues: get_stats routine assumes compl is the
162 static const struct be_ethtool_stat et_tx_stats[] = {
163 {DRVSTAT_TX_INFO(tx_compl)}, /* If moving this member see above note */
164 /* This counter is incremented when the HW encounters an error while
165 * parsing the packet header of an outgoing TX request. This counter is
166 * applicable only for BE2, BE3 and Skyhawk based adapters.
168 {DRVSTAT_TX_INFO(tx_hdr_parse_err)},
169 /* This counter is incremented when an error occurs in the DMA
170 * operation associated with the TX request from the host to the device.
172 {DRVSTAT_TX_INFO(tx_dma_err)},
173 /* This counter is incremented when MAC or VLAN spoof checking is
174 * enabled on the interface and the TX request fails the spoof check
177 {DRVSTAT_TX_INFO(tx_spoof_check_err)},
178 /* This counter is incremented when the HW encounters an error while
179 * performing TSO offload. This counter is applicable only for Lancer
182 {DRVSTAT_TX_INFO(tx_tso_err)},
183 /* This counter is incremented when the HW detects Q-in-Q style VLAN
184 * tagging in a packet and such tagging is not expected on the outgoing
185 * interface. This counter is applicable only for Lancer adapters.
187 {DRVSTAT_TX_INFO(tx_qinq_err)},
188 /* This counter is incremented when the HW detects parity errors in the
189 * packet data. This counter is applicable only for Lancer adapters.
191 {DRVSTAT_TX_INFO(tx_internal_parity_err)},
192 {DRVSTAT_TX_INFO(tx_bytes)},
193 {DRVSTAT_TX_INFO(tx_pkts)},
194 {DRVSTAT_TX_INFO(tx_vxlan_offload_pkts)},
195 /* Number of skbs queued for trasmission by the driver */
196 {DRVSTAT_TX_INFO(tx_reqs)},
197 /* Number of times the TX queue was stopped due to lack
198 * of spaces in the TXQ.
200 {DRVSTAT_TX_INFO(tx_stops)},
201 /* Pkts dropped in the driver's transmit path */
202 {DRVSTAT_TX_INFO(tx_drv_drops)}
205 #define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
/*
 * Self-test names reported for ETH_SS_TEST, and the loopback-mode codes
 * passed to firmware by be_loopback_test().
 * NOTE(review): several test-name entries and the closing brace of the
 * array are missing from this listing.
 */
207 static const char et_self_tests[][ETH_GSTRING_LEN] = {
210 "External Loopback test",
215 #define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests)
216 #define BE_MAC_LOOPBACK 0x0
217 #define BE_PHY_LOOPBACK 0x1
218 #define BE_ONE_PORT_EXT_LOOPBACK 0x2
219 #define BE_NO_LOOPBACK 0xff
/*
 * ethtool -i: report driver name/version, firmware version and PCI bus
 * info. When the flashed FW differs from the running FW, both are shown
 * as "running [flashed]".
 * NOTE(review): the "else" between the two fw_version branches is missing
 * from this listing.
 */
221 static void be_get_drvinfo(struct net_device *netdev,
222 struct ethtool_drvinfo *drvinfo)
224 struct be_adapter *adapter = netdev_priv(netdev);
226 strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
227 strlcpy(drvinfo->version, DRV_VER, sizeof(drvinfo->version));
228 if (!memcmp(adapter->fw_ver, adapter->fw_on_flash, FW_VER_LEN))
229 strlcpy(drvinfo->fw_version, adapter->fw_ver,
230 sizeof(drvinfo->fw_version));
232 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
233 "%s [%s]", adapter->fw_ver, adapter->fw_on_flash);
235 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
236 sizeof(drvinfo->bus_info));
/*
 * Query the length of a flash file object (e.g. FW dump) on Lancer chips.
 * Issuing a read with data_offset and data_size both 0 makes the device
 * return the total object length in data_read.
 * NOTE(review): local declarations (status/addn_status) and the final
 * "return data_read;" are missing from this listing.
 */
239 static u32 lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
241 u32 data_read = 0, eof;
243 struct be_dma_mem data_len_cmd;
245 memset(&data_len_cmd, 0, sizeof(data_len_cmd));
246 /* data_offset and data_size should be 0 to get reg len */
247 lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0, file_name,
248 &data_read, &eof, &addn_status);
/*
 * Return the size of the FW dump: Lancer chips query the dump-file length
 * from the device, other chips use the cached FAT dump length.
 * NOTE(review): the dump_size declaration, "else" and return statement are
 * missing from this listing.
 */
253 static int be_get_dump_len(struct be_adapter *adapter)
257 if (lancer_chip(adapter))
258 dump_size = lancer_cmd_get_file_len(adapter,
259 LANCER_FW_DUMP_FILE);
261 dump_size = adapter->fat_dump_len;
/*
 * Read a flash file object from a Lancer chip into @buf, in chunks of at
 * most LANCER_READ_FILE_CHUNK bytes bounced through one DMA-coherent
 * buffer, stopping at @buf_len or end-of-file. The DMA buffer is freed on
 * exit.
 * NOTE(review): error-path lines (allocation-failure return, read-status
 * break, eof pre-init) are missing from this listing.
 */
266 static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
267 u32 buf_len, void *buf)
269 struct be_dma_mem read_cmd;
270 u32 read_len = 0, total_read_len = 0, chunk_size;
275 read_cmd.size = LANCER_READ_FILE_CHUNK;
276 read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size,
277 &read_cmd.dma, GFP_ATOMIC);
280 dev_err(&adapter->pdev->dev,
281 "Memory allocation failure while reading dump\n");
285 while ((total_read_len < buf_len) && !eof) {
286 chunk_size = min_t(u32, (buf_len - total_read_len),
287 LANCER_READ_FILE_CHUNK);
288 chunk_size = ALIGN(chunk_size, 4);
289 status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size,
290 total_read_len, file_name,
291 &read_len, &eof, &addn_status);
293 memcpy(buf + total_read_len, read_cmd.va, read_len);
294 total_read_len += read_len;
295 eof &= LANCER_READ_FILE_EOF_MASK;
301 dma_free_coherent(&adapter->pdev->dev, read_cmd.size, read_cmd.va,
/*
 * Fetch FW dump data: Lancer chips read the dump file from flash, other
 * chips retrieve the FAT dump via a FW command.
 * NOTE(review): the buf parameter line, "else" and return statement are
 * missing from this listing.
 */
307 static int be_read_dump_data(struct be_adapter *adapter, u32 dump_len,
312 if (lancer_chip(adapter))
313 status = lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE,
316 status = be_cmd_get_fat_dump(adapter, dump_len, buf);
321 static int be_get_coalesce(struct net_device *netdev,
322 struct ethtool_coalesce *et)
324 struct be_adapter *adapter = netdev_priv(netdev);
325 struct be_aic_obj *aic = &adapter->aic_obj[0];
327 et->rx_coalesce_usecs = aic->prev_eqd;
328 et->rx_coalesce_usecs_high = aic->max_eqd;
329 et->rx_coalesce_usecs_low = aic->min_eqd;
331 et->tx_coalesce_usecs = aic->prev_eqd;
332 et->tx_coalesce_usecs_high = aic->max_eqd;
333 et->tx_coalesce_usecs_low = aic->min_eqd;
335 et->use_adaptive_rx_coalesce = aic->enable;
336 et->use_adaptive_tx_coalesce = aic->enable;
/*
 * ethtool -C: apply the RX coalescing parameters to every event queue's
 * AIC object; the actual EQD command reaches FW from the worker thread,
 * except on Skyhawk with AIC off where it is forced immediately.
 * NOTE(review): the "int i;" declaration, loop-closing brace and
 * "return 0;" are missing from this listing.
 */
341 /* TX attributes are ignored. Only RX attributes are considered
342 * eqd cmd is issued in the worker thread.
344 static int be_set_coalesce(struct net_device *netdev,
345 struct ethtool_coalesce *et)
347 struct be_adapter *adapter = netdev_priv(netdev);
348 struct be_aic_obj *aic = &adapter->aic_obj[0];
349 struct be_eq_obj *eqo;
352 for_all_evt_queues(adapter, eqo, i) {
353 aic->enable = et->use_adaptive_rx_coalesce;
354 aic->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD);
355 aic->min_eqd = min(et->rx_coalesce_usecs_low, aic->max_eqd);
356 aic->et_eqd = min(et->rx_coalesce_usecs, aic->max_eqd);
357 aic->et_eqd = max(aic->et_eqd, aic->min_eqd);
361 /* For Skyhawk, the EQD setting happens via EQ_DB when AIC is enabled.
362 * When AIC is disabled, persistently force set EQD value via the
363 * FW cmd, so that we don't have to calculate the delay multiplier
364 * encode value each time EQ_DB is rung
366 if (!et->use_adaptive_rx_coalesce && skyhawk_chip(adapter))
367 be_eqd_update(adapter, true);
/*
 * ethtool -S: fill @data with adapter-wide stats, then per-RX-queue stats,
 * then per-TX-queue stats. 64-bit counters (rx_bytes/rx_pkts/tx_compl and
 * any u64 TX stat) are read under u64_stats_fetch_begin/retry for a
 * consistent snapshot; the rest are read as u32 via the offsets recorded
 * in the et_* tables.
 * NOTE(review): the "u8 *p;" declaration, several "do {" lines and loop
 * braces are missing from this listing.
 */
372 static void be_get_ethtool_stats(struct net_device *netdev,
373 struct ethtool_stats *stats, uint64_t *data)
375 struct be_adapter *adapter = netdev_priv(netdev);
376 struct be_rx_obj *rxo;
377 struct be_tx_obj *txo;
379 unsigned int i, j, base = 0, start;
381 for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
382 p = (u8 *)&adapter->drv_stats + et_stats[i].offset;
385 base += ETHTOOL_STATS_NUM;
387 for_all_rx_queues(adapter, rxo, j) {
388 struct be_rx_stats *stats = rx_stats(rxo);
391 start = u64_stats_fetch_begin_irq(&stats->sync);
392 data[base] = stats->rx_bytes;
393 data[base + 1] = stats->rx_pkts;
394 } while (u64_stats_fetch_retry_irq(&stats->sync, start));
396 for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) {
397 p = (u8 *)stats + et_rx_stats[i].offset;
398 data[base + i] = *(u32 *)p;
400 base += ETHTOOL_RXSTATS_NUM;
403 for_all_tx_queues(adapter, txo, j) {
404 struct be_tx_stats *stats = tx_stats(txo);
407 start = u64_stats_fetch_begin_irq(&stats->sync_compl);
408 data[base] = stats->tx_compl;
409 } while (u64_stats_fetch_retry_irq(&stats->sync_compl, start));
412 start = u64_stats_fetch_begin_irq(&stats->sync);
413 for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) {
414 p = (u8 *)stats + et_tx_stats[i].offset;
416 (et_tx_stats[i].size == sizeof(u64)) ?
417 *(u64 *)p : *(u32 *)p;
419 } while (u64_stats_fetch_retry_irq(&stats->sync, start));
420 base += ETHTOOL_TXSTATS_NUM;
424 static const char be_priv_flags[][ETH_GSTRING_LEN] = {
425 "disable-tpe-recovery"
/*
 * ethtool get_strings: emit the name strings for the requested string set
 * (stats, self-tests or private flags), each in an ETH_GSTRING_LEN slot.
 * Per-queue stat names are prefixed with "rxqN:" / "txqN:".
 * NOTE(review): the "switch (stringset)" line, its case labels for stats
 * and tests, and closing braces are missing from this listing.
 */
428 static void be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
431 struct be_adapter *adapter = netdev_priv(netdev);
436 for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
437 memcpy(data, et_stats[i].desc, ETH_GSTRING_LEN);
438 data += ETH_GSTRING_LEN;
440 for (i = 0; i < adapter->num_rx_qs; i++) {
441 for (j = 0; j < ETHTOOL_RXSTATS_NUM; j++) {
442 sprintf(data, "rxq%d: %s", i,
443 et_rx_stats[j].desc);
444 data += ETH_GSTRING_LEN;
447 for (i = 0; i < adapter->num_tx_qs; i++) {
448 for (j = 0; j < ETHTOOL_TXSTATS_NUM; j++) {
449 sprintf(data, "txq%d: %s", i,
450 et_tx_stats[j].desc);
451 data += ETH_GSTRING_LEN;
456 for (i = 0; i < ETHTOOL_TESTS_NUM; i++) {
457 memcpy(data, et_self_tests[i], ETH_GSTRING_LEN);
458 data += ETH_GSTRING_LEN;
461 case ETH_SS_PRIV_FLAGS:
462 for (i = 0; i < ARRAY_SIZE(be_priv_flags); i++)
463 strcpy(data + i * ETH_GSTRING_LEN, be_priv_flags[i]);
/*
 * ethtool get_sset_count: number of strings in each string set. The stats
 * count is the global table plus per-queue RX/TX tables.
 * NOTE(review): the switch statement, ETH_SS_TEST/ETH_SS_STATS case labels
 * and default return are missing from this listing.
 */
468 static int be_get_sset_count(struct net_device *netdev, int stringset)
470 struct be_adapter *adapter = netdev_priv(netdev);
474 return ETHTOOL_TESTS_NUM;
476 return ETHTOOL_STATS_NUM +
477 adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM +
478 adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM;
479 case ETH_SS_PRIV_FLAGS:
480 return ARRAY_SIZE(be_priv_flags);
/*
 * Map the PHY interface type (and, for SFP+/QSFP, the cable type) to an
 * ethtool PORT_* value.
 * NOTE(review): most of the return statements and case bodies are missing
 * from this listing — only the case labels survive.
 */
486 static u32 be_get_port_type(struct be_adapter *adapter)
490 switch (adapter->phy.interface_type) {
491 case PHY_TYPE_BASET_1GB:
492 case PHY_TYPE_BASEX_1GB:
496 case PHY_TYPE_SFP_PLUS_10GB:
497 if (adapter->phy.cable_type & SFP_PLUS_COPPER_CABLE)
503 if (adapter->phy.cable_type & QSFP_PLUS_CR4_CABLE)
508 case PHY_TYPE_XFP_10GB:
509 case PHY_TYPE_SFP_1GB:
512 case PHY_TYPE_BASET_10GB:
/*
 * Convert the adapter's BE_SUPPORTED_SPEED_* bitmask into the legacy
 * ethtool SUPPORTED_* bitmask, keyed by PHY interface type (and cable
 * type for 40G QSFP).
 * NOTE(review): the "u32 val = 0;" declaration, "break;" statements,
 * several SUPPORTED_TP lines and the final "return val;" are missing from
 * this listing.
 */
522 static u32 convert_to_et_setting(struct be_adapter *adapter, u32 if_speeds)
526 switch (adapter->phy.interface_type) {
527 case PHY_TYPE_BASET_1GB:
528 case PHY_TYPE_BASEX_1GB:
531 if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
532 val |= SUPPORTED_1000baseT_Full;
533 if (if_speeds & BE_SUPPORTED_SPEED_100MBPS)
534 val |= SUPPORTED_100baseT_Full;
535 if (if_speeds & BE_SUPPORTED_SPEED_10MBPS)
536 val |= SUPPORTED_10baseT_Full;
538 case PHY_TYPE_KX4_10GB:
539 val |= SUPPORTED_Backplane;
540 if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
541 val |= SUPPORTED_1000baseKX_Full;
542 if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
543 val |= SUPPORTED_10000baseKX4_Full;
545 case PHY_TYPE_KR2_20GB:
546 val |= SUPPORTED_Backplane;
547 if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
548 val |= SUPPORTED_10000baseKR_Full;
549 if (if_speeds & BE_SUPPORTED_SPEED_20GBPS)
550 val |= SUPPORTED_20000baseKR2_Full;
552 case PHY_TYPE_KR_10GB:
553 val |= SUPPORTED_Backplane |
554 SUPPORTED_10000baseKR_Full;
556 case PHY_TYPE_KR4_40GB:
557 val |= SUPPORTED_Backplane;
558 if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
559 val |= SUPPORTED_10000baseKR_Full;
560 if (if_speeds & BE_SUPPORTED_SPEED_40GBPS)
561 val |= SUPPORTED_40000baseKR4_Full;
564 if (if_speeds & BE_SUPPORTED_SPEED_40GBPS) {
565 switch (adapter->phy.cable_type) {
566 case QSFP_PLUS_CR4_CABLE:
567 val |= SUPPORTED_40000baseCR4_Full;
569 case QSFP_PLUS_LR4_CABLE:
570 val |= SUPPORTED_40000baseLR4_Full;
573 val |= SUPPORTED_40000baseSR4_Full;
577 case PHY_TYPE_SFP_PLUS_10GB:
578 case PHY_TYPE_XFP_10GB:
579 case PHY_TYPE_SFP_1GB:
580 val |= SUPPORTED_FIBRE;
581 if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
582 val |= SUPPORTED_10000baseT_Full;
583 if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
584 val |= SUPPORTED_1000baseT_Full;
586 case PHY_TYPE_BASET_10GB:
588 if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
589 val |= SUPPORTED_10000baseT_Full;
590 if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
591 val |= SUPPORTED_1000baseT_Full;
592 if (if_speeds & BE_SUPPORTED_SPEED_100MBPS)
593 val |= SUPPORTED_100baseT_Full;
602 bool be_pause_supported(struct be_adapter *adapter)
604 return (adapter->phy.interface_type == PHY_TYPE_SFP_PLUS_10GB ||
605 adapter->phy.interface_type == PHY_TYPE_XFP_10GB) ?
/*
 * ethtool get_link_ksettings: on the first call (cached link_speed < 0)
 * query link status and PHY info from firmware, derive the supported/
 * advertised masks, port type and autoneg, and cache them in adapter->phy;
 * later calls serve the cached values. Duplex is inferred from carrier.
 * NOTE(review): local declarations, status checks, the fixed-speed
 * "supported |=" line and closing braces are missing from this listing.
 */
609 static int be_get_link_ksettings(struct net_device *netdev,
610 struct ethtool_link_ksettings *cmd)
612 struct be_adapter *adapter = netdev_priv(netdev);
618 u32 supported = 0, advertising = 0;
620 if (adapter->phy.link_speed < 0) {
621 status = be_cmd_link_status_query(adapter, &link_speed,
624 be_link_status_update(adapter, link_status);
625 cmd->base.speed = link_speed;
627 status = be_cmd_get_phy_info(adapter);
629 auto_speeds = adapter->phy.auto_speeds_supported;
630 fixed_speeds = adapter->phy.fixed_speeds_supported;
632 be_cmd_query_cable_type(adapter);
635 convert_to_et_setting(adapter,
639 convert_to_et_setting(adapter, auto_speeds);
641 cmd->base.port = be_get_port_type(adapter);
643 if (adapter->phy.auto_speeds_supported) {
644 supported |= SUPPORTED_Autoneg;
645 cmd->base.autoneg = AUTONEG_ENABLE;
646 advertising |= ADVERTISED_Autoneg;
649 supported |= SUPPORTED_Pause;
650 if (be_pause_supported(adapter))
651 advertising |= ADVERTISED_Pause;
653 cmd->base.port = PORT_OTHER;
654 cmd->base.autoneg = AUTONEG_DISABLE;
657 /* Save for future use */
658 adapter->phy.link_speed = cmd->base.speed;
659 adapter->phy.port_type = cmd->base.port;
660 adapter->phy.autoneg = cmd->base.autoneg;
661 adapter->phy.advertising = advertising;
662 adapter->phy.supported = supported;
664 cmd->base.speed = adapter->phy.link_speed;
665 cmd->base.port = adapter->phy.port_type;
666 cmd->base.autoneg = adapter->phy.autoneg;
667 advertising = adapter->phy.advertising;
668 supported = adapter->phy.supported;
671 cmd->base.duplex = netif_carrier_ok(netdev) ?
672 DUPLEX_FULL : DUPLEX_UNKNOWN;
673 cmd->base.phy_address = adapter->port_num;
675 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
677 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
683 static void be_get_ringparam(struct net_device *netdev,
684 struct ethtool_ringparam *ring)
686 struct be_adapter *adapter = netdev_priv(netdev);
688 ring->rx_max_pending = adapter->rx_obj[0].q.len;
689 ring->rx_pending = adapter->rx_obj[0].q.len;
690 ring->tx_max_pending = adapter->tx_obj[0].q.len;
691 ring->tx_pending = adapter->tx_obj[0].q.len;
695 be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
697 struct be_adapter *adapter = netdev_priv(netdev);
699 be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause);
700 ecmd->autoneg = adapter->phy.fc_autoneg;
/*
 * ethtool -A: reject a change of pause autoneg, push the new TX/RX pause
 * settings to firmware, and on success cache them in the adapter.
 * NOTE(review): the return-type line, the error return for the autoneg
 * mismatch, the status check and the final "return 0;" are missing from
 * this listing.
 */
704 be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
706 struct be_adapter *adapter = netdev_priv(netdev);
709 if (ecmd->autoneg != adapter->phy.fc_autoneg)
712 status = be_cmd_set_flow_control(adapter, ecmd->tx_pause,
715 dev_warn(&adapter->pdev->dev, "Pause param set failed\n");
716 return be_cmd_status(status);
719 adapter->tx_fc = ecmd->tx_pause;
720 adapter->rx_fc = ecmd->rx_pause;
/*
 * ethtool -p (port identify): save the current beacon state on ACTIVE and
 * ask the core to cycle once per second; ON/OFF toggle the port beacon
 * LED via FW; INACTIVE restores the saved state.
 * NOTE(review): the switch statement, ETHTOOL_ID_ON/OFF case labels and
 * break statements are missing from this listing.
 */
724 static int be_set_phys_id(struct net_device *netdev,
725 enum ethtool_phys_id_state state)
727 struct be_adapter *adapter = netdev_priv(netdev);
731 case ETHTOOL_ID_ACTIVE:
732 status = be_cmd_get_beacon_state(adapter, adapter->hba_port_num,
733 &adapter->beacon_state);
735 return be_cmd_status(status);
736 return 1; /* cycle on/off once per second */
739 status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num,
740 0, 0, BEACON_STATE_ENABLED);
744 status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num,
745 0, 0, BEACON_STATE_DISABLED);
748 case ETHTOOL_ID_INACTIVE:
749 status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num,
750 0, 0, adapter->beacon_state);
753 return be_cmd_status(status);
/*
 * ethtool -W: initiate or delete a Lancer FW dump. Only Lancer chips with
 * full privileges may do this; any other dump flag is rejected.
 * NOTE(review): the -EOPNOTSUPP/-EINVAL returns, "break;" statements and
 * the final return are missing from this listing.
 */
756 static int be_set_dump(struct net_device *netdev, struct ethtool_dump *dump)
758 struct be_adapter *adapter = netdev_priv(netdev);
759 struct device *dev = &adapter->pdev->dev;
762 if (!lancer_chip(adapter) ||
763 !check_privilege(adapter, MAX_PRIVILEGES))
766 switch (dump->flag) {
767 case LANCER_INITIATE_FW_DUMP:
768 status = lancer_initiate_dump(adapter);
770 dev_info(dev, "FW dump initiated successfully\n");
772 case LANCER_DELETE_FW_DUMP:
773 status = lancer_delete_dump(adapter);
775 dev_info(dev, "FW dump deleted successfully\n");
778 dev_err(dev, "Invalid dump level: 0x%x\n", dump->flag);
/*
 * ethtool get_wol: report magic-packet WoL capability and whether it is
 * currently enabled; SecureOn password is not supported (cleared).
 * NOTE(review): the "if (adapter->wol_en)" guard before setting wolopts
 * and closing braces are missing from this listing.
 */
784 static void be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
786 struct be_adapter *adapter = netdev_priv(netdev);
788 if (adapter->wol_cap & BE_WOL_CAP) {
789 wol->supported |= WAKE_MAGIC;
791 wol->wolopts |= WAKE_MAGIC;
795 memset(&wol->sopass, 0, sizeof(wol->sopass));
/*
 * ethtool set_wol: accept only WAKE_MAGIC, program the magic-packet MAC
 * into firmware via a DMA-coherent command buffer, arm/disarm PCI wake
 * for D3hot/D3cold, and cache the enable state. The DMA buffer is freed
 * on exit.
 * NOTE(review): error returns (-EOPNOTSUPP/-ENOMEM), local declarations
 * and the "err:" label/return are missing from this listing.
 */
798 static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
800 struct be_adapter *adapter = netdev_priv(netdev);
801 struct device *dev = &adapter->pdev->dev;
802 struct be_dma_mem cmd;
807 if (wol->wolopts & ~WAKE_MAGIC)
810 if (!(adapter->wol_cap & BE_WOL_CAP)) {
811 dev_warn(&adapter->pdev->dev, "WOL not supported\n");
815 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
816 cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
822 enable = wol->wolopts & WAKE_MAGIC;
824 ether_addr_copy(mac, adapter->netdev->dev_addr);
826 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
828 dev_err(dev, "Could not set Wake-on-lan mac address\n");
829 status = be_cmd_status(status);
833 pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
834 pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
836 adapter->wol_en = enable ? true : false;
839 dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
/*
 * Self-test helper: run the FW DDR DMA test twice, once with each of two
 * alternating bit patterns, using a DMA-coherent command buffer that is
 * freed on exit.
 * NOTE(review): local declarations, the allocation-failure return and the
 * loop's error break are missing from this listing.
 */
843 static int be_test_ddr_dma(struct be_adapter *adapter)
846 struct be_dma_mem ddrdma_cmd;
847 static const u64 pattern[2] = {
848 0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL
851 ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
852 ddrdma_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
853 ddrdma_cmd.size, &ddrdma_cmd.dma,
858 for (i = 0; i < 2; i++) {
859 ret = be_cmd_ddr_dma_test(adapter, pattern[i],
866 dma_free_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va,
868 return be_cmd_status(ret);
/*
 * Self-test helper: switch the port into the requested loopback mode, run
 * the FW loopback test (1500-byte frames, 2 packets, 0xabc pattern), then
 * restore BE_NO_LOOPBACK. Returns the set_loopback result; the loopback
 * test's own result is returned through *status.
 * NOTE(review): parameter continuation, error-check lines and the final
 * return are missing from this listing.
 */
871 static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
876 ret = be_cmd_set_loopback(adapter, adapter->hba_port_num,
881 *status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
882 loopback_type, 1500, 2, 0xabc);
884 ret = be_cmd_set_loopback(adapter, adapter->hba_port_num,
/*
 * ethtool -t: run MAC/PHY (and optionally external) loopback tests when
 * offline testing is requested, plus the DDR DMA test on non-Lancer
 * chips; finally poll link status up to 10 times (500 ms apart) to let
 * the link recover. Not supported on SuperNIC functions.
 * NOTE(review): local declarations, closing braces and parts of the
 * link-recovery loop are missing from this listing.
 */
892 static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
895 struct be_adapter *adapter = netdev_priv(netdev);
899 if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
900 dev_err(&adapter->pdev->dev, "Self test not supported\n");
901 test->flags |= ETH_TEST_FL_FAILED;
905 memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
907 /* check link status before offline tests */
908 link_status = netif_carrier_ok(netdev);
910 if (test->flags & ETH_TEST_FL_OFFLINE) {
911 if (be_loopback_test(adapter, BE_MAC_LOOPBACK, &data[0]) != 0)
912 test->flags |= ETH_TEST_FL_FAILED;
914 if (be_loopback_test(adapter, BE_PHY_LOOPBACK, &data[1]) != 0)
915 test->flags |= ETH_TEST_FL_FAILED;
917 if (test->flags & ETH_TEST_FL_EXTERNAL_LB) {
918 if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
920 test->flags |= ETH_TEST_FL_FAILED;
921 test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
925 if (!lancer_chip(adapter) && be_test_ddr_dma(adapter) != 0) {
927 test->flags |= ETH_TEST_FL_FAILED;
930 /* link status was down prior to test */
932 test->flags |= ETH_TEST_FL_FAILED;
937 for (cnt = 10; cnt; cnt--) {
938 status = be_cmd_link_status_query(adapter, NULL, &link_status,
941 test->flags |= ETH_TEST_FL_FAILED;
949 msleep_interruptible(500);
953 static int be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
955 struct be_adapter *adapter = netdev_priv(netdev);
957 return be_load_fw(adapter, efl->data);
/*
 * ethtool -w: report the FW dump length and that dumping is enabled.
 * Requires full privileges.
 * NOTE(review): the return-type line, the privilege-failure return and
 * the final "return 0;" are missing from this listing.
 */
961 be_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
963 struct be_adapter *adapter = netdev_priv(netdev);
965 if (!check_privilege(adapter, MAX_PRIVILEGES))
968 dump->len = be_get_dump_len(adapter);
970 dump->flag = 0x1; /* FW dump is enabled */
/*
 * ethtool -w data phase: copy dump->len bytes of FW dump data into @buf
 * via be_read_dump_data(). Requires full privileges.
 * NOTE(review): the return-type line, "void *buf" parameter line and the
 * privilege-failure return are missing from this listing.
 */
975 be_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
978 struct be_adapter *adapter = netdev_priv(netdev);
981 if (!check_privilege(adapter, MAX_PRIVILEGES))
984 status = be_read_dump_data(adapter, dump->len, buf);
985 return be_cmd_status(status);
/*
 * ethtool get_eeprom_len: on Lancer return the VPD file length (PF or VF
 * file depending on function type); other chips expose a fixed seeprom
 * length. Requires full privileges.
 * NOTE(review): the privilege-failure return, "else" and closing braces
 * are missing from this listing.
 */
990 static int be_get_eeprom_len(struct net_device *netdev)
992 struct be_adapter *adapter = netdev_priv(netdev);
995 if (lancer_chip(adapter)) {
996 if (be_physfn(adapter))
997 return lancer_cmd_get_file_len(adapter,
1000 return lancer_cmd_get_file_len(adapter,
1001 LANCER_VPD_VF_FILE);
1003 return BE_READ_SEEPROM_LEN;
/*
 * ethtool -e: on Lancer, read the VPD file (PF or VF) directly; on other
 * chips read the seeprom via a FW command into a DMA-coherent buffer and
 * copy out the requested offset/len window. The DMA buffer is freed on
 * exit.
 * NOTE(review): local declarations, the allocation-failure return and the
 * status check before the memcpy are missing from this listing.
 */
1007 static int be_read_eeprom(struct net_device *netdev,
1008 struct ethtool_eeprom *eeprom, uint8_t *data)
1010 struct be_adapter *adapter = netdev_priv(netdev);
1011 struct be_dma_mem eeprom_cmd;
1012 struct be_cmd_resp_seeprom_read *resp;
1018 if (lancer_chip(adapter)) {
1019 if (be_physfn(adapter))
1020 return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE,
1023 return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE,
1027 eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
1029 memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
1030 eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
1031 eeprom_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
1032 eeprom_cmd.size, &eeprom_cmd.dma,
1038 status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
1041 resp = eeprom_cmd.va;
1042 memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
1044 dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va,
1047 return be_cmd_status(status);
1050 static u32 be_get_msg_level(struct net_device *netdev)
1052 struct be_adapter *adapter = netdev_priv(netdev);
1054 return adapter->msg_enable;
/*
 * ethtool msglvl set: store the new message level; additionally, on BEx
 * chips, raise/lower the FW log level when the NETIF_MSG_HW bit toggles.
 * NOTE(review): the early "return;" after the equality check and closing
 * braces are missing from this listing.
 */
1057 static void be_set_msg_level(struct net_device *netdev, u32 level)
1059 struct be_adapter *adapter = netdev_priv(netdev);
1061 if (adapter->msg_enable == level)
1064 if ((level & NETIF_MSG_HW) != (adapter->msg_enable & NETIF_MSG_HW))
1065 if (BEx_chip(adapter))
1066 be_cmd_set_fw_log_level(adapter, level & NETIF_MSG_HW ?
1067 FW_LOG_LEVEL_DEFAULT :
1068 FW_LOG_LEVEL_FATAL);
1069 adapter->msg_enable = level;
/*
 * Translate the adapter's RSS_ENABLE_* flags into the ethtool RXH_* mask
 * for one flow type: IP hashing contributes src/dst address bits, L4
 * hashing contributes the port bits.
 * NOTE(review): the "u64 data = 0;" declaration, TCP_V4/UDP_V4/TCP_V6/
 * UDP_V6 case labels, breaks and the final return are missing from this
 * listing.
 */
1072 static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
1076 switch (flow_type) {
1078 if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4)
1079 data |= RXH_IP_DST | RXH_IP_SRC;
1080 if (adapter->rss_info.rss_flags & RSS_ENABLE_TCP_IPV4)
1081 data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
1084 if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4)
1085 data |= RXH_IP_DST | RXH_IP_SRC;
1086 if (adapter->rss_info.rss_flags & RSS_ENABLE_UDP_IPV4)
1087 data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
1090 if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV6)
1091 data |= RXH_IP_DST | RXH_IP_SRC;
1092 if (adapter->rss_info.rss_flags & RSS_ENABLE_TCP_IPV6)
1093 data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
1096 if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV6)
1097 data |= RXH_IP_DST | RXH_IP_SRC;
1098 if (adapter->rss_info.rss_flags & RSS_ENABLE_UDP_IPV6)
1099 data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
/*
 * ethtool -n: serve ETHTOOL_GRXFH (RSS hash fields for a flow type) and
 * ETHTOOL_GRXRINGS (number of RX queues); fails when multi-RX-queue RSS
 * is disabled.
 * NOTE(review): the -EINVAL return, switch statement, ETHTOOL_GRXFH case
 * label, breaks and final return are missing from this listing.
 */
1106 static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
1109 struct be_adapter *adapter = netdev_priv(netdev);
1111 if (!be_multi_rxq(adapter)) {
1112 dev_info(&adapter->pdev->dev,
1113 "ethtool::get_rxnfc: RX flow hashing is disabled\n");
1119 cmd->data = be_get_rss_hash_opts(adapter, cmd->flow_type);
1121 case ETHTOOL_GRXRINGS:
1122 cmd->data = adapter->num_rx_qs;
/*
 * Apply new RSS hash-field selections: only L3 or L3+L4 hashing is
 * accepted per flow type; UDP L4 hashing additionally requires chip
 * support. If the resulting flag set differs from the current one, push
 * it to FW via be_cmd_rss_config() and cache it on success.
 * NOTE(review): case labels (TCP/UDP V4/V6), the UDP-unsupported error
 * returns, breaks, the early "return 0" on no-change and the status check
 * are missing from this listing.
 */
1131 static int be_set_rss_hash_opts(struct be_adapter *adapter,
1132 struct ethtool_rxnfc *cmd)
1135 u32 rss_flags = adapter->rss_info.rss_flags;
1137 if (cmd->data != L3_RSS_FLAGS &&
1138 cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS))
1141 switch (cmd->flow_type) {
1143 if (cmd->data == L3_RSS_FLAGS)
1144 rss_flags &= ~RSS_ENABLE_TCP_IPV4;
1145 else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
1146 rss_flags |= RSS_ENABLE_IPV4 |
1147 RSS_ENABLE_TCP_IPV4;
1150 if (cmd->data == L3_RSS_FLAGS)
1151 rss_flags &= ~RSS_ENABLE_TCP_IPV6;
1152 else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
1153 rss_flags |= RSS_ENABLE_IPV6 |
1154 RSS_ENABLE_TCP_IPV6;
1157 if ((cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) &&
1161 if (cmd->data == L3_RSS_FLAGS)
1162 rss_flags &= ~RSS_ENABLE_UDP_IPV4;
1163 else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
1164 rss_flags |= RSS_ENABLE_IPV4 |
1165 RSS_ENABLE_UDP_IPV4;
1168 if ((cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) &&
1172 if (cmd->data == L3_RSS_FLAGS)
1173 rss_flags &= ~RSS_ENABLE_UDP_IPV6;
1174 else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
1175 rss_flags |= RSS_ENABLE_IPV6 |
1176 RSS_ENABLE_UDP_IPV6;
1182 if (rss_flags == adapter->rss_info.rss_flags)
1185 status = be_cmd_rss_config(adapter, adapter->rss_info.rsstable,
1186 rss_flags, RSS_INDIR_TABLE_LEN,
1187 adapter->rss_info.rss_hkey);
1189 adapter->rss_info.rss_flags = rss_flags;
1191 return be_cmd_status(status);
/*
 * ethtool -N: only ETHTOOL_SRXFH (hash-field configuration) is supported,
 * delegated to be_set_rss_hash_opts(); fails when multi-RX-queue RSS is
 * disabled.
 * NOTE(review): the -EINVAL return, switch statement, default
 * -EOPNOTSUPP case and final return are missing from this listing.
 */
1194 static int be_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
1196 struct be_adapter *adapter = netdev_priv(netdev);
1199 if (!be_multi_rxq(adapter)) {
1200 dev_err(&adapter->pdev->dev,
1201 "ethtool::set_rxnfc: RX flow hashing is disabled\n");
1207 status = be_set_rss_hash_opts(adapter, cmd);
/*
 * ethtool -l: report channel layout. Combined channels are IRQs shared by
 * one RX and one TX queue; the remainder are RX-only or TX-only. Maximums
 * reserve one IRQ for the mandatory combined channel.
 */
1216 static void be_get_channels(struct net_device *netdev,
1217 struct ethtool_channels *ch)
1219 struct be_adapter *adapter = netdev_priv(netdev);
1220 u16 num_rx_irqs = max_t(u16, adapter->num_rss_qs, 1);
1222 /* num_tx_qs is always same as the number of irqs used for TX */
1223 ch->combined_count = min(adapter->num_tx_qs, num_rx_irqs);
1224 ch->rx_count = num_rx_irqs - ch->combined_count;
1225 ch->tx_count = adapter->num_tx_qs - ch->combined_count;
1227 ch->max_combined = be_max_qp_irqs(adapter);
1228 /* The user must create atleast one combined channel */
1229 ch->max_rx = be_max_rx_irqs(adapter) - 1;
1230 ch->max_tx = be_max_tx_irqs(adapter) - 1;
/*
 * ethtool -L: validate the requested channel layout (at least one
 * combined channel; RX-only and TX-only extras are mutually exclusive;
 * totals within the adapter's IRQ limits), store the new RX/TX IRQ
 * counts and rebuild the queues.
 * NOTE(review): the -EINVAL returns and parts of the limit checks are
 * missing from this listing.
 */
1233 static int be_set_channels(struct net_device *netdev,
1234 struct ethtool_channels *ch)
1236 struct be_adapter *adapter = netdev_priv(netdev);
1239 /* we support either only combined channels or a combination of
1240 * combined and either RX-only or TX-only channels.
1242 if (ch->other_count || !ch->combined_count ||
1243 (ch->rx_count && ch->tx_count))
1246 if (ch->combined_count > be_max_qp_irqs(adapter) ||
1248 (ch->rx_count + ch->combined_count) > be_max_rx_irqs(adapter)) ||
1250 (ch->tx_count + ch->combined_count) > be_max_tx_irqs(adapter)))
1253 adapter->cfg_num_rx_irqs = ch->combined_count + ch->rx_count;
1254 adapter->cfg_num_tx_irqs = ch->combined_count + ch->tx_count;
1256 status = be_update_queues(adapter);
1257 return be_cmd_status(status);
1260 static u32 be_get_rxfh_indir_size(struct net_device *netdev)
1262 return RSS_INDIR_TABLE_LEN;
1265 static u32 be_get_rxfh_key_size(struct net_device *netdev)
1267 return RSS_HASH_KEY_LEN;
/*
 * ethtool -x: copy out the cached RSS indirection table, hash key and
 * hash function (Toeplitz) when the corresponding output pointers are
 * non-NULL.
 * NOTE(review): the NULL-pointer guards before each copy and the final
 * "return 0;" are missing from this listing.
 */
1270 static int be_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey,
1273 struct be_adapter *adapter = netdev_priv(netdev);
1275 struct rss_info *rss = &adapter->rss_info;
1278 for (i = 0; i < RSS_INDIR_TABLE_LEN; i++)
1279 indir[i] = rss->rss_queue[i];
1283 memcpy(hkey, rss->rss_hkey, RSS_HASH_KEY_LEN);
1286 *hfunc = ETH_RSS_HASH_TOP;
/* ethtool -X handler: program a new RSS indirection table and/or hash
 * key into the firmware, caching the values on success.
 * NOTE(review): several if/else framing lines are elided from this
 * numbered listing, so branch boundaries below are inferred — verify
 * against the full source.
 */
1291 static int be_set_rxfh(struct net_device *netdev, const u32 *indir,
1292 const u8 *hkey, const u8 hfunc)
1295 struct be_adapter *adapter = netdev_priv(netdev);
1296 u8 rsstable[RSS_INDIR_TABLE_LEN];
1298 /* We do not allow change in unsupported parameters */
/* Only Toeplitz (or "no change") is accepted as the hash function. */
1299 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
1303 struct be_rx_obj *rxo;
/* Caller supplied a new table: translate each queue index into the
 * HW rss_id of that RX object and cache the queue mapping. */
1305 for (i = 0; i < RSS_INDIR_TABLE_LEN; i++) {
1307 rxo = &adapter->rx_obj[j];
1308 rsstable[i] = rxo->rss_id;
1309 adapter->rss_info.rss_queue[i] = j;
/* No new table supplied: reuse the currently cached one. */
1312 memcpy(rsstable, adapter->rss_info.rsstable,
1313 RSS_INDIR_TABLE_LEN);
/* No new key supplied: keep programming the cached key. */
1317 hkey = adapter->rss_info.rss_hkey;
1319 rc = be_cmd_rss_config(adapter, rsstable,
1320 adapter->rss_info.rss_flags,
1321 RSS_INDIR_TABLE_LEN, hkey);
/* Presumably the FW-failure path: disable RSS flags — confirm the
 * elided surrounding if (rc) { ... } in the full source. */
1323 adapter->rss_info.rss_flags = RSS_ENABLE_NONE;
/* Success: cache what was actually programmed. */
1326 memcpy(adapter->rss_info.rss_hkey, hkey, RSS_HASH_KEY_LEN);
1327 memcpy(adapter->rss_info.rsstable, rsstable,
1328 RSS_INDIR_TABLE_LEN);
/* ethtool -m (module info): read EEPROM page A0 of the transceiver and
 * use the SFF-8472 compliance byte to report whether the module exposes
 * one page (SFF-8079) or two pages (SFF-8472) of EEPROM.
 * NOTE(review): the privilege-check error return and the else framing
 * are elided from this numbered listing.
 */
1332 static int be_get_module_info(struct net_device *netdev,
1333 struct ethtool_modinfo *modinfo)
1335 struct be_adapter *adapter = netdev_priv(netdev);
1336 u8 page_data[PAGE_DATA_LEN];
/* Only fully-privileged functions may touch the transceiver. */
1339 if (!check_privilege(adapter, MAX_PRIVILEGES))
1342 status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
1343 0, PAGE_DATA_LEN, page_data);
/* Compliance byte 0 => plain SFF-8079 module, single A0 page. */
1345 if (!page_data[SFP_PLUS_SFF_8472_COMP]) {
1346 modinfo->type = ETH_MODULE_SFF_8079;
1347 modinfo->eeprom_len = PAGE_DATA_LEN;
/* Non-zero => SFF-8472: diagnostics page A2 also available. */
1349 modinfo->type = ETH_MODULE_SFF_8472;
1350 modinfo->eeprom_len = 2 * PAGE_DATA_LEN;
/* Translate any FW status into an errno for ethtool. */
1353 return be_cmd_status(status);
/* ethtool -m (eeprom dump): read [offset, offset+len) of transceiver
 * EEPROM, splitting the request across page A0 (first PAGE_DATA_LEN
 * bytes) and page A2 (the rest) as needed.
 * NOTE(review): error-bailout lines after each FW read are elided from
 * this numbered listing.
 */
1356 static int be_get_module_eeprom(struct net_device *netdev,
1357 struct ethtool_eeprom *eeprom, u8 *data)
1359 struct be_adapter *adapter = netdev_priv(netdev);
/* Only fully-privileged functions may touch the transceiver. */
1363 if (!check_privilege(adapter, MAX_PRIVILEGES))
1366 begin = eeprom->offset;
1367 end = eeprom->offset + eeprom->len;
/* Portion of the request that falls inside page A0. */
1369 if (begin < PAGE_DATA_LEN) {
1370 status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, begin,
1371 min_t(u32, end, PAGE_DATA_LEN) - begin,
/* Advance past the A0 portion before reading A2. */
1376 data += PAGE_DATA_LEN - begin;
1377 begin = PAGE_DATA_LEN;
/* Portion of the request that falls inside page A2 (offsets are
 * relative to the start of that page). */
1380 if (end > PAGE_DATA_LEN) {
1381 status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A2,
1382 begin - PAGE_DATA_LEN,
1388 return be_cmd_status(status);
/* ethtool private flags getter: return the driver's current flag word. */
1391 static u32 be_get_priv_flags(struct net_device *netdev)
1393 struct be_adapter *adapter = netdev_priv(netdev);
1395 return adapter->priv_flags;
/* ethtool private flags setter: currently only toggles
 * BE_DISABLE_TPE_RECOVERY (transient parity error / HW error recovery).
 * Acts only when the flag's value actually changes.
 * NOTE(review): the inner if/else framing lines are elided from this
 * numbered listing — the two branches below are the enable/disable arms.
 */
1398 static int be_set_priv_flags(struct net_device *netdev, u32 flags)
1400 struct be_adapter *adapter = netdev_priv(netdev);
1401 bool tpe_old = !!(adapter->priv_flags & BE_DISABLE_TPE_RECOVERY);
1402 bool tpe_new = !!(flags & BE_DISABLE_TPE_RECOVERY);
/* No-op unless the requested state differs from the current one. */
1404 if (tpe_old != tpe_new) {
/* Branch: user set the flag => disable HW error recovery. */
1406 adapter->priv_flags |= BE_DISABLE_TPE_RECOVERY;
1407 dev_info(&adapter->pdev->dev,
1408 "HW error recovery is disabled\n");
/* Branch: user cleared the flag => re-enable HW error recovery. */
1410 adapter->priv_flags &= ~BE_DISABLE_TPE_RECOVERY;
1411 dev_info(&adapter->pdev->dev,
1412 "HW error recovery is enabled\n");
/* ethtool_ops vtable for the benet driver: wires the handlers defined in
 * this file (and earlier in it) into the kernel's ethtool framework.
 * NOTE(review): the closing "};" is elided from this numbered listing.
 */
1419 const struct ethtool_ops be_ethtool_ops = {
1420 .get_drvinfo = be_get_drvinfo,
1421 .get_wol = be_get_wol,
1422 .set_wol = be_set_wol,
1423 .get_link = ethtool_op_get_link,
1424 .get_eeprom_len = be_get_eeprom_len,
1425 .get_eeprom = be_read_eeprom,
1426 .get_coalesce = be_get_coalesce,
1427 .set_coalesce = be_set_coalesce,
1428 .get_ringparam = be_get_ringparam,
1429 .get_pauseparam = be_get_pauseparam,
1430 .set_pauseparam = be_set_pauseparam,
1431 .set_priv_flags = be_set_priv_flags,
1432 .get_priv_flags = be_get_priv_flags,
1433 .get_strings = be_get_stat_strings,
1434 .set_phys_id = be_set_phys_id,
1435 .set_dump = be_set_dump,
1436 .get_msglevel = be_get_msg_level,
1437 .set_msglevel = be_set_msg_level,
1438 .get_sset_count = be_get_sset_count,
1439 .get_ethtool_stats = be_get_ethtool_stats,
1440 .flash_device = be_do_flash,
1441 .self_test = be_self_test,
1442 .get_rxnfc = be_get_rxnfc,
1443 .set_rxnfc = be_set_rxnfc,
1444 .get_rxfh_indir_size = be_get_rxfh_indir_size,
1445 .get_rxfh_key_size = be_get_rxfh_key_size,
1446 .get_rxfh = be_get_rxfh,
1447 .set_rxfh = be_set_rxfh,
1448 .get_dump_flag = be_get_dump_flag,
1449 .get_dump_data = be_get_dump_data,
1450 .get_channels = be_get_channels,
1451 .set_channels = be_set_channels,
1452 .get_module_info = be_get_module_info,
1453 .get_module_eeprom = be_get_module_eeprom,
1454 .get_link_ksettings = be_get_link_ksettings,