1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2013-2015 Chelsio Communications. All rights reserved.
6 #include <linux/firmware.h>
7 #include <linux/mdio.h>
12 #include "cxgb4_cudbg.h"
13 #include "cxgb4_filter.h"
14 #include "cxgb4_tc_flower.h"
16 #define EEPROM_MAGIC 0x38E2F10C
/* ethtool .get_msglevel: report the adapter's current debug message mask. */
18 static u32 get_msglevel(struct net_device *dev)
20 return netdev2adap(dev)->msg_enable;
/* ethtool .set_msglevel: store the new debug message mask on the adapter. */
23 static void set_msglevel(struct net_device *dev, u32 val)
25 netdev2adap(dev)->msg_enable = val;
/* Self-test indices for ethtool -t; MAX_TEST doubles as the test count. */
28 enum cxgb4_ethtool_tests {
29 CXGB4_ETHTOOL_LB_TEST,
30 CXGB4_ETHTOOL_MAX_TEST,
/* Names reported for ETH_SS_TEST, indexed by enum cxgb4_ethtool_tests. */
33 static const char cxgb4_selftest_strings[CXGB4_ETHTOOL_MAX_TEST][ETH_GSTRING_LEN] = {
34 "Loop back test (offline)",
/* Human-readable flash region names; entries elided in this view —
 * presumably indexed by the CXGB4_ETHTOOL_FLASH_* region codes (TODO confirm).
 */
37 static const char * const flash_region_strings[] = {
/* Per-port statistics names for ETH_SS_STATS. Order must match the layout
 * of struct port_stats followed by struct queue_port_stats (see get_stats(),
 * which copies those structs directly over this string table).
 */
45 static const char stats_strings[][ETH_GSTRING_LEN] = {
48 "tx_broadcast_frames ",
49 "tx_multicast_frames ",
54 "tx_frames_65_to_127 ",
55 "tx_frames_128_to_255 ",
56 "tx_frames_256_to_511 ",
57 "tx_frames_512_to_1023 ",
58 "tx_frames_1024_to_1518 ",
59 "tx_frames_1519_to_max ",
74 "rx_broadcast_frames ",
75 "rx_multicast_frames ",
78 "rx_frames_too_long ",
86 "rx_frames_65_to_127 ",
87 "rx_frames_128_to_255 ",
88 "rx_frames_256_to_511 ",
89 "rx_frames_512_to_1023 ",
90 "rx_frames_1024_to_1518 ",
91 "rx_frames_1519_to_max ",
103 "rx_bg0_frames_dropped ",
104 "rx_bg1_frames_dropped ",
105 "rx_bg2_frames_dropped ",
106 "rx_bg3_frames_dropped ",
107 "rx_bg0_frames_trunc ",
108 "rx_bg1_frames_trunc ",
109 "rx_bg2_frames_trunc ",
110 "rx_bg3_frames_trunc ",
/* kTLS TX counters, only present when the TLS offload is built in; the
 * matching fields are conditional in struct queue_port_stats below.
 */
120 #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
121 "tx_tls_encrypted_packets",
122 "tx_tls_encrypted_bytes ",
125 "tx_tls_skip_no_sync_data",
126 "tx_tls_drop_no_sync_data",
127 "tx_tls_drop_bypass_req ",
/* Adapter-wide statistics names; order must match struct adapter_stats. */
131 static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
135 "write_coal_success ",
/* Loopback statistics names; first entry is a visual separator, the rest
 * must match struct lb_port_stats field order (see get_stats()).
 */
139 static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
140 "-------Loopback----------- ",
149 "frames_128_to_255 ",
150 "frames_256_to_511 ",
151 "frames_512_to_1023 ",
152 "frames_1024_to_1518 ",
153 "frames_1519_to_max ",
155 "bg0_frames_dropped ",
156 "bg1_frames_dropped ",
157 "bg2_frames_dropped ",
158 "bg3_frames_dropped ",
/* Private flag names for ETH_SS_PRIV_FLAGS, indexed by PRIV_FLAG_*_BIT. */
165 static const char cxgb4_priv_flags_strings[][ETH_GSTRING_LEN] = {
166 [PRIV_FLAG_PORT_TX_VM_BIT] = "port_tx_vm_wr",
/* ethtool .get_sset_count: number of strings in the requested string set.
 * NOTE(review): the switch header and some case labels are elided in this
 * view; ETH_SS_STATS returns the three stats tables combined.
 */
169 static int get_sset_count(struct net_device *dev, int sset)
173 return ARRAY_SIZE(stats_strings) +
174 ARRAY_SIZE(adapter_stats_strings) +
175 ARRAY_SIZE(loopback_stats_strings);
176 case ETH_SS_PRIV_FLAGS:
177 return ARRAY_SIZE(cxgb4_priv_flags_strings);
179 return ARRAY_SIZE(cxgb4_selftest_strings);
/* ethtool .get_regs_len: size in bytes of the register dump buffer. */
185 static int get_regs_len(struct net_device *dev)
187 struct adapter *adap = netdev2adap(dev);
189 return t4_get_regs_len(adap);
/* ethtool .get_eeprom_len: body elided here — presumably returns EEPROMSIZE
 * (the constant used by get_eeprom() below); TODO confirm.
 */
192 static int get_eeprom_len(struct net_device *dev)
/* ethtool .get_drvinfo: fill in driver name, PCI bus info, regdump length,
 * firmware + TP microcode versions, expansion-ROM version and the private
 * flag count.
 */
197 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
199 struct adapter *adapter = netdev2adap(dev);
202 strscpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
203 strscpy(info->bus_info, pci_name(adapter->pdev),
204 sizeof(info->bus_info));
205 info->regdump_len = get_regs_len(dev);
/* fw_vers == 0 means no firmware version is known; leave the field empty */
207 if (adapter->params.fw_vers)
208 snprintf(info->fw_version, sizeof(info->fw_version),
209 "%u.%u.%u.%u, TP %u.%u.%u.%u",
210 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
211 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
212 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
213 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers),
214 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
215 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
216 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
217 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
/* expansion ROM version is best-effort: only filled in when readable */
219 if (!t4_get_exprom_version(adapter, &exprom_vers))
220 snprintf(info->erom_version, sizeof(info->erom_version),
222 FW_HDR_FW_VER_MAJOR_G(exprom_vers),
223 FW_HDR_FW_VER_MINOR_G(exprom_vers),
224 FW_HDR_FW_VER_MICRO_G(exprom_vers),
225 FW_HDR_FW_VER_BUILD_G(exprom_vers));
226 info->n_priv_flags = ARRAY_SIZE(cxgb4_priv_flags_strings);
/* ethtool .get_strings: copy out the string table for the requested set.
 * For ETH_SS_STATS the three tables are concatenated in the same order as
 * the counters emitted by get_stats().
 */
229 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
231 if (stringset == ETH_SS_STATS) {
232 memcpy(data, stats_strings, sizeof(stats_strings));
233 data += sizeof(stats_strings);
234 memcpy(data, adapter_stats_strings,
235 sizeof(adapter_stats_strings));
236 data += sizeof(adapter_stats_strings);
237 memcpy(data, loopback_stats_strings,
238 sizeof(loopback_stats_strings));
239 } else if (stringset == ETH_SS_PRIV_FLAGS) {
240 memcpy(data, cxgb4_priv_flags_strings,
241 sizeof(cxgb4_priv_flags_strings));
242 } else if (stringset == ETH_SS_TEST) {
243 memcpy(data, cxgb4_selftest_strings,
244 sizeof(cxgb4_selftest_strings));
248 /* port stats maintained per queue of the port. They should be in the same
249 * order as in stats_strings above.
251 struct queue_port_stats {
/* kTLS TX counters mirror the conditional tail of stats_strings */
260 #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
261 u64 tx_tls_encrypted_packets;
262 u64 tx_tls_encrypted_bytes;
265 u64 tx_tls_skip_no_sync_data;
266 u64 tx_tls_drop_no_sync_data;
267 u64 tx_tls_drop_bypass_req;
/* Adapter-wide stats; field order must match adapter_stats_strings. */
271 struct adapter_stats {
/* Accumulate per-queue SGE statistics for all queue sets of a port into *s.
 * Sums the Ethernet TX/RX queue counters, then (if present) the ETHOFLD TX
 * queues, and finally snapshots the per-port kTLS counters.
 */
279 static void collect_sge_port_stats(const struct adapter *adap,
280 const struct port_info *p,
281 struct queue_port_stats *s)
283 const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
284 const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
285 #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
286 const struct ch_ktls_port_stats_debug *ktls_stats;
288 struct sge_eohw_txq *eohw_tx;
291 memset(s, 0, sizeof(*s));
/* walk this port's contiguous run of TX/RX queue sets */
292 for (i = 0; i < p->nqsets; i++, rx++, tx++) {
295 s->tx_csum += tx->tx_cso;
296 s->rx_csum += rx->stats.rx_cso;
297 s->vlan_ex += rx->stats.vlan_ex;
298 s->vlan_ins += tx->vlan_ins;
299 s->gro_pkts += rx->stats.lro_pkts;
300 s->gro_merged += rx->stats.lro_merged;
/* ETHOFLD TX queues exist only when TC-MQPRIO offload is configured */
303 if (adap->sge.eohw_txq) {
304 eohw_tx = &adap->sge.eohw_txq[p->first_qset];
305 for (i = 0; i < p->nqsets; i++, eohw_tx++) {
306 s->tso += eohw_tx->tso;
307 s->uso += eohw_tx->uso;
308 s->tx_csum += eohw_tx->tx_cso;
309 s->vlan_ins += eohw_tx->vlan_ins;
/* kTLS counters are per-port atomics; read a consistent snapshot */
312 #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
313 ktls_stats = &adap->ch_ktls_stats.ktls_port[p->port_id];
314 s->tx_tls_encrypted_packets =
315 atomic64_read(&ktls_stats->ktls_tx_encrypted_packets);
316 s->tx_tls_encrypted_bytes =
317 atomic64_read(&ktls_stats->ktls_tx_encrypted_bytes);
318 s->tx_tls_ctx = atomic64_read(&ktls_stats->ktls_tx_ctx);
319 s->tx_tls_ooo = atomic64_read(&ktls_stats->ktls_tx_ooo);
320 s->tx_tls_skip_no_sync_data =
321 atomic64_read(&ktls_stats->ktls_tx_skip_no_sync_data);
322 s->tx_tls_drop_no_sync_data =
323 atomic64_read(&ktls_stats->ktls_tx_drop_no_sync_data);
324 s->tx_tls_drop_bypass_req =
325 atomic64_read(&ktls_stats->ktls_tx_drop_bypass_req);
/* Fill *s with adapter-wide counters: doorbell FIFO events plus, on T5 and
 * later, the write-combining success count derived from SGE statistics
 * registers (only meaningful when the stat source selector equals 7).
 */
329 static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
333 memset(s, 0, sizeof(*s));
335 s->db_drop = adap->db_stats.db_drop;
336 s->db_full = adap->db_stats.db_full;
337 s->db_empty = adap->db_stats.db_empty;
/* write-combining stats do not exist on T4 hardware */
339 if (!is_t4(adap->params.chip)) {
342 v = t4_read_reg(adap, SGE_STAT_CFG_A);
343 if (STATSOURCE_T5_G(v) == 7) {
344 val2 = t4_read_reg(adap, SGE_STAT_MATCH_A);
345 val1 = t4_read_reg(adap, SGE_STAT_TOTAL_A);
346 s->wc_success = val1 - val2;
/* ethtool .get_ethtool_stats: emit counters in the exact order of the
 * string tables — port stats, queue stats, adapter stats, then port id
 * followed by the loopback counters.
 */
352 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
355 struct port_info *pi = netdev_priv(dev);
356 struct adapter *adapter = pi->adapter;
357 struct lb_port_stats s;
361 t4_get_port_stats_offset(adapter, pi->tx_chan,
362 (struct port_stats *)data,
/* each struct is copied in place, then the cursor advances past it */
365 data += sizeof(struct port_stats) / sizeof(u64);
366 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
367 data += sizeof(struct queue_port_stats) / sizeof(u64);
368 collect_adapter_stats(adapter, (struct adapter_stats *)data);
369 data += sizeof(struct adapter_stats) / sizeof(u64);
/* first loopback "counter" is the port id (matches the separator string) */
371 *data++ = (u64)pi->port_id;
372 memset(&s, 0, sizeof(s));
373 t4_get_lb_stats(adapter, pi->port_id, &s);
/* -1: the separator string has no counter of its own */
376 for (i = 0; i < ARRAY_SIZE(loopback_stats_strings) - 1; i++)
377 *data++ = (unsigned long long)*p0++;
/* ethtool .get_regs: dump the adapter register space into buf. */
380 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
383 struct adapter *adap = netdev2adap(dev);
386 buf_size = t4_get_regs_len(adap);
387 regs->version = mk_adap_vers(adap);
388 t4_get_regs(adap, buf, buf_size);
/* ethtool .nway_reset: restart link autonegotiation. Only valid while the
 * interface is running and autoneg is enabled (elided error returns).
 */
391 static int restart_autoneg(struct net_device *dev)
393 struct port_info *p = netdev_priv(dev);
395 if (!netif_running(dev))
397 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
399 t4_restart_aneg(p->adapter, p->adapter->pf, p->tx_chan);
/* ethtool .set_phys_id: blink / restore the port LED to locate the port.
 * The elided lines presumably set val for the active/inactive states —
 * TODO confirm against the full source.
 */
403 static int identify_port(struct net_device *dev,
404 enum ethtool_phys_id_state state)
407 struct adapter *adap = netdev2adap(dev);
409 if (state == ETHTOOL_ID_ACTIVE)
411 else if (state == ETHTOOL_ID_INACTIVE)
416 return t4_identify_port(adap, adap->pf, netdev2pinfo(dev)->viid, val);
420 * from_fw_port_mod_type - translate Firmware Port/Module type to Ethtool
421 * @port_type: Firmware Port Type
422 * @mod_type: Firmware Module Type
424 * Translate Firmware Port/Module type to Ethtool Port Type.
426 static int from_fw_port_mod_type(enum fw_port_type port_type,
427 enum fw_port_module_type mod_type)
/* BASE-T copper ports */
429 if (port_type == FW_PORT_TYPE_BT_SGMII ||
430 port_type == FW_PORT_TYPE_BT_XFI ||
431 port_type == FW_PORT_TYPE_BT_XAUI) {
/* fixed fiber ports */
433 } else if (port_type == FW_PORT_TYPE_FIBER_XFI ||
434 port_type == FW_PORT_TYPE_FIBER_XAUI) {
/* pluggable modules: result depends on the inserted module type */
436 } else if (port_type == FW_PORT_TYPE_SFP ||
437 port_type == FW_PORT_TYPE_QSFP_10G ||
438 port_type == FW_PORT_TYPE_QSA ||
439 port_type == FW_PORT_TYPE_QSFP ||
440 port_type == FW_PORT_TYPE_CR4_QSFP ||
441 port_type == FW_PORT_TYPE_CR_QSFP ||
442 port_type == FW_PORT_TYPE_CR2_QSFP ||
443 port_type == FW_PORT_TYPE_SFP28) {
444 if (mod_type == FW_PORT_MOD_TYPE_LR ||
445 mod_type == FW_PORT_MOD_TYPE_SR ||
446 mod_type == FW_PORT_MOD_TYPE_ER ||
447 mod_type == FW_PORT_MOD_TYPE_LRM)
449 else if (mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
450 mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
/* backplane ports */
454 } else if (port_type == FW_PORT_TYPE_KR4_100G ||
455 port_type == FW_PORT_TYPE_KR_SFP28 ||
456 port_type == FW_PORT_TYPE_KR_XLAUI) {
464 * speed_to_fw_caps - translate Port Speed to Firmware Port Capabilities
465 * @speed: speed in Kb/s
467 * Translates a specific Port Speed into a Firmware Port Capabilities
470 static unsigned int speed_to_fw_caps(int speed)
/* switch/case lines are elided in this view; each case maps one ethtool
 * SPEED_* value to the matching 32-bit firmware speed capability bit
 */
473 return FW_PORT_CAP32_SPEED_100M;
475 return FW_PORT_CAP32_SPEED_1G;
477 return FW_PORT_CAP32_SPEED_10G;
479 return FW_PORT_CAP32_SPEED_25G;
481 return FW_PORT_CAP32_SPEED_40G;
483 return FW_PORT_CAP32_SPEED_50G;
485 return FW_PORT_CAP32_SPEED_100G;
487 return FW_PORT_CAP32_SPEED_200G;
489 return FW_PORT_CAP32_SPEED_400G;
494 * fw_caps_to_lmm - translate Firmware to ethtool Link Mode Mask
495 * @port_type: Firmware Port Type
496 * @fw_caps: Firmware Port Capabilities
497 * @link_mode_mask: ethtool Link Mode Mask
499 * Translate a Firmware Port Capabilities specification to an ethtool
502 static void fw_caps_to_lmm(enum fw_port_type port_type,
503 fw_port_cap32_t fw_caps,
504 unsigned long *link_mode_mask)
/* set one ETHTOOL_LINK_MODE_* bit in the caller's mask */
506 #define SET_LMM(__lmm_name) \
508 __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
/* conditionally map one FW_PORT_CAP32_* bit to a link-mode bit */
512 #define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \
514 if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \
515 SET_LMM(__lmm_name); \
/* the media type of the reported link modes depends on the port type */
519 case FW_PORT_TYPE_BT_SGMII:
520 case FW_PORT_TYPE_BT_XFI:
521 case FW_PORT_TYPE_BT_XAUI:
523 FW_CAPS_TO_LMM(SPEED_100M, 100baseT_Full);
524 FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
525 FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
528 case FW_PORT_TYPE_KX4:
529 case FW_PORT_TYPE_KX:
531 FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
532 FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
535 case FW_PORT_TYPE_KR:
537 FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
540 case FW_PORT_TYPE_BP_AP:
542 FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
543 FW_CAPS_TO_LMM(SPEED_10G, 10000baseR_FEC);
544 FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
547 case FW_PORT_TYPE_BP4_AP:
549 FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
550 FW_CAPS_TO_LMM(SPEED_10G, 10000baseR_FEC);
551 FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
552 FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
555 case FW_PORT_TYPE_FIBER_XFI:
556 case FW_PORT_TYPE_FIBER_XAUI:
557 case FW_PORT_TYPE_SFP:
558 case FW_PORT_TYPE_QSFP_10G:
559 case FW_PORT_TYPE_QSA:
561 FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
562 FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
565 case FW_PORT_TYPE_BP40_BA:
566 case FW_PORT_TYPE_QSFP:
568 FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
569 FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
570 FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full);
573 case FW_PORT_TYPE_CR_QSFP:
574 case FW_PORT_TYPE_SFP28:
576 FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
577 FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
578 FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
581 case FW_PORT_TYPE_KR_SFP28:
583 FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
584 FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
585 FW_CAPS_TO_LMM(SPEED_25G, 25000baseKR_Full);
588 case FW_PORT_TYPE_KR_XLAUI:
590 FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
591 FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
592 FW_CAPS_TO_LMM(SPEED_40G, 40000baseKR4_Full);
595 case FW_PORT_TYPE_CR2_QSFP:
597 FW_CAPS_TO_LMM(SPEED_50G, 50000baseSR2_Full);
600 case FW_PORT_TYPE_KR4_100G:
601 case FW_PORT_TYPE_CR4_QSFP:
603 FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
604 FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
605 FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full);
606 FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
607 FW_CAPS_TO_LMM(SPEED_50G, 50000baseCR2_Full);
608 FW_CAPS_TO_LMM(SPEED_100G, 100000baseCR4_Full);
/* FEC modes are independent of the port type */
615 if (fw_caps & FW_PORT_CAP32_FEC_V(FW_PORT_CAP32_FEC_M)) {
616 FW_CAPS_TO_LMM(FEC_RS, FEC_RS);
617 FW_CAPS_TO_LMM(FEC_BASER_RS, FEC_BASER);
/* autoneg and pause are likewise port-type independent */
622 FW_CAPS_TO_LMM(ANEG, Autoneg);
623 FW_CAPS_TO_LMM(802_3_PAUSE, Pause);
624 FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause);
626 #undef FW_CAPS_TO_LMM
631 * lmm_to_fw_caps - translate ethtool Link Mode Mask to Firmware
633 * @link_mode_mask: ethtool Link Mode Mask
635 * Translate ethtool Link Mode Mask into a Firmware Port capabilities
638 static unsigned int lmm_to_fw_caps(const unsigned long *link_mode_mask)
640 unsigned int fw_caps = 0;
/* conditionally map one ETHTOOL_LINK_MODE_* bit to a FW_PORT_CAP32_* bit */
642 #define LMM_TO_FW_CAPS(__lmm_name, __fw_name) \
644 if (test_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
646 fw_caps |= FW_PORT_CAP32_ ## __fw_name; \
649 LMM_TO_FW_CAPS(100baseT_Full, SPEED_100M);
650 LMM_TO_FW_CAPS(1000baseT_Full, SPEED_1G);
651 LMM_TO_FW_CAPS(10000baseT_Full, SPEED_10G);
652 LMM_TO_FW_CAPS(40000baseSR4_Full, SPEED_40G);
653 LMM_TO_FW_CAPS(25000baseCR_Full, SPEED_25G);
654 LMM_TO_FW_CAPS(50000baseCR2_Full, SPEED_50G);
655 LMM_TO_FW_CAPS(100000baseCR4_Full, SPEED_100G);
657 #undef LMM_TO_FW_CAPS
/* ethtool .get_link_ksettings: report port type, MDIO addressing, the
 * supported/advertised/peer link-mode masks, and current speed/duplex/
 * autoneg state.
 */
662 static int get_link_ksettings(struct net_device *dev,
663 struct ethtool_link_ksettings *link_ksettings)
665 struct port_info *pi = netdev_priv(dev);
666 struct ethtool_link_settings *base = &link_ksettings->base;
668 /* For the nonce, the Firmware doesn't send up Port State changes
669 * when the Virtual Interface attached to the Port is down. So
670 * if it's down, let's grab any changes.
672 if (!netif_running(dev))
673 (void)t4_update_port_info(pi);
675 ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
676 ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
677 ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising);
679 base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type);
/* mdio_addr < 0 means no MDIO access for this port */
681 if (pi->mdio_addr >= 0) {
682 base->phy_address = pi->mdio_addr;
683 base->mdio_support = (pi->port_type == FW_PORT_TYPE_BT_SGMII
684 ? ETH_MDIO_SUPPORTS_C22
685 : ETH_MDIO_SUPPORTS_C45);
687 base->phy_address = 255;
688 base->mdio_support = 0;
691 fw_caps_to_lmm(pi->port_type, pi->link_cfg.pcaps,
692 link_ksettings->link_modes.supported);
693 fw_caps_to_lmm(pi->port_type,
694 t4_link_acaps(pi->adapter,
697 link_ksettings->link_modes.advertising);
698 fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps,
699 link_ksettings->link_modes.lp_advertising);
/* speed is only meaningful while the carrier is up */
701 base->speed = (netif_carrier_ok(dev)
/* hardware only does full duplex */
704 base->duplex = DUPLEX_FULL;
706 base->autoneg = pi->link_cfg.autoneg;
707 if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG)
708 ethtool_link_ksettings_add_link_mode(link_ksettings,
710 if (pi->link_cfg.autoneg)
711 ethtool_link_ksettings_add_link_mode(link_ksettings,
712 advertising, Autoneg);
/* ethtool .set_link_ksettings: apply a new link configuration. With
 * autoneg off (or unsupported) the requested fixed speed must be in the
 * port's physical capabilities; with autoneg on, the advertised link modes
 * are converted to firmware capabilities. On firmware rejection the old
 * link_config is restored (restore path elided in this view).
 */
717 static int set_link_ksettings(struct net_device *dev,
718 const struct ethtool_link_ksettings *link_ksettings)
720 struct port_info *pi = netdev_priv(dev);
721 struct link_config *lc = &pi->link_cfg;
722 const struct ethtool_link_settings *base = &link_ksettings->base;
723 struct link_config old_lc;
724 unsigned int fw_caps;
727 /* only full-duplex supported */
728 if (base->duplex != DUPLEX_FULL)
732 if (!(lc->pcaps & FW_PORT_CAP32_ANEG) ||
733 base->autoneg == AUTONEG_DISABLE) {
734 fw_caps = speed_to_fw_caps(base->speed);
736 /* Speed must be supported by Physical Port Capabilities. */
737 if (!(lc->pcaps & fw_caps))
740 lc->speed_caps = fw_caps;
/* autoneg path: advertised modes must be a subset of pcaps */
744 lmm_to_fw_caps(link_ksettings->link_modes.advertising);
745 if (!(lc->pcaps & fw_caps))
748 lc->acaps = fw_caps | FW_PORT_CAP32_ANEG;
750 lc->autoneg = base->autoneg;
752 /* If the firmware rejects the Link Configuration request, back out
753 * the changes and report the error.
755 ret = t4_link_l1cfg(pi->adapter, pi->adapter->mbox, pi->tx_chan, lc);
762 /* Translate the Firmware FEC value into the ethtool value. */
763 static inline unsigned int fwcap_to_eth_fec(unsigned int fw_fec)
765 unsigned int eth_fec = 0;
767 if (fw_fec & FW_PORT_CAP32_FEC_RS)
768 eth_fec |= ETHTOOL_FEC_RS;
769 if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
770 eth_fec |= ETHTOOL_FEC_BASER;
772 /* if nothing is set, then FEC is off */
774 eth_fec = ETHTOOL_FEC_OFF;
779 /* Translate Common Code FEC value into ethtool value. */
780 static inline unsigned int cc_to_eth_fec(unsigned int cc_fec)
782 unsigned int eth_fec = 0;
784 if (cc_fec & FEC_AUTO)
785 eth_fec |= ETHTOOL_FEC_AUTO;
/* the FEC_RS check is elided in this view; RS maps to ETHTOOL_FEC_RS */
787 eth_fec |= ETHTOOL_FEC_RS;
788 if (cc_fec & FEC_BASER_RS)
789 eth_fec |= ETHTOOL_FEC_BASER;
791 /* if nothing is set, then FEC is off */
793 eth_fec = ETHTOOL_FEC_OFF;
798 /* Translate ethtool FEC value into Common Code value. */
799 static inline unsigned int eth_to_cc_fec(unsigned int eth_fec)
801 unsigned int cc_fec = 0;
/* OFF short-circuits: no FEC bits at all (early return elided here) */
803 if (eth_fec & ETHTOOL_FEC_OFF)
806 if (eth_fec & ETHTOOL_FEC_AUTO)
808 if (eth_fec & ETHTOOL_FEC_RS)
810 if (eth_fec & ETHTOOL_FEC_BASER)
811 cc_fec |= FEC_BASER_RS;
/* ethtool .get_fecparam: report supported FEC modes (from physical port
 * capabilities) and the FEC mode currently in use on the link.
 */
816 static int get_fecparam(struct net_device *dev, struct ethtool_fecparam *fec)
818 const struct port_info *pi = netdev_priv(dev);
819 const struct link_config *lc = &pi->link_cfg;
821 /* Translate the Firmware FEC Support into the ethtool value. We
822 * always support IEEE 802.3 "automatic" selection of Link FEC type if
823 * any FEC is supported.
825 fec->fec = fwcap_to_eth_fec(lc->pcaps);
826 if (fec->fec != ETHTOOL_FEC_OFF)
827 fec->fec |= ETHTOOL_FEC_AUTO;
829 /* Translate the current internal FEC parameters into the
832 fec->active_fec = cc_to_eth_fec(lc->fec);
/* ethtool .set_fecparam: request a new FEC mode via an L1 configure; on
 * firmware failure the previous link_config is restored (restore path
 * elided in this view).
 */
837 static int set_fecparam(struct net_device *dev, struct ethtool_fecparam *fec)
839 struct port_info *pi = netdev_priv(dev);
840 struct link_config *lc = &pi->link_cfg;
841 struct link_config old_lc;
844 /* Save old Link Configuration in case the L1 Configure below
849 /* Try to perform the L1 Configure and return the result of that
850 * effort. If it fails, revert the attempted change.
852 lc->requested_fec = eth_to_cc_fec(fec->fec);
853 ret = t4_link_l1cfg(pi->adapter, pi->adapter->mbox,
/* ethtool .get_pauseparam: report pause autoneg and the RX/TX pause state
 * currently advertised on the link.
 */
860 static void get_pauseparam(struct net_device *dev,
861 struct ethtool_pauseparam *epause)
863 struct port_info *p = netdev_priv(dev);
865 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
866 epause->rx_pause = (p->link_cfg.advertised_fc & PAUSE_RX) != 0;
867 epause->tx_pause = (p->link_cfg.advertised_fc & PAUSE_TX) != 0;
/* ethtool .set_pauseparam: update requested flow control. Pause autoneg
 * requires the port to support link autonegotiation. The new settings are
 * pushed to the firmware only while the interface is running; otherwise
 * they take effect at the next link bring-up.
 */
870 static int set_pauseparam(struct net_device *dev,
871 struct ethtool_pauseparam *epause)
873 struct port_info *p = netdev_priv(dev);
874 struct link_config *lc = &p->link_cfg;
876 if (epause->autoneg == AUTONEG_DISABLE)
877 lc->requested_fc = 0;
878 else if (lc->pcaps & FW_PORT_CAP32_ANEG)
879 lc->requested_fc = PAUSE_AUTONEG;
883 if (epause->rx_pause)
884 lc->requested_fc |= PAUSE_RX;
885 if (epause->tx_pause)
886 lc->requested_fc |= PAUSE_TX;
887 if (netif_running(dev))
888 return t4_link_l1cfg(p->adapter, p->adapter->mbox, p->tx_chan,
/* ethtool .get_ringparam: report ring size limits and the current sizes of
 * the first queue set's free list, response queue and TX queue. The "mini"
 * ring is mapped onto the response queue; jumbo rings are not used.
 */
893 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
894 struct kernel_ethtool_ringparam *kernel_e,
895 struct netlink_ext_ack *extack)
897 const struct port_info *pi = netdev_priv(dev);
898 const struct sge *s = &pi->adapter->sge;
900 e->rx_max_pending = MAX_RX_BUFFERS;
901 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
902 e->rx_jumbo_max_pending = 0;
903 e->tx_max_pending = MAX_TXQ_ENTRIES;
/* -8: the free list carries 8 extra entries of internal overhead */
905 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
906 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
907 e->rx_jumbo_pending = 0;
908 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
/* ethtool .set_ringparam: validate and store new ring sizes for all of
 * this port's queue sets. Rejected once the adapter is fully initialized
 * (queues already allocated); sizes then apply at the next queue setup.
 */
911 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
912 struct kernel_ethtool_ringparam *kernel_e,
913 struct netlink_ext_ack *extack)
916 const struct port_info *pi = netdev_priv(dev);
917 struct adapter *adapter = pi->adapter;
918 struct sge *s = &adapter->sge;
920 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
921 e->tx_pending > MAX_TXQ_ENTRIES ||
922 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
923 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
924 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
927 if (adapter->flags & CXGB4_FULL_INIT_DONE)
930 for (i = 0; i < pi->nqsets; ++i) {
931 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
/* +8 mirrors the -8 adjustment reported by get_sge_param() */
932 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
933 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
939 * set_rx_intr_params - set a net device's RX interrupt holdoff parameters
940 * @dev: the network device
941 * @us: the hold-off time in us, or 0 to disable timer
942 * @cnt: the hold-off packet count, or 0 to disable counter
944 * Set the RX interrupt hold-off parameters for a network device.
946 static int set_rx_intr_params(struct net_device *dev,
947 unsigned int us, unsigned int cnt)
950 struct port_info *pi = netdev_priv(dev);
951 struct adapter *adap = pi->adapter;
952 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
/* apply to every response queue belonging to this port */
954 for (i = 0; i < pi->nqsets; i++, q++) {
955 err = cxgb4_set_rspq_intr_params(&q->rspq, us, cnt);
/* Enable/disable adaptive RX interrupt coalescing on all of this port's
 * response queues.
 */
962 static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx)
965 struct port_info *pi = netdev_priv(dev);
966 struct adapter *adap = pi->adapter;
967 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
969 for (i = 0; i < pi->nqsets; i++, q++)
970 q->rspq.adaptive_rx = adaptive_rx;
/* Report the adaptive-RX setting; all of a port's queues share the same
 * value, so reading the first queue set suffices.
 */
975 static int get_adaptive_rx_setting(struct net_device *dev)
977 struct port_info *pi = netdev_priv(dev);
978 struct adapter *adap = pi->adapter;
979 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
981 return q->rspq.adaptive_rx;
984 /* Return the current global Adapter SGE Doorbell Queue Timer Tick for all
985 * Ethernet TX Queues.
987 static int get_dbqtimer_tick(struct net_device *dev)
989 struct port_info *pi = netdev_priv(dev);
990 struct adapter *adap = pi->adapter;
/* feature not supported by this adapter/firmware: report 0 (elided) */
992 if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
995 return adap->sge.dbqtimer_tick;
998 /* Return the SGE Doorbell Queue Timer Value for the Ethernet TX Queues
999 * associated with a Network Device.
1001 static int get_dbqtimer(struct net_device *dev)
1003 struct port_info *pi = netdev_priv(dev);
1004 struct adapter *adap = pi->adapter;
1005 struct sge_eth_txq *txq;
1007 txq = &adap->sge.ethtxq[pi->first_qset];
1009 if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
1012 /* all of the TX Queues use the same Timer Index */
1013 return adap->sge.dbqtimer_val[txq->dbqtimerix];
1016 /* Set the global Adapter SGE Doorbell Queue Timer Tick for all Ethernet TX
1017 * Queues. This is the fundamental "Tick" that sets the scale of values which
1018 * can be used. Individual Ethernet TX Queues index into a relatively small
1019 * array of Tick Multipliers. Changing the base Tick will thus change all of
1020 * the resulting Timer Values associated with those multipliers for all
1021 * Ethernet TX Queues.
1023 static int set_dbqtimer_tick(struct net_device *dev, int usecs)
1025 struct port_info *pi = netdev_priv(dev);
1026 struct adapter *adap = pi->adapter;
1027 struct sge *s = &adap->sge;
1031 if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
1034 /* return early if it's the same Timer Tick we're already using */
1035 if (s->dbqtimer_tick == usecs)
1038 /* attempt to set the new Timer Tick value */
1039 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
1040 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK));
1042 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val);
1045 s->dbqtimer_tick = usecs;
1047 /* if successful, reread resulting dependent Timer values */
1048 ret = t4_read_sge_dbqtimers(adap, ARRAY_SIZE(s->dbqtimer_val),
1053 /* Set the SGE Doorbell Queue Timer Value for the Ethernet TX Queues
1054 * associated with a Network Device. There is a relatively small array of
1055 * possible Timer Values so we need to pick the closest value available.
1057 static int set_dbqtimer(struct net_device *dev, int usecs)
1059 int qix, timerix, min_timerix, delta, min_delta;
1060 struct port_info *pi = netdev_priv(dev);
1061 struct adapter *adap = pi->adapter;
1062 struct sge *s = &adap->sge;
1063 struct sge_eth_txq *txq;
1067 if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
1070 /* Find the SGE Doorbell Timer Value that's closest to the requested
1073 min_delta = INT_MAX;
1075 for (timerix = 0; timerix < ARRAY_SIZE(s->dbqtimer_val); timerix++) {
1076 delta = s->dbqtimer_val[timerix] - usecs;
/* the absolute-value adjustment of delta is elided in this view */
1079 if (delta < min_delta) {
1081 min_timerix = timerix;
1085 /* Return early if it's the same Timer Index we're already using.
1086 * We use the same Timer Index for all of the TX Queues for an
1087 * interface so it's only necessary to check the first one.
1089 txq = &s->ethtxq[pi->first_qset];
1090 if (txq->dbqtimerix == min_timerix)
/* push the new index to the firmware per queue when it is already live */
1093 for (qix = 0; qix < pi->nqsets; qix++, txq++) {
1094 if (adap->flags & CXGB4_FULL_INIT_DONE) {
1096 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
1097 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_TIMERIX) |
1098 FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
1100 ret = t4_set_params(adap, adap->mbox, adap->pf, 0,
1105 txq->dbqtimerix = min_timerix;
1110 /* Set the global Adapter SGE Doorbell Queue Timer Tick for all Ethernet TX
1111 * Queues and the Timer Value for the Ethernet TX Queues associated with a
1112 * Network Device. Since changing the global Tick changes all of the
1113 * available Timer Values, we need to do this first before selecting the
1114 * resulting closest Timer Value. Moreover, since the Tick is global,
1115 * changing it affects the Timer Values for all Network Devices on the
1116 * adapter. So, before changing the Tick, we grab all of the current Timer
1117 * Values for other Network Devices on this Adapter and then attempt to select
1118 * new Timer Values which are close to the old values ...
1120 static int set_dbqtimer_tickval(struct net_device *dev,
1121 int tick_usecs, int timer_usecs)
1123 struct port_info *pi = netdev_priv(dev);
1124 struct adapter *adap = pi->adapter;
1125 int timer[MAX_NPORTS];
1129 /* Grab the other adapter Network Interface current timers and fill in
1130 * the new one for this Network Interface.
1132 for_each_port(adap, port)
1133 if (port == pi->port_id)
1134 timer[port] = timer_usecs;
1136 timer[port] = get_dbqtimer(adap->port[port]);
1138 /* Change the global Tick first ... */
1139 ret = set_dbqtimer_tick(dev, tick_usecs);
1143 /* ... and then set all of the Network Interface Timer Values ... */
1144 for_each_port(adap, port) {
1145 ret = set_dbqtimer(adap->port[port], timer[port]);
/* ethtool .set_coalesce: apply adaptive-RX, RX holdoff time/count, and the
 * SGE doorbell queue timer tick + value (TX coalescing parameters).
 */
1153 static int set_coalesce(struct net_device *dev,
1154 struct ethtool_coalesce *coalesce,
1155 struct kernel_ethtool_coalesce *kernel_coal,
1156 struct netlink_ext_ack *extack)
1160 set_adaptive_rx_setting(dev, coalesce->use_adaptive_rx_coalesce);
1162 ret = set_rx_intr_params(dev, coalesce->rx_coalesce_usecs,
1163 coalesce->rx_max_coalesced_frames);
/* tx_coalesce_usecs_irq carries the global tick; tx_coalesce_usecs the
 * per-device timer value
 */
1167 return set_dbqtimer_tickval(dev,
1168 coalesce->tx_coalesce_usecs_irq,
1169 coalesce->tx_coalesce_usecs);
/* ethtool .get_coalesce: mirror of set_coalesce, reading from the first
 * response queue of this port.
 */
1172 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c,
1173 struct kernel_ethtool_coalesce *kernel_coal,
1174 struct netlink_ext_ack *extack)
1176 const struct port_info *pi = netdev_priv(dev);
1177 const struct adapter *adap = pi->adapter;
1178 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
1180 c->rx_coalesce_usecs = qtimer_val(adap, rq);
/* packet-count holdoff is only reported when the counter is enabled */
1181 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN_F) ?
1182 adap->sge.counter_val[rq->pktcnt_idx] : 0;
1183 c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
1184 c->tx_coalesce_usecs_irq = get_dbqtimer_tick(dev);
1185 c->tx_coalesce_usecs = get_dbqtimer(dev);
1189 /* The next two routines implement eeprom read/write from physical addresses.
1191 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
/* translate physical EEPROM address to this PF's virtual VPD address */
1193 int vaddr = t4_eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
1196 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
1197 return vaddr < 0 ? vaddr : 0;
/* Write one 32-bit word to the EEPROM at a physical address via VPD. */
1200 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
1202 int vaddr = t4_eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
1205 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
1206 return vaddr < 0 ? vaddr : 0;
/* NOTE(review): EEPROM_MAGIC is also defined near the top of this file with
 * the identical value; the redefinition is benign but one copy should go.
 */
1209 #define EEPROM_MAGIC 0x38E2F10C
/* ethtool .get_eeprom: read the requested EEPROM range word-by-word into a
 * scratch buffer, then copy out exactly the (possibly unaligned) span.
 */
1211 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1215 struct adapter *adapter = netdev2adap(dev);
1216 u8 *buf = kvzalloc(EEPROMSIZE, GFP_KERNEL);
1221 e->magic = EEPROM_MAGIC;
/* round the start down to a word boundary; stop on first error */
1222 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1223 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
1226 memcpy(data, buf + e->offset, e->len);
/* ethtool .set_eeprom: write an EEPROM range. Non-primary PFs may only
 * write inside their own VPD slice. Unaligned edges are handled with a
 * read-modify-write of the first/last words; write protection is lifted
 * around the word loop and restored afterwards.
 */
1231 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1236 u32 aligned_offset, aligned_len, *p;
1237 struct adapter *adapter = netdev2adap(dev);
1239 if (eeprom->magic != EEPROM_MAGIC)
1242 aligned_offset = eeprom->offset & ~3;
1243 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
/* PF > 0: restrict writes to this function's own VPD window */
1245 if (adapter->pf > 0) {
1246 u32 start = 1024 + adapter->pf * EEPROMPFSIZE;
1248 if (aligned_offset < start ||
1249 aligned_offset + aligned_len > start + EEPROMPFSIZE)
1253 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1254 /* RMW possibly needed for first or last words.
1256 buf = kvzalloc(aligned_len, GFP_KERNEL);
1259 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
1260 if (!err && aligned_len > 4)
1261 err = eeprom_rd_phys(adapter,
1262 aligned_offset + aligned_len - 4,
1263 (u32 *)&buf[aligned_len - 4]);
1266 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
/* disable write protection for the duration of the update */
1271 err = t4_seeprom_wp(adapter, false);
1275 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
1276 err = eeprom_wr_phys(adapter, aligned_offset, *p);
1277 aligned_offset += 4;
/* re-enable write protection even if a word write failed */
1281 err = t4_seeprom_wp(adapter, true);
/* Flash a boot configuration image into adapter flash. */
1288 static int cxgb4_ethtool_flash_bootcfg(struct net_device *netdev,
1289 const u8 *data, u32 size)
1291 struct adapter *adap = netdev2adap(netdev);
1294 ret = t4_load_bootcfg(adap, data, size);
1296 dev_err(adap->pdev_dev, "Failed to load boot cfg image\n");
/* Flash an option-ROM boot image. The image is duplicated because
 * t4_load_boot() modifies the buffer, and the target flash offset is read
 * from the PCIe expansion-ROM offset register.
 */
1301 static int cxgb4_ethtool_flash_boot(struct net_device *netdev,
1302 const u8 *bdata, u32 size)
1304 struct adapter *adap = netdev2adap(netdev);
1305 unsigned int offset;
1309 data = kmemdup(bdata, size, GFP_KERNEL);
1313 offset = OFFSET_G(t4_read_reg(adap, PF_REG(0, PCIE_PF_EXPROM_OFST_A)));
1315 ret = t4_load_boot(adap, data, offset, size);
1317 dev_err(adap->pdev_dev, "Failed to load boot image\n");
/* Expected signature word at the head of a PHY firmware image. */
1323 #define CXGB4_PHY_SIG 0x130000ea
/* Check that a candidate PHY firmware image carries the PHY signature;
 * @size is unused in the visible lines (callers pass NULL).
 */
1325 static int cxgb4_validate_phy_image(const u8 *data, u32 *size)
1327 struct cxgb4_fw_data *header;
1329 header = (struct cxgb4_fw_data *)data;
1330 if (be32_to_cpu(header->signature) != CXGB4_PHY_SIG)
/* cxgb4_ethtool_flash_phy - validate and flash a PHY firmware image.
 * @netdev: net device whose adapter is being programmed
 * @data: image bytes
 * @size: image length in bytes
 *
 * Resets the chip/firmware first so the new PHY image is written to
 * persistent storage rather than only the running firmware's local RAM
 * (see comment below), then loads the image via t4_load_phy_fw().
 */
1336 static int cxgb4_ethtool_flash_phy(struct net_device *netdev,
1337 const u8 *data, u32 size)
1339 struct adapter *adap = netdev2adap(netdev);
/* size pointer intentionally NULL — only the signature check is needed. */
1342 ret = cxgb4_validate_phy_image(data, NULL);
1344 dev_err(adap->pdev_dev, "PHY signature mismatch\n");
1348 /* We have to RESET the chip/firmware because we need the
1349 * chip in uninitialized state for loading new PHY image.
1350 * Otherwise, the running firmware will only store the PHY
1351 * image in local RAM which will be lost after next reset.
1353 ret = t4_fw_reset(adap, adap->mbox, PIORSTMODE_F | PIORST_F);
1355 dev_err(adap->pdev_dev,
1356 "Set FW to RESET for flashing PHY FW failed. ret: %d\n",
1361 ret = t4_load_phy_fw(adap, MEMWIN_NIC, NULL, data, size);
1363 dev_err(adap->pdev_dev, "Failed to load PHY FW. ret: %d\n",
/* cxgb4_ethtool_flash_fw - upgrade the adapter firmware image.
 * @netdev: net device whose adapter is being programmed
 * @data: firmware image bytes
 * @size: image length in bytes
 */
1371 static int cxgb4_ethtool_flash_fw(struct net_device *netdev,
1372 const u8 *data, u32 size)
1374 struct adapter *adap = netdev2adap(netdev);
/* Deliberately out-of-range mailbox: host-driven upgrade without firmware
 * cooperation. Presumably replaced by adap->mbox in the FULL_INIT_DONE
 * branch below (elided here) — TODO confirm.
 */
1375 unsigned int mbox = PCIE_FW_MASTER_M + 1;
1378 /* If the adapter has been fully initialized then we'll go ahead and
1379 * try to get the firmware's cooperation in upgrading to the new
1380 * firmware image otherwise we'll try to do the entire job from the
1381 * host ... and we always "force" the operation in this path.
1383 if (adap->flags & CXGB4_FULL_INIT_DONE)
/* Final argument 1 == force the upgrade. */
1386 ret = t4_fw_upgrade(adap, mbox, data, size, 1);
1388 dev_err(adap->pdev_dev,
1389 "Failed to flash firmware\n");
/* cxgb4_ethtool_flash_region - dispatch an image to the flasher for its
 * region.
 * @netdev: net device whose adapter is being programmed
 * @data: image bytes
 * @size: image length in bytes
 * @region: one of the CXGB4_ETHTOOL_FLASH_* region identifiers
 *
 * On success logs a message telling the user to reload the cxgb4 driver
 * for the new image to take effect.
 */
1394 static int cxgb4_ethtool_flash_region(struct net_device *netdev,
1395 const u8 *data, u32 size, u32 region)
1397 struct adapter *adap = netdev2adap(netdev);
1401 case CXGB4_ETHTOOL_FLASH_FW:
1402 ret = cxgb4_ethtool_flash_fw(netdev, data, size);
1404 case CXGB4_ETHTOOL_FLASH_PHY:
1405 ret = cxgb4_ethtool_flash_phy(netdev, data, size);
1407 case CXGB4_ETHTOOL_FLASH_BOOT:
1408 ret = cxgb4_ethtool_flash_boot(netdev, data, size);
1410 case CXGB4_ETHTOOL_FLASH_BOOTCFG:
1411 ret = cxgb4_ethtool_flash_bootcfg(netdev, data, size);
/* flash_region_strings[] indexing assumes region is in range here —
 * guaranteed by the switch above having matched.
 */
1419 dev_info(adap->pdev_dev,
1420 "loading %s successful, reload cxgb4 driver\n",
1421 flash_region_strings[region]);
1425 #define CXGB4_FW_SIG 0x4368656c
1426 #define CXGB4_FW_SIG_OFFSET 0x160
/* cxgb4_validate_fw_image - check for a valid main firmware image.
 * @data: candidate image bytes
 * @size: out: actual image length derived from the fw_hdr (len512 units
 *        of 512 bytes)
 *
 * Valid iff the big-endian signature at CXGB4_FW_SIG_OFFSET matches
 * CXGB4_FW_SIG.
 */
1428 static int cxgb4_validate_fw_image(const u8 *data, u32 *size)
1430 struct cxgb4_fw_data *header;
1432 header = (struct cxgb4_fw_data *)&data[CXGB4_FW_SIG_OFFSET];
1433 if (be32_to_cpu(header->signature) != CXGB4_FW_SIG)
1437 *size = be16_to_cpu(((struct fw_hdr *)data)->len512) * 512;
/* cxgb4_validate_bootcfg_image - check for a valid boot-config image.
 * @data: candidate image bytes
 * @size: out parameter for image length (set in elided code below —
 *        TODO confirm)
 *
 * Valid iff the little-endian signature at the start matches BOOT_CFG_SIG.
 */
1442 static int cxgb4_validate_bootcfg_image(const u8 *data, u32 *size)
1444 struct cxgb4_bootcfg_data *header;
1446 header = (struct cxgb4_bootcfg_data *)data;
1447 if (le16_to_cpu(header->signature) != BOOT_CFG_SIG)
/* cxgb4_validate_boot_image - check for a valid PCI option-ROM image and
 * compute its total size.
 * @data: candidate image bytes
 * @size: out: accumulated size of all ROM images in the chain
 *
 * Verifies the expansion-ROM signature, then walks the chain of ROM
 * images (each header gives its size in 512-byte units and the offset of
 * its PCIR data structure) until the PCIR "last image" indicator bit
 * (CXGB4_HDR_INDI) is set.
 */
1453 static int cxgb4_validate_boot_image(const u8 *data, u32 *size)
1455 struct cxgb4_pci_exp_rom_header *exp_header;
1456 struct cxgb4_pcir_data *pcir_header;
1457 struct legacy_pci_rom_hdr *header;
1458 const u8 *cur_header = data;
1461 exp_header = (struct cxgb4_pci_exp_rom_header *)data;
1463 if (le16_to_cpu(exp_header->signature) != BOOT_SIGNATURE)
1468 header = (struct legacy_pci_rom_hdr *)cur_header;
1469 pcir_offset = le16_to_cpu(header->pcir_offset);
1470 pcir_header = (struct cxgb4_pcir_data *)(cur_header +
/* size512 is in units of 512 bytes; advance to the next image. */
1473 *size += header->size512 * 512;
1474 cur_header += header->size512 * 512;
1475 } while (!(pcir_header->indicator & CXGB4_HDR_INDI));
/* cxgb4_ethtool_get_flash_region - identify which flash region an image
 * belongs to by trying each validator in turn.
 * @data: image bytes
 * @size: out: image size as reported by the matching validator
 *
 * Return: a CXGB4_ETHTOOL_FLASH_* region id (negative on no match, in
 * the elided tail — TODO confirm).
 */
1481 static int cxgb4_ethtool_get_flash_region(const u8 *data, u32 *size)
1483 if (!cxgb4_validate_fw_image(data, size))
1484 return CXGB4_ETHTOOL_FLASH_FW;
1485 if (!cxgb4_validate_boot_image(data, size))
1486 return CXGB4_ETHTOOL_FLASH_BOOT;
1487 if (!cxgb4_validate_phy_image(data, size))
1488 return CXGB4_ETHTOOL_FLASH_PHY;
1489 if (!cxgb4_validate_bootcfg_image(data, size))
1490 return CXGB4_ETHTOOL_FLASH_BOOTCFG;
/* set_flash - ethtool .flash_device handler.
 * @netdev: net device being flashed
 * @ef: ethtool flash request; ef->data names the firmware file, ef->region
 *      selects a region or ETHTOOL_FLASH_ALL_REGIONS
 *
 * Refuses to flash unless this PF is (or can become) the firmware master,
 * loads the image via request_firmware(), and for ALL_REGIONS iterates
 * over a composite file, auto-detecting each contained image's region.
 */
1495 static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
1497 struct adapter *adap = netdev2adap(netdev);
1498 const struct firmware *fw;
1499 unsigned int master;
1508 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
1509 master = PCIE_FW_MASTER_G(pcie_fw);
1510 if (pcie_fw & PCIE_FW_MASTER_VLD_F)
1512 /* if csiostor is the master return */
1513 if (master_vld && (master != adap->pf)) {
1514 dev_warn(adap->pdev_dev,
1515 "cxgb4 driver needs to be loaded as MASTER to support FW flash\n");
/* Guarantee NUL-termination of the user-supplied firmware filename. */
1519 ef->data[sizeof(ef->data) - 1] = '\0';
1520 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
1526 if (ef->region == ETHTOOL_FLASH_ALL_REGIONS) {
/* Composite image: detect and flash each sub-image in sequence. */
1527 while (fw_size > 0) {
1529 region = cxgb4_ethtool_get_flash_region(fw_data, &size);
1530 if (region < 0 || !size) {
1535 ret = cxgb4_ethtool_flash_region(netdev, fw_data, size,
/* Single-region request: flash the whole file as-is. */
1544 ret = cxgb4_ethtool_flash_region(netdev, fw_data, fw_size,
1549 release_firmware(fw);
/* get_ts_info - ethtool .get_ts_info handler: report timestamping caps.
 * @dev: net device
 * @ts_info: filled with SW + HW timestamping capabilities, supported TX
 *           types and RX filters, and the PTP clock index (-1 when no
 *           PHC is registered)
 */
1553 static int get_ts_info(struct net_device *dev, struct ethtool_ts_info *ts_info)
1555 struct port_info *pi = netdev_priv(dev);
1556 struct adapter *adapter = pi->adapter;
1558 ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
1559 SOF_TIMESTAMPING_RX_SOFTWARE |
1560 SOF_TIMESTAMPING_SOFTWARE;
1562 ts_info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE |
1563 SOF_TIMESTAMPING_TX_HARDWARE |
1564 SOF_TIMESTAMPING_RAW_HARDWARE;
1566 ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) |
1567 (1 << HWTSTAMP_TX_ON);
1569 ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1570 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
1571 (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
1572 (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
1573 (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
1574 (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
1576 if (adapter->ptp_clock)
1577 ts_info->phc_index = ptp_clock_index(adapter->ptp_clock);
1579 ts_info->phc_index = -1;
/* get_rss_table_size - ethtool hook: size of this port's RSS
 * indirection table.
 */
1584 static u32 get_rss_table_size(struct net_device *dev)
1586 const struct port_info *pi = netdev_priv(dev);
1588 return pi->rss_size;
/* get_rss_table - ethtool .get_rxfh handler: copy the cached RSS
 * indirection table into rxfh->indir and report Toeplitz hashing.
 * (The surrounding copy loop is elided in this excerpt.)
 */
1591 static int get_rss_table(struct net_device *dev,
1592 struct ethtool_rxfh_param *rxfh)
1594 const struct port_info *pi = netdev_priv(dev);
1595 unsigned int n = pi->rss_size;
1597 rxfh->hfunc = ETH_RSS_HASH_TOP;
1601 rxfh->indir[n] = pi->rss[n];
/* set_rss_table - ethtool .set_rxfh handler: install a new RSS
 * indirection table.
 * @dev: net device
 * @rxfh: new RSS parameters; only the indirection table and Toeplitz
 *        hashing are supported
 * @extack: netlink extended ack (unused in the visible code)
 *
 * Only writes to hardware once the interface has been brought up at
 * least once (CXGB4_FULL_INIT_DONE); caches into pi->rss and pushes via
 * cxgb4_write_rss().
 */
1605 static int set_rss_table(struct net_device *dev,
1606 struct ethtool_rxfh_param *rxfh,
1607 struct netlink_ext_ack *extack)
1610 struct port_info *pi = netdev_priv(dev);
1612 /* We require at least one supported parameter to be changed and no
1613 * change in any of the unsupported parameters
1616 (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
1617 rxfh->hfunc != ETH_RSS_HASH_TOP))
1622 /* Interface must be brought up atleast once */
1623 if (pi->adapter->flags & CXGB4_FULL_INIT_DONE) {
1624 for (i = 0; i < pi->rss_size; i++)
1625 pi->rss[i] = rxfh->indir[i];
1627 return cxgb4_write_rss(pi, pi->rss);
/* cxgb4_get_filter_entry - map a filter tid to its filter_entry.
 * @adap: adapter owning the tid tables
 * @ftid: filter tid
 *
 * Looks in the high-priority filter region, then the normal filter
 * region; anything else is treated as a hash-filter tid and resolved
 * via lookup_tid().
 */
1633 static struct filter_entry *cxgb4_get_filter_entry(struct adapter *adap,
1636 struct tid_info *t = &adap->tids;
1638 if (ftid >= t->hpftid_base && ftid < t->hpftid_base + t->nhpftids)
1639 return &t->hpftid_tab[ftid - t->hpftid_base];
1641 if (ftid >= t->ftid_base && ftid < t->ftid_base + t->nftids)
1642 return &t->ftid_tab[ftid - t->ftid_base];
1644 return lookup_tid(t, ftid);
/* cxgb4_fill_filter_rule - translate a hardware filter spec back into an
 * ethtool flow spec for reporting to user space.
 * @fs: ethtool flow spec to fill
 * @dfs: driver/hardware filter specification to translate
 *
 * Selects TCP/UDP x IPv4/IPv6 flow_type from the protocol (and,
 * presumably, an address-family check in the elided conditions — TODO
 * confirm), copies ports, addresses and TOS/traffic-class, adds VLAN as
 * FLOW_EXT, and encodes the action (drop vs. steering queue) in
 * ring_cookie.
 */
1647 static void cxgb4_fill_filter_rule(struct ethtool_rx_flow_spec *fs,
1648 struct ch_filter_specification *dfs)
1650 switch (dfs->val.proto) {
1653 fs->flow_type = TCP_V6_FLOW;
1655 fs->flow_type = TCP_V4_FLOW;
1659 fs->flow_type = UDP_V6_FLOW;
1661 fs->flow_type = UDP_V4_FLOW;
/* IPv6 path: fport/lport are foreign (source) / local (dest) ports. */
1666 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(dfs->val.fport);
1667 fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(dfs->mask.fport);
1668 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(dfs->val.lport);
1669 fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(dfs->mask.lport);
1670 memcpy(&fs->h_u.tcp_ip6_spec.ip6src, &dfs->val.fip[0],
1671 sizeof(fs->h_u.tcp_ip6_spec.ip6src));
1672 memcpy(&fs->m_u.tcp_ip6_spec.ip6src, &dfs->mask.fip[0],
1673 sizeof(fs->m_u.tcp_ip6_spec.ip6src));
1674 memcpy(&fs->h_u.tcp_ip6_spec.ip6dst, &dfs->val.lip[0],
1675 sizeof(fs->h_u.tcp_ip6_spec.ip6dst));
1676 memcpy(&fs->m_u.tcp_ip6_spec.ip6dst, &dfs->mask.lip[0],
1677 sizeof(fs->m_u.tcp_ip6_spec.ip6dst));
1678 fs->h_u.tcp_ip6_spec.tclass = dfs->val.tos;
1679 fs->m_u.tcp_ip6_spec.tclass = dfs->mask.tos;
/* IPv4 path: same field mapping using the tcp_ip4_spec union member. */
1681 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(dfs->val.fport);
1682 fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(dfs->mask.fport);
1683 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(dfs->val.lport);
1684 fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(dfs->mask.lport);
1685 memcpy(&fs->h_u.tcp_ip4_spec.ip4src, &dfs->val.fip[0],
1686 sizeof(fs->h_u.tcp_ip4_spec.ip4src));
1687 memcpy(&fs->m_u.tcp_ip4_spec.ip4src, &dfs->mask.fip[0],
1688 sizeof(fs->m_u.tcp_ip4_spec.ip4src));
1689 memcpy(&fs->h_u.tcp_ip4_spec.ip4dst, &dfs->val.lip[0],
1690 sizeof(fs->h_u.tcp_ip4_spec.ip4dst));
1691 memcpy(&fs->m_u.tcp_ip4_spec.ip4dst, &dfs->mask.lip[0],
1692 sizeof(fs->m_u.tcp_ip4_spec.ip4dst));
1693 fs->h_u.tcp_ip4_spec.tos = dfs->val.tos;
1694 fs->m_u.tcp_ip4_spec.tos = dfs->mask.tos;
/* Inner VLAN match is reported through the FLOW_EXT extension. */
1696 fs->h_ext.vlan_tci = cpu_to_be16(dfs->val.ivlan);
1697 fs->m_ext.vlan_tci = cpu_to_be16(dfs->mask.ivlan);
1698 fs->flow_type |= FLOW_EXT;
1700 if (dfs->action == FILTER_DROP)
1701 fs->ring_cookie = RX_CLS_FLOW_DISC;
1703 fs->ring_cookie = dfs->iq;
/* cxgb4_ntuple_get_filter - fetch one ethtool n-tuple filter rule.
 * @dev: net device
 * @cmd: rxnfc command; cmd->fs is filled with the translated rule
 * @loc: user-visible filter location on this port
 *
 * Validates that the ethtool filter table exists, the location is in
 * range and actually in use on this port, then resolves the stored tid
 * to a filter_entry and converts it with cxgb4_fill_filter_rule().
 */
1706 static int cxgb4_ntuple_get_filter(struct net_device *dev,
1707 struct ethtool_rxnfc *cmd,
1710 const struct port_info *pi = netdev_priv(dev);
1711 struct adapter *adap = netdev2adap(dev);
1712 struct filter_entry *f;
1715 if (!(adap->flags & CXGB4_FULL_INIT_DONE))
1718 /* Check for maximum filter range */
1719 if (!adap->ethtool_filters)
1722 if (loc >= adap->ethtool_filters->nentries)
1725 if (!test_bit(loc, adap->ethtool_filters->port[pi->port_id].bmap))
1728 ftid = adap->ethtool_filters->port[pi->port_id].loc_array[loc];
1730 /* Fetch filter_entry */
1731 f = cxgb4_get_filter_entry(adap, ftid);
1733 cxgb4_fill_filter_rule(&cmd->fs, &f->fs);
/* get_rxnfc - ethtool .get_rxnfc handler.
 * @dev: net device
 * @info: rxnfc query; info->cmd selects the sub-operation
 * @rules: (for GRXCLSRLALL) array to receive the in-use rule locations
 *
 * ETHTOOL_GRXFH reports the RSS hash fields enabled for each flow type
 * from the port's rss_mode bits; GRXRINGS reports the queue-set count;
 * GRXCLSRLCNT/GRXCLSRULE/GRXCLSRLALL report the ethtool n-tuple filters.
 */
1738 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
1741 const struct port_info *pi = netdev_priv(dev);
1742 struct adapter *adap = netdev2adap(dev);
1743 unsigned int count = 0, index = 0;
1746 switch (info->cmd) {
1747 case ETHTOOL_GRXFH: {
1748 unsigned int v = pi->rss_mode;
1751 switch (info->flow_type) {
/* TCP/IPv4: 4-tuple hashing if enabled, else 2-tuple (addresses only). */
1753 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
1754 info->data = RXH_IP_SRC | RXH_IP_DST |
1755 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1756 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
1757 info->data = RXH_IP_SRC | RXH_IP_DST;
/* UDP/IPv4 additionally requires the UDP-enable bit for 4-tuple. */
1760 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
1761 (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
1762 info->data = RXH_IP_SRC | RXH_IP_DST |
1763 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1764 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
1765 info->data = RXH_IP_SRC | RXH_IP_DST;
1768 case AH_ESP_V4_FLOW:
/* Other IPv4 protocols only ever hash on addresses. */
1770 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
1771 info->data = RXH_IP_SRC | RXH_IP_DST;
/* IPv6 variants mirror the IPv4 logic with the IP6 enable bits. */
1774 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
1775 info->data = RXH_IP_SRC | RXH_IP_DST |
1776 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1777 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
1778 info->data = RXH_IP_SRC | RXH_IP_DST;
1781 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
1782 (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
1783 info->data = RXH_IP_SRC | RXH_IP_DST |
1784 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1785 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
1786 info->data = RXH_IP_SRC | RXH_IP_DST;
1789 case AH_ESP_V6_FLOW:
1791 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
1792 info->data = RXH_IP_SRC | RXH_IP_DST;
1797 case ETHTOOL_GRXRINGS:
1798 info->data = pi->nqsets;
1800 case ETHTOOL_GRXCLSRLCNT:
1802 adap->ethtool_filters->port[pi->port_id].in_use;
1804 case ETHTOOL_GRXCLSRULE:
1805 return cxgb4_ntuple_get_filter(dev, info, info->fs.location);
1806 case ETHTOOL_GRXCLSRLALL:
1807 info->data = adap->ethtool_filters->nentries;
/* Walk all locations, collecting those that resolve to live filters. */
1808 while (count < info->rule_cnt) {
1809 ret = cxgb4_ntuple_get_filter(dev, info, index);
1811 rules[count++] = index;
/* cxgb4_ntuple_del_filter - delete an ethtool n-tuple filter
 * (ETHTOOL_SRXCLSRLDEL).
 * @dev: net device
 * @cmd: rxnfc command; cmd->fs.location identifies the filter
 *
 * Translates the location to a hardware tid, rebases the tid into the
 * index space expected by cxgb4_flow_rule_destroy(), then clears the
 * port's bookkeeping (location bitmap and in-use count).
 */
1820 static int cxgb4_ntuple_del_filter(struct net_device *dev,
1821 struct ethtool_rxnfc *cmd)
1823 struct cxgb4_ethtool_filter_info *filter_info;
1824 struct adapter *adapter = netdev2adap(dev);
1825 struct port_info *pi = netdev_priv(dev);
1826 struct filter_entry *f;
1830 if (!(adapter->flags & CXGB4_FULL_INIT_DONE))
1831 return -EAGAIN; /* can still change nfilters */
1833 if (!adapter->ethtool_filters)
1836 if (cmd->fs.location >= adapter->ethtool_filters->nentries) {
1837 dev_err(adapter->pdev_dev,
1838 "Location must be < %u",
1839 adapter->ethtool_filters->nentries);
1843 filter_info = &adapter->ethtool_filters->port[pi->port_id];
1845 if (!test_bit(cmd->fs.location, filter_info->bmap))
1848 filter_id = filter_info->loc_array[cmd->fs.location];
1849 f = cxgb4_get_filter_entry(adapter, filter_id);
/* Rebase tid: hp filters relative to hpftid_base, normal (non-hash)
 * filters keep the nhpftids offset — mirrors the bias applied when the
 * filter was installed in cxgb4_ntuple_set_filter().
 */
1852 filter_id -= adapter->tids.hpftid_base;
1853 else if (!f->fs.hash)
1854 filter_id -= (adapter->tids.ftid_base - adapter->tids.nhpftids);
1856 ret = cxgb4_flow_rule_destroy(dev, f->fs.tc_prio, &f->fs, filter_id);
1860 clear_bit(cmd->fs.location, filter_info->bmap);
1861 filter_info->in_use--;
1867 /* Add Ethtool n-tuple filters. */
/* cxgb4_ntuple_set_filter - install an ethtool n-tuple filter
 * (ETHTOOL_SRXCLSRLINS).
 * @netdev: net device
 * @cmd: rxnfc command; cmd->fs holds the rule and its location
 *
 * Converts the ethtool flow spec into a generic flow rule via
 * ethtool_rx_flow_rule_create(), installs it with
 * cxgb4_flow_rule_replace(), then records the resulting tid (rebased
 * into the global tid space) in the port's location array and bitmap.
 */
1868 static int cxgb4_ntuple_set_filter(struct net_device *netdev,
1869 struct ethtool_rxnfc *cmd)
1871 struct ethtool_rx_flow_spec_input input = {};
1872 struct cxgb4_ethtool_filter_info *filter_info;
1873 struct adapter *adapter = netdev2adap(netdev);
1874 struct port_info *pi = netdev_priv(netdev);
1875 struct ch_filter_specification fs;
1876 struct ethtool_rx_flow_rule *flow;
1880 if (!(adapter->flags & CXGB4_FULL_INIT_DONE))
1881 return -EAGAIN; /* can still change nfilters */
1883 if (!adapter->ethtool_filters)
1886 if (cmd->fs.location >= adapter->ethtool_filters->nentries) {
1887 dev_err(adapter->pdev_dev,
1888 "Location must be < %u",
1889 adapter->ethtool_filters->nentries);
/* Refuse to overwrite a location that already holds a filter. */
1893 if (test_bit(cmd->fs.location,
1894 adapter->ethtool_filters->port[pi->port_id].bmap))
1897 memset(&fs, 0, sizeof(fs));
1899 input.fs = &cmd->fs;
1900 flow = ethtool_rx_flow_rule_create(&input);
1902 ret = PTR_ERR(flow);
1908 ret = cxgb4_flow_rule_replace(netdev, flow->rule, cmd->fs.location,
1913 filter_info = &adapter->ethtool_filters->port[pi->port_id];
/* Rebase tid into the global space; inverse of the adjustment done in
 * cxgb4_ntuple_del_filter().
 */
1916 tid += adapter->tids.hpftid_base;
1918 tid += (adapter->tids.ftid_base - adapter->tids.nhpftids);
1920 filter_info->loc_array[cmd->fs.location] = tid;
1921 set_bit(cmd->fs.location, filter_info->bmap);
1922 filter_info->in_use++;
/* The temporary flow rule is always destroyed; only the hw filter
 * persists.
 */
1925 ethtool_rx_flow_rule_destroy(flow);
/* set_rxnfc - ethtool .set_rxnfc handler: dispatch n-tuple filter
 * insert/delete; everything else returns -EOPNOTSUPP.
 */
1930 static int set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
1932 int ret = -EOPNOTSUPP;
1935 case ETHTOOL_SRXCLSRLINS:
1936 ret = cxgb4_ntuple_set_filter(dev, cmd);
1938 case ETHTOOL_SRXCLSRLDEL:
1939 ret = cxgb4_ntuple_del_filter(dev, cmd);
/* set_dump - ethtool .set_dump handler: record the requested cudbg dump
 * flag and precompute the corresponding dump length (header + per-entity
 * headers + entity data).
 */
1948 static int set_dump(struct net_device *dev, struct ethtool_dump *eth_dump)
1950 struct adapter *adapter = netdev2adap(dev);
1953 len = sizeof(struct cudbg_hdr) +
1954 sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY;
1955 len += cxgb4_get_dump_length(adapter, eth_dump->flag);
1957 adapter->eth_dump.flag = eth_dump->flag;
1958 adapter->eth_dump.len = len;
/* get_dump_flag - ethtool .get_dump_flag handler: report the currently
 * configured dump flag, length, and version.
 */
1962 static int get_dump_flag(struct net_device *dev, struct ethtool_dump *eth_dump)
1964 struct adapter *adapter = netdev2adap(dev);
1966 eth_dump->flag = adapter->eth_dump.flag;
1967 eth_dump->len = adapter->eth_dump.len;
1968 eth_dump->version = adapter->eth_dump.version;
/* get_dump_data - ethtool .get_dump_data handler: collect a cudbg dump
 * into the caller-provided buffer.
 *
 * Fails if no dump flag has been configured, or if the caller's buffer
 * (eth_dump->len) is smaller than the recomputed required length.
 */
1972 static int get_dump_data(struct net_device *dev, struct ethtool_dump *eth_dump,
1975 struct adapter *adapter = netdev2adap(dev);
1979 if (adapter->eth_dump.flag == CXGB4_ETH_DUMP_NONE)
1982 len = sizeof(struct cudbg_hdr) +
1983 sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY;
1984 len += cxgb4_get_dump_length(adapter, adapter->eth_dump.flag);
1985 if (eth_dump->len < len)
1988 ret = cxgb4_cudbg_collect(adapter, buf, &len, adapter->eth_dump.flag);
1992 eth_dump->flag = adapter->eth_dump.flag;
1993 eth_dump->len = len;
1994 eth_dump->version = adapter->eth_dump.version;
/* cxgb4_fw_mod_type_info_available - true when the port module type
 * indicates a module whose EEPROM can be read (i.e. not absent and not
 * in an error state).
 */
1998 static bool cxgb4_fw_mod_type_info_available(unsigned int fw_mod_type)
2000 /* Read port module EEPROM as long as it is plugged-in and
2003 return (fw_mod_type != FW_PORT_MOD_TYPE_NONE &&
2004 fw_mod_type != FW_PORT_MOD_TYPE_ERROR);
/* cxgb4_get_module_info - ethtool .get_module_info handler.
 * @dev: net device
 * @modinfo: filled with the module EEPROM standard (SFF-8079/8472 for
 *           SFP-class ports, SFF-8436/8636 for QSFP-class ports) and
 *           the corresponding EEPROM length
 *
 * Probes the module over I2C (address A0) to determine which SFF
 * standard its EEPROM follows.
 */
2007 static int cxgb4_get_module_info(struct net_device *dev,
2008 struct ethtool_modinfo *modinfo)
2010 struct port_info *pi = netdev_priv(dev);
2011 u8 sff8472_comp, sff_diag_type, sff_rev;
2012 struct adapter *adapter = pi->adapter;
2015 if (!cxgb4_fw_mod_type_info_available(pi->mod_type))
2018 switch (pi->port_type) {
2019 case FW_PORT_TYPE_SFP:
2020 case FW_PORT_TYPE_QSA:
2021 case FW_PORT_TYPE_SFP28:
2022 ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
2023 I2C_DEV_ADDR_A0, SFF_8472_COMP_ADDR,
2024 SFF_8472_COMP_LEN, &sff8472_comp);
2027 ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
2028 I2C_DEV_ADDR_A0, SFP_DIAG_TYPE_ADDR,
2029 SFP_DIAG_TYPE_LEN, &sff_diag_type);
/* No 8472 compliance, or address-change-required diagnostics:
 * fall back to plain SFF-8079.
 */
2033 if (!sff8472_comp || (sff_diag_type & SFP_DIAG_ADDRMODE)) {
2034 modinfo->type = ETH_MODULE_SFF_8079;
2035 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
2037 modinfo->type = ETH_MODULE_SFF_8472;
/* Full 8472 length only when diagnostics are implemented. */
2038 if (sff_diag_type & SFP_DIAG_IMPLEMENTED)
2039 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
2041 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN / 2;
2045 case FW_PORT_TYPE_QSFP:
2046 case FW_PORT_TYPE_QSFP_10G:
2047 case FW_PORT_TYPE_CR_QSFP:
2048 case FW_PORT_TYPE_CR2_QSFP:
2049 case FW_PORT_TYPE_CR4_QSFP:
2050 ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
2051 I2C_DEV_ADDR_A0, SFF_REV_ADDR,
2052 SFF_REV_LEN, &sff_rev);
2053 /* For QSFP type ports, revision value >= 3
2054 * means the SFP is 8636 compliant.
2058 if (sff_rev >= 0x3) {
2059 modinfo->type = ETH_MODULE_SFF_8636;
2060 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
2062 modinfo->type = ETH_MODULE_SFF_8436;
2063 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
/* cxgb4_get_module_eeprom - ethtool .get_module_eeprom handler: read the
 * port module's EEPROM over I2C.
 * @dev: net device
 * @eprom: requested offset/length
 * @data: output buffer (pre-zeroed here)
 *
 * Reads from I2C device address A0 for the first page; a request that
 * crosses the page boundary is split, with the remainder read from the
 * diagnostics page at device address A2.
 */
2074 static int cxgb4_get_module_eeprom(struct net_device *dev,
2075 struct ethtool_eeprom *eprom, u8 *data)
2077 int ret = 0, offset = eprom->offset, len = eprom->len;
2078 struct port_info *pi = netdev_priv(dev);
2079 struct adapter *adapter = pi->adapter;
2081 memset(data, 0, eprom->len);
2082 if (offset + len <= I2C_PAGE_SIZE)
2083 return t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
2084 I2C_DEV_ADDR_A0, offset, len, data);
2086 /* offset + len spans 0xa0 and 0xa1 pages */
2087 if (offset <= I2C_PAGE_SIZE) {
2088 /* read 0xa0 page */
2089 len = I2C_PAGE_SIZE - offset;
2090 ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
2091 I2C_DEV_ADDR_A0, offset, len, data);
2094 offset = I2C_PAGE_SIZE;
2095 /* Remaining bytes to be read from second page =
2096 * Total length - bytes read from first page
2098 len = eprom->len - len;
2100 /* Read additional optical diagnostics from page 0xa2 if supported */
2101 return t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan, I2C_DEV_ADDR_A2,
2102 offset, len, &data[eprom->len - len]);
/* cxgb4_get_priv_flags - ethtool .get_priv_flags handler: combined
 * adapter-wide and per-port private flags.
 */
2105 static u32 cxgb4_get_priv_flags(struct net_device *netdev)
2107 struct port_info *pi = netdev_priv(netdev);
2108 struct adapter *adapter = pi->adapter;
2110 return (adapter->eth_flags | pi->eth_flags);
2114 * set_flags - set/unset specified flags if passed in new_flags
2115 * @cur_flags: pointer to current flags
2116 * @new_flags: new incoming flags
2117 * @flags: set of flags to set/unset
/* Only the bits selected by @flags are taken from @new_flags; all other
 * bits of *cur_flags are preserved.
 */
2119 static inline void set_flags(u32 *cur_flags, u32 new_flags, u32 flags)
2121 *cur_flags = (*cur_flags & ~flags) | (new_flags & flags);
/* cxgb4_set_priv_flags - ethtool .set_priv_flags handler: split the
 * incoming flag word into its adapter-wide (PRIV_FLAGS_ADAP) and
 * per-port (PRIV_FLAGS_PORT) components.
 */
2124 static int cxgb4_set_priv_flags(struct net_device *netdev, u32 flags)
2126 struct port_info *pi = netdev_priv(netdev);
2127 struct adapter *adapter = pi->adapter;
2129 set_flags(&adapter->eth_flags, flags, PRIV_FLAGS_ADAP);
2130 set_flags(&pi->eth_flags, flags, PRIV_FLAGS_PORT);
/* cxgb4_lb_test - run the loopback self-test packet.
 * @netdev: net device under test
 * @lb_status: out: 0 on success, non-zero on failure (result of
 *             cxgb4_selftest_lb_pkt())
 *
 * Quiesces TX and carrier around the test when the interface was
 * running, then restores them.
 */
2135 static void cxgb4_lb_test(struct net_device *netdev, u64 *lb_status)
2137 int dev_state = netif_running(netdev);
2140 netif_tx_stop_all_queues(netdev);
2141 netif_carrier_off(netdev);
2144 *lb_status = cxgb4_selftest_lb_pkt(netdev);
2147 netif_tx_start_all_queues(netdev);
2148 netif_carrier_on(netdev);
/* cxgb4_self_test - ethtool .self_test handler.
 * @netdev: net device under test
 * @eth_test: test control; ETH_TEST_FL_OFFLINE requests the (offline)
 *            loopback test, and ETH_TEST_FL_FAILED is set on any failure
 * @data: per-test result array (CXGB4_ETHTOOL_MAX_TEST entries, zeroed)
 *
 * Requires both a fully-initialized adapter and healthy firmware;
 * otherwise the test is reported as failed immediately.
 */
2152 static void cxgb4_self_test(struct net_device *netdev,
2153 struct ethtool_test *eth_test, u64 *data)
2155 struct port_info *pi = netdev_priv(netdev);
2156 struct adapter *adap = pi->adapter;
2158 memset(data, 0, sizeof(u64) * CXGB4_ETHTOOL_MAX_TEST);
2160 if (!(adap->flags & CXGB4_FULL_INIT_DONE) ||
2161 !(adap->flags & CXGB4_FW_OK)) {
2162 eth_test->flags |= ETH_TEST_FL_FAILED;
2166 if (eth_test->flags & ETH_TEST_FL_OFFLINE)
2167 cxgb4_lb_test(netdev, &data[CXGB4_ETHTOOL_LB_TEST]);
2169 if (data[CXGB4_ETHTOOL_LB_TEST])
2170 eth_test->flags |= ETH_TEST_FL_FAILED;
/* Ethtool operations table for cxgb4 net devices; installed by
 * cxgb4_set_ethtool_ops() below. Handlers not visible in this excerpt
 * (get_drvinfo, get_sge_param, etc.) are defined elsewhere in this file.
 */
2173 static const struct ethtool_ops cxgb_ethtool_ops = {
2174 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
2175 ETHTOOL_COALESCE_RX_MAX_FRAMES |
2176 ETHTOOL_COALESCE_TX_USECS_IRQ |
2177 ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
2178 .get_link_ksettings = get_link_ksettings,
2179 .set_link_ksettings = set_link_ksettings,
2180 .get_fecparam = get_fecparam,
2181 .set_fecparam = set_fecparam,
2182 .get_drvinfo = get_drvinfo,
2183 .get_msglevel = get_msglevel,
2184 .set_msglevel = set_msglevel,
2185 .get_ringparam = get_sge_param,
2186 .set_ringparam = set_sge_param,
2187 .get_coalesce = get_coalesce,
2188 .set_coalesce = set_coalesce,
2189 .get_eeprom_len = get_eeprom_len,
2190 .get_eeprom = get_eeprom,
2191 .set_eeprom = set_eeprom,
2192 .get_pauseparam = get_pauseparam,
2193 .set_pauseparam = set_pauseparam,
2194 .get_link = ethtool_op_get_link,
2195 .get_strings = get_strings,
2196 .set_phys_id = identify_port,
2197 .nway_reset = restart_autoneg,
2198 .get_sset_count = get_sset_count,
2199 .get_ethtool_stats = get_stats,
2200 .get_regs_len = get_regs_len,
2201 .get_regs = get_regs,
2202 .get_rxnfc = get_rxnfc,
2203 .set_rxnfc = set_rxnfc,
2204 .get_rxfh_indir_size = get_rss_table_size,
2205 .get_rxfh = get_rss_table,
2206 .set_rxfh = set_rss_table,
2207 .self_test = cxgb4_self_test,
2208 .flash_device = set_flash,
2209 .get_ts_info = get_ts_info,
2210 .set_dump = set_dump,
2211 .get_dump_flag = get_dump_flag,
2212 .get_dump_data = get_dump_data,
2213 .get_module_info = cxgb4_get_module_info,
2214 .get_module_eeprom = cxgb4_get_module_eeprom,
2215 .get_priv_flags = cxgb4_get_priv_flags,
2216 .set_priv_flags = cxgb4_set_priv_flags,
/* cxgb4_cleanup_ethtool_filters - free all ethtool n-tuple filter state
 * allocated by cxgb4_init_ethtool_filters().
 * @adap: adapter being torn down
 *
 * Safe to call when the filter table was never allocated.
 */
2219 void cxgb4_cleanup_ethtool_filters(struct adapter *adap)
2221 struct cxgb4_ethtool_filter_info *eth_filter_info;
2224 if (!adap->ethtool_filters)
2227 eth_filter_info = adap->ethtool_filters->port;
2229 if (eth_filter_info) {
2230 for (i = 0; i < adap->params.nports; i++) {
2231 kvfree(eth_filter_info[i].loc_array);
2232 bitmap_free(eth_filter_info[i].bmap);
2234 kfree(eth_filter_info);
2237 kfree(adap->ethtool_filters);
/* cxgb4_init_ethtool_filters - allocate per-port ethtool n-tuple filter
 * bookkeeping (location array + bitmap per port).
 * @adap: adapter being initialized
 *
 * Capacity is the sum of high-priority and normal filter tids, plus the
 * hash-filter region when hash filtering is enabled. On any allocation
 * failure, everything allocated so far is unwound (goto cleanup chain).
 *
 * Return: 0 on success, negative errno on allocation failure.
 */
2240 int cxgb4_init_ethtool_filters(struct adapter *adap)
2242 struct cxgb4_ethtool_filter_info *eth_filter_info;
2243 struct cxgb4_ethtool_filter *eth_filter;
2244 struct tid_info *tids = &adap->tids;
2248 eth_filter = kzalloc(sizeof(*eth_filter), GFP_KERNEL);
2252 eth_filter_info = kcalloc(adap->params.nports,
2253 sizeof(*eth_filter_info),
2255 if (!eth_filter_info) {
2257 goto free_eth_filter;
2260 eth_filter->port = eth_filter_info;
2262 nentries = tids->nhpftids + tids->nftids;
2263 if (is_hashfilter(adap))
2264 nentries += tids->nhash +
2265 (adap->tids.stid_base - adap->tids.tid_base);
2266 eth_filter->nentries = nentries;
2268 for (i = 0; i < adap->params.nports; i++) {
/* NOTE(review): loc_array is allocated with size `nentries` bytes, but
 * it stores one tid per entry — looks like it should be
 * `nentries * sizeof(*loc_array)`; elided declarations prevent
 * confirming here. TODO verify element type.
 */
2269 eth_filter->port[i].loc_array = kvzalloc(nentries, GFP_KERNEL);
2270 if (!eth_filter->port[i].loc_array) {
2272 goto free_eth_finfo;
2275 eth_filter->port[i].bmap = bitmap_zalloc(nentries, GFP_KERNEL);
2276 if (!eth_filter->port[i].bmap) {
2278 goto free_eth_finfo;
2282 adap->ethtool_filters = eth_filter;
/* Error unwind: free the partially-initialized ports, then the array,
 * then (in the elided tail) the top-level struct.
 */
2287 bitmap_free(eth_filter->port[i].bmap);
2288 kvfree(eth_filter->port[i].loc_array);
2290 kfree(eth_filter_info);
/* cxgb4_set_ethtool_ops - attach the cxgb4 ethtool operations to a
 * net device.
 */
2298 void cxgb4_set_ethtool_ops(struct net_device *netdev)
2300 netdev->ethtool_ops = &cxgb_ethtool_ops;