1 // SPDX-License-Identifier: GPL-2.0-only
2 // Copyright 2013 Cisco Systems, Inc. All rights reserved.
4 #include <linux/netdevice.h>
5 #include <linux/ethtool.h>
6 #include <linux/net_tstamp.h>
11 #include "enic_clsf.h"
13 #include "vnic_stats.h"
/* Fragment of struct enic_stat: human-readable stat name shown by
 * "ethtool -S".  NOTE(review): the surrounding struct declaration
 * lines were lost in extraction — confirm against the original file.
 */
16 char name[ETH_GSTRING_LEN];
/* Table-entry builders for struct enic_stat: each records the u64 slot
 * index of @stat within the corresponding vnic_*_stats structure, so
 * the stats dump can index the raw hardware stats block directly.
 * NOTE(review): the ".name" initializer lines and closing braces of
 * these macros appear to have been dropped in extraction — verify.
 */
20 #define ENIC_TX_STAT(stat) { \
22 .index = offsetof(struct vnic_tx_stats, stat) / sizeof(u64) \
25 #define ENIC_RX_STAT(stat) { \
27 .index = offsetof(struct vnic_rx_stats, stat) / sizeof(u64) \
30 #define ENIC_GEN_STAT(stat) { \
32 .index = offsetof(struct vnic_gen_stats, stat) / sizeof(u64)\
/* TX statistics exported via "ethtool -S"; each entry maps a display
 * name to a u64 slot in struct vnic_tx_stats. */
35 static const struct enic_stat enic_tx_stats[] = {
36 ENIC_TX_STAT(tx_frames_ok),
37 ENIC_TX_STAT(tx_unicast_frames_ok),
38 ENIC_TX_STAT(tx_multicast_frames_ok),
39 ENIC_TX_STAT(tx_broadcast_frames_ok),
40 ENIC_TX_STAT(tx_bytes_ok),
41 ENIC_TX_STAT(tx_unicast_bytes_ok),
42 ENIC_TX_STAT(tx_multicast_bytes_ok),
43 ENIC_TX_STAT(tx_broadcast_bytes_ok),
44 ENIC_TX_STAT(tx_drops),
45 ENIC_TX_STAT(tx_errors),
/* RX statistics exported via "ethtool -S"; each entry maps a display
 * name to a u64 slot in struct vnic_rx_stats (counters plus the
 * frame-size histogram buckets). */
49 static const struct enic_stat enic_rx_stats[] = {
50 ENIC_RX_STAT(rx_frames_ok),
51 ENIC_RX_STAT(rx_frames_total),
52 ENIC_RX_STAT(rx_unicast_frames_ok),
53 ENIC_RX_STAT(rx_multicast_frames_ok),
54 ENIC_RX_STAT(rx_broadcast_frames_ok),
55 ENIC_RX_STAT(rx_bytes_ok),
56 ENIC_RX_STAT(rx_unicast_bytes_ok),
57 ENIC_RX_STAT(rx_multicast_bytes_ok),
58 ENIC_RX_STAT(rx_broadcast_bytes_ok),
59 ENIC_RX_STAT(rx_drop),
60 ENIC_RX_STAT(rx_no_bufs),
61 ENIC_RX_STAT(rx_errors),
63 ENIC_RX_STAT(rx_crc_errors),
64 ENIC_RX_STAT(rx_frames_64),
65 ENIC_RX_STAT(rx_frames_127),
66 ENIC_RX_STAT(rx_frames_255),
67 ENIC_RX_STAT(rx_frames_511),
68 ENIC_RX_STAT(rx_frames_1023),
69 ENIC_RX_STAT(rx_frames_1518),
70 ENIC_RX_STAT(rx_frames_to_max),
/* Driver-generated (software) statistics, kept in enic->gen_stats. */
73 static const struct enic_stat enic_gen_stats[] = {
74 ENIC_GEN_STAT(dma_map_error),
/* Cached element counts of the three stat tables above. */
77 static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
78 static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
79 static const unsigned int enic_n_gen_stats = ARRAY_SIZE(enic_gen_stats);
/* Program the RX interrupt coalescing timer @timer on the MSI-X
 * interrupt resource of every receive queue. */
81 static void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
86 for (i = 0; i < enic->rq_count; i++) {
87 intr = enic_msix_rq_intr(enic, i);
88 vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
/* ethtool get_link_ksettings: the adapter presents a fixed fibre port
 * with autonegotiation disabled.  When carrier is up, speed comes from
 * vnic_dev_port_speed() and duplex is full; otherwise both are
 * reported unknown. */
92 static int enic_get_ksettings(struct net_device *netdev,
93 struct ethtool_link_ksettings *ecmd)
95 struct enic *enic = netdev_priv(netdev);
96 struct ethtool_link_settings *base = &ecmd->base;
98 ethtool_link_ksettings_add_link_mode(ecmd, supported,
100 ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);
101 ethtool_link_ksettings_add_link_mode(ecmd, advertising,
103 ethtool_link_ksettings_add_link_mode(ecmd, advertising, FIBRE);
104 base->port = PORT_FIBRE;
106 if (netif_carrier_ok(netdev)) {
107 base->speed = vnic_dev_port_speed(enic->vdev);
108 base->duplex = DUPLEX_FULL;
110 base->speed = SPEED_UNKNOWN;
111 base->duplex = DUPLEX_UNKNOWN;
114 base->autoneg = AUTONEG_DISABLE;
/* ethtool get_drvinfo: fill in driver name, firmware version and PCI
 * bus id.  Firmware info is fetched with enic_dev_fw_info(); per the
 * in-code comment, only a dma_alloc_coherent failure aborts early —
 * other devcmd failures still report previously cached data. */
119 static void enic_get_drvinfo(struct net_device *netdev,
120 struct ethtool_drvinfo *drvinfo)
122 struct enic *enic = netdev_priv(netdev);
123 struct vnic_devcmd_fw_info *fw_info;
126 err = enic_dev_fw_info(enic, &fw_info);
127 /* return only when dma_alloc_coherent fails in vnic_dev_fw_info
128 * For other failures, like devcmd failure, we return previously
134 strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
135 strscpy(drvinfo->fw_version, fw_info->fw_version,
136 sizeof(drvinfo->fw_version));
137 strscpy(drvinfo->bus_info, pci_name(enic->pdev),
138 sizeof(drvinfo->bus_info));
/* ethtool get_strings: emit the stat name strings in the same order
 * enic_get_ethtool_stats() emits values — tx, then rx, then gen —
 * advancing ETH_GSTRING_LEN bytes per name. */
141 static void enic_get_strings(struct net_device *netdev, u32 stringset,
148 for (i = 0; i < enic_n_tx_stats; i++) {
149 memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
150 data += ETH_GSTRING_LEN;
152 for (i = 0; i < enic_n_rx_stats; i++) {
153 memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
154 data += ETH_GSTRING_LEN;
156 for (i = 0; i < enic_n_gen_stats; i++) {
157 memcpy(data, enic_gen_stats[i].name, ETH_GSTRING_LEN);
158 data += ETH_GSTRING_LEN;
/* ethtool get_ringparam: report the configured and maximum RQ/WQ
 * descriptor counts from the vNIC enet config. */
164 static void enic_get_ringparam(struct net_device *netdev,
165 struct ethtool_ringparam *ring,
166 struct kernel_ethtool_ringparam *kernel_ring,
167 struct netlink_ext_ack *extack)
169 struct enic *enic = netdev_priv(netdev);
170 struct vnic_enet_config *c = &enic->config;
172 ring->rx_max_pending = ENIC_MAX_RQ_DESCS;
173 ring->rx_pending = c->rq_desc_count;
174 ring->tx_max_pending = ENIC_MAX_WQ_DESCS;
175 ring->tx_pending = c->wq_desc_count;
/* ethtool set_ringparam: resize the RQ/WQ descriptor rings.
 *
 * Rejects mini/jumbo ring parameters (unsupported), range-checks the
 * requested counts against ENIC_MIN/MAX_*_DESCS, masks them down to a
 * multiple of 32 (hardware alignment requirement), then frees and
 * re-allocates the vNIC resources and re-opens the interface if it
 * was running.  The previous counts are saved in rx_pending/tx_pending
 * — presumably to roll back on allocation failure (the rollback path
 * is not visible here; TODO confirm).
 * NOTE(review): several lines of this function (masked assignments'
 * left-hand sides, error returns, dev_open error handling) were lost
 * in extraction — verify against the original file.
 */
178 static int enic_set_ringparam(struct net_device *netdev,
179 struct ethtool_ringparam *ring,
180 struct kernel_ethtool_ringparam *kernel_ring,
181 struct netlink_ext_ack *extack)
183 struct enic *enic = netdev_priv(netdev);
184 struct vnic_enet_config *c = &enic->config;
185 int running = netif_running(netdev);
186 unsigned int rx_pending;
187 unsigned int tx_pending;
190 if (ring->rx_mini_max_pending || ring->rx_mini_pending) {
192 "modifying mini ring params is not supported");
195 if (ring->rx_jumbo_max_pending || ring->rx_jumbo_pending) {
197 "modifying jumbo ring params is not supported");
200 rx_pending = c->rq_desc_count;
201 tx_pending = c->wq_desc_count;
202 if (ring->rx_pending > ENIC_MAX_RQ_DESCS ||
203 ring->rx_pending < ENIC_MIN_RQ_DESCS) {
204 netdev_info(netdev, "rx pending (%u) not in range [%u,%u]",
205 ring->rx_pending, ENIC_MIN_RQ_DESCS,
209 if (ring->tx_pending > ENIC_MAX_WQ_DESCS ||
210 ring->tx_pending < ENIC_MIN_WQ_DESCS) {
211 netdev_info(netdev, "tx pending (%u) not in range [%u,%u]",
212 ring->tx_pending, ENIC_MIN_WQ_DESCS,
219 ring->rx_pending & 0xffffffe0; /* must be aligned to groups of 32 */
221 ring->tx_pending & 0xffffffe0; /* must be aligned to groups of 32 */
222 enic_free_vnic_resources(enic);
223 err = enic_alloc_vnic_resources(enic);
226 "Failed to alloc vNIC resources, aborting\n");
227 enic_free_vnic_resources(enic);
230 enic_init_vnic_resources(enic);
232 err = dev_open(netdev, NULL);
238 c->rq_desc_count = rx_pending;
239 c->wq_desc_count = tx_pending;
/* ethtool get_sset_count: total number of exported statistics
 * (tx + rx + gen) for the stats string set. */
243 static int enic_get_sset_count(struct net_device *netdev, int sset)
247 return enic_n_tx_stats + enic_n_rx_stats + enic_n_gen_stats;
/* ethtool get_ethtool_stats: dump the hardware stats block via
 * enic_dev_stats_dump(), then copy out the u64 values indexed by the
 * enic_*_stats tables — tx, rx, then driver-generated stats — in the
 * same order as enic_get_strings().  Per the in-code comment, only a
 * dma_alloc_coherent failure aborts early. */
253 static void enic_get_ethtool_stats(struct net_device *netdev,
254 struct ethtool_stats *stats, u64 *data)
256 struct enic *enic = netdev_priv(netdev);
257 struct vnic_stats *vstats;
261 err = enic_dev_stats_dump(enic, &vstats);
262 /* return only when dma_alloc_coherent fails in vnic_dev_stats_dump
263 * For other failures, like devcmd failure, we return previously
269 for (i = 0; i < enic_n_tx_stats; i++)
270 *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
271 for (i = 0; i < enic_n_rx_stats; i++)
272 *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index];
273 for (i = 0; i < enic_n_gen_stats; i++)
274 *(data++) = ((u64 *)&enic->gen_stats)[enic_gen_stats[i].index];
/* ethtool get/set_msglevel: read and write the driver's netif message
 * level bitmap (enic->msg_enable). */
277 static u32 enic_get_msglevel(struct net_device *netdev)
279 struct enic *enic = netdev_priv(netdev);
280 return enic->msg_enable;
283 static void enic_set_msglevel(struct net_device *netdev, u32 value)
285 struct enic *enic = netdev_priv(netdev);
286 enic->msg_enable = value;
/* ethtool get_coalesce: report the current interrupt coalescing
 * settings.  tx_coalesce_usecs is only meaningful in MSI-X mode;
 * adaptive-RX state and its low/high usec range come from the
 * enic_rx_coal settings. */
289 static int enic_get_coalesce(struct net_device *netdev,
290 struct ethtool_coalesce *ecmd,
291 struct kernel_ethtool_coalesce *kernel_coal,
292 struct netlink_ext_ack *extack)
294 struct enic *enic = netdev_priv(netdev);
295 struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
297 if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
298 ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
299 ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
300 if (rxcoal->use_adaptive_rx_coalesce)
301 ecmd->use_adaptive_rx_coalesce = 1;
302 ecmd->rx_coalesce_usecs_low = rxcoal->small_pkt_range_start;
303 ecmd->rx_coalesce_usecs_high = rxcoal->range_end;
/* Validate a requested coalescing configuration:
 *  - tx_coalesce_usecs is rejected unless the device is in MSI-X mode;
 *  - values above the adapter's max are allowed but logged (they get
 *    clamped to the max by the caller);
 *  - when rx_coalesce_usecs_high is set, the (clamped) high value must
 *    leave at least ENIC_AIC_LARGE_PKT_DIFF above the low value.
 * NOTE(review): the return statements are not visible in this
 * extraction — confirm error codes against the original file. */
308 static int enic_coalesce_valid(struct enic *enic,
309 struct ethtool_coalesce *ec)
311 u32 coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
312 u32 rx_coalesce_usecs_high = min_t(u32, coalesce_usecs_max,
313 ec->rx_coalesce_usecs_high);
314 u32 rx_coalesce_usecs_low = min_t(u32, coalesce_usecs_max,
315 ec->rx_coalesce_usecs_low);
317 if ((vnic_dev_get_intr_mode(enic->vdev) != VNIC_DEV_INTR_MODE_MSIX) &&
318 ec->tx_coalesce_usecs)
321 if ((ec->tx_coalesce_usecs > coalesce_usecs_max) ||
322 (ec->rx_coalesce_usecs > coalesce_usecs_max) ||
323 (ec->rx_coalesce_usecs_low > coalesce_usecs_max) ||
324 (ec->rx_coalesce_usecs_high > coalesce_usecs_max))
325 netdev_info(enic->netdev, "ethtool_set_coalesce: adaptor supports max coalesce value of %d. Setting max value.\n",
328 if (ec->rx_coalesce_usecs_high &&
329 (rx_coalesce_usecs_high <
330 rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
/* ethtool set_coalesce: apply validated coalescing settings.
 *
 * All requested values are clamped to the adapter max.  In MSI-X mode
 * the TX timer is programmed on every WQ interrupt.  Adaptive RX
 * coalescing is toggled from use_adaptive_rx_coalesce; when disabled,
 * the fixed RX timer is programmed via enic_intr_coal_set_rx().  When
 * rx_coalesce_usecs_high is set, the adaptive range end and the
 * small/large packet range starts are updated. */
336 static int enic_set_coalesce(struct net_device *netdev,
337 struct ethtool_coalesce *ecmd,
338 struct kernel_ethtool_coalesce *kernel_coal,
339 struct netlink_ext_ack *extack)
341 struct enic *enic = netdev_priv(netdev);
342 u32 tx_coalesce_usecs;
343 u32 rx_coalesce_usecs;
344 u32 rx_coalesce_usecs_low;
345 u32 rx_coalesce_usecs_high;
346 u32 coalesce_usecs_max;
347 unsigned int i, intr;
349 struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
351 ret = enic_coalesce_valid(enic, ecmd);
354 coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
355 tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
357 rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
360 rx_coalesce_usecs_low = min_t(u32, ecmd->rx_coalesce_usecs_low,
362 rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high,
365 if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
366 for (i = 0; i < enic->wq_count; i++) {
367 intr = enic_msix_wq_intr(enic, i);
368 vnic_intr_coalescing_timer_set(&enic->intr[intr],
371 enic->tx_coalesce_usecs = tx_coalesce_usecs;
373 rxcoal->use_adaptive_rx_coalesce = !!ecmd->use_adaptive_rx_coalesce;
374 if (!rxcoal->use_adaptive_rx_coalesce)
375 enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
376 if (ecmd->rx_coalesce_usecs_high) {
377 rxcoal->range_end = rx_coalesce_usecs_high;
378 rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
379 rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
380 ENIC_AIC_LARGE_PKT_DIFF;
383 enic->rx_coalesce_usecs = rx_coalesce_usecs;
/* ETHTOOL_GRXCLSRLALL helper: walk every bucket of the RFS filter
 * hash table and collect each filter's id into rule_locs, stopping at
 * cmd->rule_cnt entries.  cmd->data is set to the number of installed
 * filters (max - free).  Caller holds enic->rfs_h.lock. */
388 static int enic_grxclsrlall(struct enic *enic, struct ethtool_rxnfc *cmd,
391 int j, ret = 0, cnt = 0;
393 cmd->data = enic->rfs_h.max - enic->rfs_h.free;
394 for (j = 0; j < (1 << ENIC_RFS_FLW_BITSHIFT); j++) {
395 struct hlist_head *hhead;
396 struct hlist_node *tmp;
397 struct enic_rfs_fltr_node *n;
399 hhead = &enic->rfs_h.ht_head[j];
400 hlist_for_each_entry_safe(n, tmp, hhead, node) {
401 if (cnt == cmd->rule_cnt)
403 rule_locs[cnt] = n->fltr_id;
/* ETHTOOL_GRXCLSRULE helper: look up the RFS filter at fs.location
 * and translate it into an ethtool_rx_flow_spec — flow type from the
 * stored ip_proto (TCP/UDP over IPv4), exact-match masks on the
 * src/dst addresses and ports, and the target RQ as the ring cookie.
 * Caller holds enic->rfs_h.lock. */
412 static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
414 struct ethtool_rx_flow_spec *fsp =
415 (struct ethtool_rx_flow_spec *)&cmd->fs;
416 struct enic_rfs_fltr_node *n;
418 n = htbl_fltr_search(enic, (u16)fsp->location);
421 switch (n->keys.basic.ip_proto) {
423 fsp->flow_type = TCP_V4_FLOW;
426 fsp->flow_type = UDP_V4_FLOW;
432 fsp->h_u.tcp_ip4_spec.ip4src = flow_get_u32_src(&n->keys);
433 fsp->m_u.tcp_ip4_spec.ip4src = (__u32)~0;
435 fsp->h_u.tcp_ip4_spec.ip4dst = flow_get_u32_dst(&n->keys);
436 fsp->m_u.tcp_ip4_spec.ip4dst = (__u32)~0;
438 fsp->h_u.tcp_ip4_spec.psrc = n->keys.ports.src;
439 fsp->m_u.tcp_ip4_spec.psrc = (__u16)~0;
441 fsp->h_u.tcp_ip4_spec.pdst = n->keys.ports.dst;
442 fsp->m_u.tcp_ip4_spec.pdst = (__u16)~0;
444 fsp->ring_cookie = n->rq_id;
/* ETHTOOL_GRXFH helper: report which header fields feed the RSS hash
 * for cmd->flow_type.  IP src/dst always contribute; L4 ports are
 * added for TCP flows, and for UDP flows only when the device reports
 * the corresponding NIC_CFG_RSS_HASH_TYPE_UDP_* capability.  The
 * capability query is serialized under devcmd_lock. */
449 static int enic_get_rx_flow_hash(struct enic *enic, struct ethtool_rxnfc *cmd)
451 u8 rss_hash_type = 0;
454 spin_lock_bh(&enic->devcmd_lock);
455 (void)vnic_dev_capable_rss_hash_type(enic->vdev, &rss_hash_type);
456 spin_unlock_bh(&enic->devcmd_lock);
457 switch (cmd->flow_type) {
460 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3 |
461 RXH_IP_SRC | RXH_IP_DST;
464 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
465 if (rss_hash_type & NIC_CFG_RSS_HASH_TYPE_UDP_IPV6)
466 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
469 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
470 if (rss_hash_type & NIC_CFG_RSS_HASH_TYPE_UDP_IPV4)
471 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
483 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
/* ethtool get_rxnfc dispatcher: ring count, RFS rule count/list/
 * lookup (each under rfs_h.lock), and RSS flow-hash queries. */
492 static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
495 struct enic *enic = netdev_priv(dev);
499 case ETHTOOL_GRXRINGS:
500 cmd->data = enic->rq_count;
502 case ETHTOOL_GRXCLSRLCNT:
503 spin_lock_bh(&enic->rfs_h.lock);
504 cmd->rule_cnt = enic->rfs_h.max - enic->rfs_h.free;
505 cmd->data = enic->rfs_h.max;
506 spin_unlock_bh(&enic->rfs_h.lock);
508 case ETHTOOL_GRXCLSRLALL:
509 spin_lock_bh(&enic->rfs_h.lock);
510 ret = enic_grxclsrlall(enic, cmd, rule_locs);
511 spin_unlock_bh(&enic->rfs_h.lock);
513 case ETHTOOL_GRXCLSRULE:
514 spin_lock_bh(&enic->rfs_h.lock);
515 ret = enic_grxclsrule(enic, cmd);
516 spin_unlock_bh(&enic->rfs_h.lock);
519 ret = enic_get_rx_flow_hash(enic, cmd);
/* ethtool get/set_tunable: only ETHTOOL_RX_COPYBREAK is handled,
 * reading/writing enic->rx_copybreak as a u32. */
529 static int enic_get_tunable(struct net_device *dev,
530 const struct ethtool_tunable *tuna, void *data)
532 struct enic *enic = netdev_priv(dev);
536 case ETHTOOL_RX_COPYBREAK:
537 *(u32 *)data = enic->rx_copybreak;
547 static int enic_set_tunable(struct net_device *dev,
548 const struct ethtool_tunable *tuna,
551 struct enic *enic = netdev_priv(dev);
555 case ETHTOOL_RX_COPYBREAK:
556 enic->rx_copybreak = *(u32 *)data;
/* ethtool get_rxfh_key_size — body not visible in this extraction;
 * presumably returns ENIC_RSS_LEN (the size used by enic_get_rxfh
 * below) — TODO confirm. */
566 static u32 enic_get_rxfh_key_size(struct net_device *netdev)
/* ethtool get_rxfh: copy out the current RSS key and report the
 * Toeplitz ("top") hash function. */
571 static int enic_get_rxfh(struct net_device *netdev,
572 struct ethtool_rxfh_param *rxfh)
574 struct enic *enic = netdev_priv(netdev);
577 memcpy(rxfh->key, enic->rss_key, ENIC_RSS_LEN);
579 rxfh->hfunc = ETH_RSS_HASH_TOP;
/* ethtool set_rxfh: only the Toeplitz hash function (or "no change")
 * is accepted; copy the new RSS key and push it to the device via
 * __enic_set_rsskey(). */
584 static int enic_set_rxfh(struct net_device *netdev,
585 struct ethtool_rxfh_param *rxfh,
586 struct netlink_ext_ack *extack)
588 struct enic *enic = netdev_priv(netdev);
591 (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
592 rxfh->hfunc != ETH_RSS_HASH_TOP))
596 memcpy(enic->rss_key, rxfh->key, ENIC_RSS_LEN);
598 return __enic_set_rsskey(enic);
/* ethtool get_ts_info: advertise software-only timestamping (no
 * hardware timestamp support). */
601 static int enic_get_ts_info(struct net_device *netdev,
602 struct ethtool_ts_info *info)
604 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
605 SOF_TIMESTAMPING_RX_SOFTWARE |
606 SOF_TIMESTAMPING_SOFTWARE;
/* ethtool operations table.  supported_coalesce_params limits
 * set_coalesce to exactly the fields enic_set_coalesce handles. */
611 static const struct ethtool_ops enic_ethtool_ops = {
612 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
613 ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
614 ETHTOOL_COALESCE_RX_USECS_LOW |
615 ETHTOOL_COALESCE_RX_USECS_HIGH,
616 .get_drvinfo = enic_get_drvinfo,
617 .get_msglevel = enic_get_msglevel,
618 .set_msglevel = enic_set_msglevel,
619 .get_link = ethtool_op_get_link,
620 .get_strings = enic_get_strings,
621 .get_ringparam = enic_get_ringparam,
622 .set_ringparam = enic_set_ringparam,
623 .get_sset_count = enic_get_sset_count,
624 .get_ethtool_stats = enic_get_ethtool_stats,
625 .get_coalesce = enic_get_coalesce,
626 .set_coalesce = enic_set_coalesce,
627 .get_rxnfc = enic_get_rxnfc,
628 .get_tunable = enic_get_tunable,
629 .set_tunable = enic_set_tunable,
630 .get_rxfh_key_size = enic_get_rxfh_key_size,
631 .get_rxfh = enic_get_rxfh,
632 .set_rxfh = enic_set_rxfh,
633 .get_link_ksettings = enic_get_ksettings,
634 .get_ts_info = enic_get_ts_info,
/* Attach the enic ethtool operations to @netdev (called at probe). */
637 void enic_set_ethtool_ops(struct net_device *netdev)
639 netdev->ethtool_ops = &enic_ethtool_ops;