/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/bpf.h>
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <net/ip.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>
#include <net/devlink.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4_en.h"
#include "en_port.h"
int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int i;
	unsigned int offset = 0;

	if (up && up != MLX4_EN_NUM_UP)
		return -EINVAL;

	netdev_set_num_tc(dev, up);

	/* Partition Tx queues evenly amongst UP's */
	for (i = 0; i < up; i++) {
		netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
		offset += priv->num_tx_rings_p_up;
	}

#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev)) {
		if (up) {
			if (priv->dcbx_cap)
				priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
		} else {
			priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
			priv->cee_config.pfc_state = false;
		}
	}
#endif /* CONFIG_MLX4_EN_DCB */

	return 0;
}
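/* Worked example of the queue partitioning above (illustrative values,
 * not from the original source): with num_tx_rings_p_up = 4 and
 * up = MLX4_EN_NUM_UP, TC i is backed by the contiguous queue range
 * [4 * i, 4 * i + 3], so TC 0 owns queues 0-3, TC 1 owns queues 4-7,
 * and so on.
 */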
static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
			      struct tc_to_netdev *tc)
{
	if (tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	return mlx4_en_setup_tc(dev, tc->tc);
}

#ifdef CONFIG_RFS_ACCEL

struct mlx4_en_filter {
	struct list_head next;
	struct work_struct work;

	u8     ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;

	int rxq_index;
	struct mlx4_en_priv *priv;
	u32 flow_id;			/* RFS infrastructure id */
	int id;				/* mlx4_en driver id */
	u64 reg_id;			/* Flow steering API id */
	u8 activated;			/* Used to prevent expiry before filter
					 * is attached
					 */
	struct hlist_node filter_chain;
};

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);

static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_UDP:
		return MLX4_NET_TRANS_RULE_ID_UDP;
	case IPPROTO_TCP:
		return MLX4_NET_TRANS_RULE_ID_TCP;
	default:
		return MLX4_NET_TRANS_RULE_NUM;
	}
}
/* Must not acquire state_lock, as its corresponding work_sync
 * is done under it.
 */
static void mlx4_en_filter_work(struct work_struct *work)
{
	struct mlx4_en_filter *filter = container_of(work,
						     struct mlx4_en_filter,
						     work);
	struct mlx4_en_priv *priv = filter->priv;
	struct mlx4_spec_list spec_tcp_udp = {
		.id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
		{
			.tcp_udp = {
				.dst_port = filter->dst_port,
				.dst_port_msk = (__force __be16)-1,
				.src_port = filter->src_port,
				.src_port_msk = (__force __be16)-1,
			},
		},
	};
	struct mlx4_spec_list spec_ip = {
		.id = MLX4_NET_TRANS_RULE_ID_IPV4,
		{
			.ipv4 = {
				.dst_ip = filter->dst_ip,
				.dst_ip_msk = (__force __be32)-1,
				.src_ip = filter->src_ip,
				.src_ip_msk = (__force __be32)-1,
			},
		},
	};
	struct mlx4_spec_list spec_eth = {
		.id = MLX4_NET_TRANS_RULE_ID_ETH,
	};
	struct mlx4_net_trans_rule rule = {
		.list = LIST_HEAD_INIT(rule.list),
		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
		.exclusive = 1,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_REGULAR,
		.port = priv->port,
		.priority = MLX4_DOMAIN_RFS,
	};
	int rc;
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
		en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
			filter->ip_proto);
		goto ignore;
	}
	list_add_tail(&spec_eth.list, &rule.list);
	list_add_tail(&spec_ip.list, &rule.list);
	list_add_tail(&spec_tcp_udp.list, &rule.list);

	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	filter->activated = 0;

	if (filter->reg_id) {
		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
		if (rc && rc != -ENOENT)
			en_err(priv, "Error detaching flow. rc = %d\n", rc);
	}

	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
	if (rc)
		en_err(priv, "Error attaching flow. err = %d\n", rc);

ignore:
	mlx4_en_filter_rfs_expire(priv);

	filter->activated = 1;
}
static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		   __be16 src_port, __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);
	l ^= (__force unsigned long)(src_ip ^ dst_ip);

	bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

	return &priv->filter_hash[bucket_idx];
}
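/* Illustrative note on the key construction above (an explanatory sketch,
 * not upstream text): the two 16-bit ports are folded together, with
 * dst_port shifted left by 2 so that src_port == dst_port flows do not
 * collapse to a plain OR, and the result is XORed with src_ip ^ dst_ip.
 * hash_long() then reduces that key to a MLX4_EN_FILTER_HASH_SHIFT-bit
 * bucket index, e.g. 256 buckets if the shift is 8.
 */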
static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
		     __be32 dst_ip, u8 ip_proto, __be16 src_port,
		     __be16 dst_port, u32 flow_id)
{
	struct mlx4_en_filter *filter = NULL;

	filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
	if (!filter)
		return NULL;

	filter->priv = priv;
	filter->rxq_index = rxq_index;
	INIT_WORK(&filter->work, mlx4_en_filter_work);

	filter->src_ip = src_ip;
	filter->dst_ip = dst_ip;
	filter->ip_proto = ip_proto;
	filter->src_port = src_port;
	filter->dst_port = dst_port;

	filter->flow_id = flow_id;

	filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

	list_add_tail(&filter->next, &priv->filters);
	hlist_add_head(&filter->filter_chain,
		       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
					  dst_port));

	return filter;
}

static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
	struct mlx4_en_priv *priv = filter->priv;
	int rc;

	list_del(&filter->next);

	rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
	if (rc && rc != -ENOENT)
		en_err(priv, "Error detaching flow. rc = %d\n", rc);

	kfree(filter);
}

static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		    u8 ip_proto, __be16 src_port, __be16 dst_port)
{
	struct mlx4_en_filter *filter;
	struct mlx4_en_filter *ret = NULL;

	hlist_for_each_entry(filter,
			     filter_hash_bucket(priv, src_ip, dst_ip,
						src_port, dst_port),
			     filter_chain) {
		if (filter->src_ip == src_ip &&
		    filter->dst_ip == dst_ip &&
		    filter->ip_proto == ip_proto &&
		    filter->src_port == src_port &&
		    filter->dst_port == dst_port) {
			ret = filter;
			break;
		}
	}

	return ret;
}
static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct mlx4_en_priv *priv = netdev_priv(net_dev);
	struct mlx4_en_filter *filter;
	const struct iphdr *ip;
	const __be16 *ports;
	u8 ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;
	int nhoff = skb_network_offset(skb);
	int ret = 0;

	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;

	if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
		return -EPROTONOSUPPORT;
	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	ip_proto = ip->protocol;
	src_ip = ip->saddr;
	dst_ip = ip->daddr;
	src_port = ports[0];
	dst_port = ports[1];

	spin_lock_bh(&priv->filters_lock);
	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
				     src_port, dst_port);
	if (filter) {
		if (filter->rxq_index == rxq_index)
			goto out;

		filter->rxq_index = rxq_index;
	} else {
		filter = mlx4_en_filter_alloc(priv, rxq_index,
					      src_ip, dst_ip, ip_proto,
					      src_port, dst_port, flow_id);
		if (!filter) {
			ret = -ENOMEM;
			goto err;
		}
	}

	queue_work(priv->mdev->workqueue, &filter->work);

out:
	ret = filter->id;
err:
	spin_unlock_bh(&priv->filters_lock);

	return ret;
}
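/* Sketch of the contract assumed here: this is the ndo_rx_flow_steer()
 * callback, so on success it must return a driver-local filter id.
 * The RFS core later hands that id back through rps_may_expire_flow()
 * (see mlx4_en_filter_rfs_expire() below) when asking whether the
 * filter may be aged out.
 */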
void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter, *tmp;
	LIST_HEAD(del_list);

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		list_move(&filter->next, &del_list);
		hlist_del(&filter->filter_chain);
	}
	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next) {
		cancel_work_sync(&filter->work);
		mlx4_en_filter_free(filter);
	}
}

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
	LIST_HEAD(del_list);
	int i = 0;

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
			break;

		if (filter->activated &&
		    !work_pending(&filter->work) &&
		    rps_may_expire_flow(priv->dev,
					filter->rxq_index, filter->flow_id,
					filter->id)) {
			list_move(&filter->next, &del_list);
			hlist_del(&filter->filter_chain);
		} else
			last_filter = filter;

		i++;
	}

	if (last_filter && (&last_filter->next != priv->filters.next))
		list_move(&priv->filters, &last_filter->next);

	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next)
		mlx4_en_filter_free(filter);
}
#endif
static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx);
	if (err)
		en_dbg(HW, priv, "Failed adding vlan %d\n", vid);
	mutex_unlock(&mdev->state_lock);

	return err;
}

static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	mlx4_unregister_vlan(mdev->dev, priv->port, vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);

	return 0;
}
static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
{
	int i;

	for (i = ETH_ALEN - 1; i >= 0; --i) {
		dst_mac[i] = src_mac & 0xff;
		src_mac >>= 8;
	}
	memset(&dst_mac[ETH_ALEN], 0, 2);
}
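/* Byte-order example (illustrative): src_mac = 0x0002c9000102 yields
 * dst_mac[] = {0x00, 0x02, 0xc9, 0x00, 0x01, 0x02}, i.e. the MAC
 * 00:02:c9:00:01:02 in network byte order, with the two trailing pad
 * bytes zeroed.
 */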
static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
				    int qpn, u64 *reg_id)
{
	int err;

	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
	    priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
		return 0; /* do nothing */

	err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
				    MLX4_DOMAIN_NIC, reg_id);
	if (err) {
		en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
		return err;
	}
	en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id);
	return 0;
}

static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
				unsigned char *mac, int *qpn, u64 *reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = *qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		struct mlx4_spec_list spec_eth = { {NULL} };
		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

		struct mlx4_net_trans_rule rule = {
			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
			.exclusive = 0,
			.allow_loopback = 1,
			.promisc_mode = MLX4_FS_REGULAR,
			.priority = MLX4_DOMAIN_NIC,
		};

		rule.port = priv->port;
		rule.qpn = *qpn;
		INIT_LIST_HEAD(&rule.list);

		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		list_add_tail(&spec_eth.list, &rule.list);

		err = mlx4_flow_attach(dev, &rule, reg_id);
		break;
	}
	default:
		return -EINVAL;
	}
	if (err)
		en_warn(priv, "Failed Attaching Unicast\n");

	return err;
}

static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
				     unsigned char *mac, int qpn, u64 reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		mlx4_flow_detach(dev, reg_id);
		break;
	}
	default:
		en_err(priv, "Invalid steering mode.\n");
	}
}
static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int index = 0;
	int err = 0;
	int *qpn = &priv->base_qpn;
	u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);

	en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
	       priv->dev->dev_addr);
	index = mlx4_register_mac(dev, priv->port, mac);
	if (index < 0) {
		err = index;
		en_err(priv, "Failed adding MAC: %pM\n",
		       priv->dev->dev_addr);
		return err;
	}

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		int base_qpn = mlx4_get_base_qpn(dev, priv->port);
		*qpn = base_qpn + index;
		return 0;
	}

	err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP);
	en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
	if (err) {
		en_err(priv, "Failed to reserve qp for mac registration\n");
		mlx4_unregister_mac(dev, priv->port, mac);
		return err;
	}

	return 0;
}

static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int qpn = priv->base_qpn;

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
		en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
		       priv->dev->dev_addr);
		mlx4_unregister_mac(dev, priv->port, mac);
	} else {
		en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
		       priv->port, qpn);
		mlx4_qp_release_range(dev, qpn, 1);
		priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
	}
}
static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
			       unsigned char *new_mac, unsigned char *prev_mac)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err = 0;
	u64 new_mac_u64 = mlx4_mac_to_u64(new_mac);

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
		struct hlist_head *bucket;
		unsigned int mac_hash;
		struct mlx4_mac_entry *entry;
		struct hlist_node *tmp;
		u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac);

		bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
				mlx4_en_uc_steer_release(priv, entry->mac,
							 qpn, entry->reg_id);
				mlx4_unregister_mac(dev, priv->port,
						    prev_mac_u64);
				hlist_del_rcu(&entry->hlist);
				synchronize_rcu();
				memcpy(entry->mac, new_mac, ETH_ALEN);
				entry->reg_id = 0;
				mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
				hlist_add_head_rcu(&entry->hlist,
						   &priv->mac_hash[mac_hash]);
				mlx4_register_mac(dev, priv->port, new_mac_u64);
				err = mlx4_en_uc_steer_add(priv, new_mac,
							   &qpn,
							   &entry->reg_id);
				if (err)
					return err;
				if (priv->tunnel_reg_id) {
					mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
					priv->tunnel_reg_id = 0;
				}
				err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn,
							       &priv->tunnel_reg_id);
				return err;
			}
		}
		return -EINVAL;
	}

	return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
}
static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv,
			      unsigned char new_mac[ETH_ALEN + 2])
{
	int err = 0;

	if (priv->port_up) {
		/* Remove old MAC and insert the new one */
		err = mlx4_en_replace_mac(priv, priv->base_qpn,
					  new_mac, priv->current_mac);
		if (err)
			en_err(priv, "Failed changing HW MAC address\n");
	} else
		en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");

	if (!err)
		memcpy(priv->current_mac, new_mac, sizeof(priv->current_mac));

	return err;
}

static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sockaddr *saddr = addr;
	unsigned char new_mac[ETH_ALEN + 2];
	int err;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	mutex_lock(&mdev->state_lock);
	memcpy(new_mac, saddr->sa_data, ETH_ALEN);
	err = mlx4_en_do_set_mac(priv, new_mac);
	if (!err)
		memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
	mutex_unlock(&mdev->state_lock);

	return err;
}
static void mlx4_en_clear_list(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_mc_list *tmp, *mc_to_del;

	list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
		list_del(&mc_to_del->list);
		kfree(mc_to_del);
	}
}

static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	struct mlx4_en_mc_list *tmp;

	mlx4_en_clear_list(dev);
	netdev_for_each_mc_addr(ha, dev) {
		tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
		if (!tmp) {
			mlx4_en_clear_list(dev);
			return;
		}
		memcpy(tmp->addr, ha->addr, ETH_ALEN);
		list_add_tail(&tmp->list, &priv->mc_list);
	}
}
static void update_mclist_flags(struct mlx4_en_priv *priv,
				struct list_head *dst,
				struct list_head *src)
{
	struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
	bool found;

	/* Find all the entries that should be removed from dst,
	 * These are the entries that are not found in src
	 */
	list_for_each_entry(dst_tmp, dst, list) {
		found = false;
		list_for_each_entry(src_tmp, src, list) {
			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
				found = true;
				break;
			}
		}
		if (!found)
			dst_tmp->action = MCLIST_REM;
	}

	/* Add entries that exist in src but not in dst
	 * mark them as need to add
	 */
	list_for_each_entry(src_tmp, src, list) {
		found = false;
		list_for_each_entry(dst_tmp, dst, list) {
			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
				dst_tmp->action = MCLIST_NONE;
				found = true;
				break;
			}
		}
		if (!found) {
			new_mc = kmemdup(src_tmp,
					 sizeof(struct mlx4_en_mc_list),
					 GFP_KERNEL);
			if (!new_mc)
				return;

			new_mc->action = MCLIST_ADD;
			list_add_tail(&new_mc->list, dst);
		}
	}
}
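/* Explanatory sketch (added note, not upstream text): this is a two-pass
 * diff between the cached list (dst) and the freshly snapshotted netdev
 * list (src). Pass one marks dst-only entries MCLIST_REM; pass two marks
 * common entries MCLIST_NONE and copies src-only entries into dst as
 * MCLIST_ADD. mlx4_en_do_multicast() then walks dst and attaches or
 * detaches steering entries accordingly.
 */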
static void mlx4_en_set_rx_mode(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}

static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
				     struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
		if (netif_msg_rx_status(priv))
			en_warn(priv, "Entering promiscuous mode\n");
		priv->flags |= MLX4_EN_FLAG_PROMISC;

		/* Enable promiscuous mode */
		switch (mdev->dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			err = mlx4_flow_steer_promisc_add(mdev->dev,
							  priv->port,
							  priv->base_qpn,
							  MLX4_FS_ALL_DEFAULT);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			break;

		case MLX4_STEERING_MODE_B0:
			err = mlx4_unicast_promisc_add(mdev->dev,
						       priv->base_qpn,
						       priv->port);
			if (err)
				en_err(priv, "Failed enabling unicast promiscuous mode\n");

			/* Add the default qp number as multicast
			 * promisc
			 */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				if (err)
					en_err(priv, "Failed enabling multicast promiscuous mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}
			break;

		case MLX4_STEERING_MODE_A0:
			err = mlx4_SET_PORT_qpn_calc(mdev->dev,
						     priv->port,
						     priv->base_qpn,
						     1);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			break;
		}

		/* Disable port multicast filter (unconditionally) */
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");
	}
}

static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
				       struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (netif_msg_rx_status(priv))
		en_warn(priv, "Leaving promiscuous mode\n");
	priv->flags &= ~MLX4_EN_FLAG_PROMISC;

	/* Disable promiscuous mode */
	switch (mdev->dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		err = mlx4_flow_steer_promisc_remove(mdev->dev,
						     priv->port,
						     MLX4_FS_ALL_DEFAULT);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		break;

	case MLX4_STEERING_MODE_B0:
		err = mlx4_unicast_promisc_remove(mdev->dev,
						  priv->base_qpn,
						  priv->port);
		if (err)
			en_err(priv, "Failed disabling unicast promiscuous mode\n");
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev,
							    priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
		break;

	case MLX4_STEERING_MODE_A0:
		err = mlx4_SET_PORT_qpn_calc(mdev->dev,
					     priv->port,
					     priv->base_qpn, 0);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		break;
	}
}
static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct mlx4_en_mc_list *mclist, *tmp;
	u64 mcast_addr = 0;
	u8 mc_list[16] = {0};
	int err = 0;

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_add(mdev->dev,
								  priv->port,
								  priv->base_qpn,
								  MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_remove(mdev->dev,
								     priv->port,
								     MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_remove(mdev->dev,
								    priv->base_qpn,
								    priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore
		 */
		netif_addr_lock_bh(dev);
		mlx4_en_cache_mclist(dev);
		netif_addr_unlock_bh(dev);
		list_for_each_entry(mclist, &priv->mc_list, list) {
			mcast_addr = mlx4_mac_to_u64(mclist->addr);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");

		update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
		list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
			if (mclist->action == MCLIST_REM) {
				/* detach this address and delete from list */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				mc_list[5] = priv->port;
				err = mlx4_multicast_detach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    MLX4_PROT_ETH,
							    mclist->reg_id);
				if (err)
					en_err(priv, "Failed to detach multicast address\n");

				if (mclist->tunnel_reg_id) {
					err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id);
					if (err)
						en_err(priv, "Failed to detach multicast address\n");
				}

				/* remove from list */
				list_del(&mclist->list);
				kfree(mclist);
			} else if (mclist->action == MCLIST_ADD) {
				/* attach the address */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				/* needed for B0 steering support */
				mc_list[5] = priv->port;
				err = mlx4_multicast_attach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    priv->port, 0,
							    MLX4_PROT_ETH,
							    &mclist->reg_id);
				if (err)
					en_err(priv, "Failed to attach multicast address\n");

				err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
							       &mclist->tunnel_reg_id);
				if (err)
					en_err(priv, "Failed to attach multicast address\n");
			}
		}
	}
}
static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct netdev_hw_addr *ha;
	struct mlx4_mac_entry *entry;
	struct hlist_node *tmp;
	bool found;
	u64 mac;
	int err = 0;
	struct hlist_head *bucket;
	unsigned int i;
	int removed = 0;
	u32 prev_flags;

	/* Note that we do not need to protect our mac_hash traversal with rcu,
	 * since all modification code is protected by mdev->state_lock
	 */

	/* find what to remove */
	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			found = false;
			netdev_for_each_uc_addr(ha, dev) {
				if (ether_addr_equal_64bits(entry->mac,
							    ha->addr)) {
					found = true;
					break;
				}
			}

			/* MAC address of the port is not in uc list */
			if (ether_addr_equal_64bits(entry->mac,
						    priv->current_mac))
				found = true;

			if (!found) {
				mac = mlx4_mac_to_u64(entry->mac);
				mlx4_en_uc_steer_release(priv, entry->mac,
							 priv->base_qpn,
							 entry->reg_id);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);

				hlist_del_rcu(&entry->hlist);
				kfree_rcu(entry, rcu);
				en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
				       entry->mac, priv->port);
				++removed;
			}
		}
	}

	/* if we didn't remove anything, there is no use in trying to add
	 * again once we are in a forced promisc mode state
	 */
	if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
		return;

	prev_flags = priv->flags;
	priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;

	/* find what to add */
	netdev_for_each_uc_addr(ha, dev) {
		found = false;
		bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry(entry, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
				found = true;
				break;
			}
		}

		if (!found) {
			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry) {
				en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
				       ha->addr, priv->port);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			mac = mlx4_mac_to_u64(ha->addr);
			memcpy(entry->mac, ha->addr, ETH_ALEN);
			err = mlx4_register_mac(mdev->dev, priv->port, mac);
			if (err < 0) {
				en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			err = mlx4_en_uc_steer_add(priv, ha->addr,
						   &priv->base_qpn,
						   &entry->reg_id);
			if (err) {
				en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			} else {
				unsigned int mac_hash;
				en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
				       ha->addr, priv->port);
				mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				hlist_add_head_rcu(&entry->hlist, bucket);
			}
		}
	}

	if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Forcing promiscuous mode on port:%d\n",
			priv->port);
	} else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
			priv->port);
	}
}
static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 rx_mode_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
		goto out;
	}

	if (!netif_carrier_ok(dev)) {
		if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
			if (priv->port_state.link_state) {
				priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
				netif_carrier_on(dev);
				en_dbg(LINK, priv, "Link Up\n");
			}
		}
	}

	if (dev->priv_flags & IFF_UNICAST_FLT)
		mlx4_en_do_uc_filter(priv, dev, mdev);

	/* Promiscuous mode: disable all filters */
	if ((dev->flags & IFF_PROMISC) ||
	    (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
		mlx4_en_set_promisc_mode(priv, mdev);
		goto out;
	}

	/* Not in promiscuous mode */
	if (priv->flags & MLX4_EN_FLAG_PROMISC)
		mlx4_en_clear_promisc_mode(priv, mdev);

	mlx4_en_do_multicast(priv, dev, mdev);
out:
	mutex_unlock(&mdev->state_lock);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mlx4_en_netpoll(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_cq *cq;
	int i;

	for (i = 0; i < priv->tx_ring_num; i++) {
		cq = priv->tx_cq[i];
		napi_schedule(&cq->napi);
	}
}
#endif
static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv)
{
	u64 reg_id;
	int err = 0;
	int *qpn = &priv->base_qpn;
	struct mlx4_mac_entry *entry;

	err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
	if (err)
		return err;

	err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
				       &priv->tunnel_reg_id);
	if (err)
		goto tunnel_err;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto alloc_err;
	}

	memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
	memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
	entry->reg_id = reg_id;
	hlist_add_head_rcu(&entry->hlist,
			   &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);

	return 0;

alloc_err:
	if (priv->tunnel_reg_id)
		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);

tunnel_err:
	mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
	return err;
}

static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv)
{
	u64 mac;
	unsigned int i;
	int qpn = priv->base_qpn;
	struct hlist_head *bucket;
	struct hlist_node *tmp;
	struct mlx4_mac_entry *entry;

	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			mac = mlx4_mac_to_u64(entry->mac);
			en_dbg(DRV, priv, "Registering MAC:%pM for deleting\n",
			       entry->mac);
			mlx4_en_uc_steer_release(priv, entry->mac,
						 qpn, entry->reg_id);

			mlx4_unregister_mac(priv->mdev->dev, priv->port, mac);
			hlist_del_rcu(&entry->hlist);
			kfree_rcu(entry, rcu);
		}
	}

	if (priv->tunnel_reg_id) {
		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
		priv->tunnel_reg_id = 0;
	}
}
static void mlx4_en_tx_timeout(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (netif_msg_timer(priv))
		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

	for (i = 0; i < priv->tx_ring_num; i++) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
			continue;
		en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
			i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn,
			priv->tx_ring[i]->cons, priv->tx_ring[i]->prod);
	}

	priv->port_stats.tx_timeout++;
	if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) {
		en_dbg(DRV, priv, "Scheduling port restart\n");
		queue_work(mdev->workqueue, &priv->restart_task);
	}
}

static struct rtnl_link_stats64 *
mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->stats_lock);
	netdev_stats_to_stats64(stats, &dev->stats);
	spin_unlock_bh(&priv->stats_lock);

	return stats;
}
static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coalescing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
	       priv->dev->mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		priv->last_moder_packets[i] = 0;
		priv->last_moder_bytes[i] = 0;
	}

	for (i = 0; i < priv->tx_ring_num; i++) {
		cq = priv->tx_cq[i];
		cq->moder_cnt = priv->tx_frames;
		cq->moder_time = priv->tx_usecs;
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_jiffies = 0;
	priv->last_moder_tx_packets = 0;
}
static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long rx_pkt_diff;
	int moder_time;
	int ring, err;

	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		spin_lock_bh(&priv->stats_lock);
		rx_packets = priv->rx_ring[ring]->packets;
		rx_bytes = priv->rx_ring[ring]->bytes;
		spin_unlock_bh(&priv->stats_lock);

		rx_pkt_diff = ((unsigned long) (rx_packets -
				priv->last_moder_packets[ring]));
		packets = rx_pkt_diff;
		rate = packets * HZ / period;
		avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
				priv->last_moder_bytes[ring])) / packets : 0;

		/* Apply auto-moderation only when packet rate
		 * exceeds a rate at which it matters */
		if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
		    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
			if (rate < priv->pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate > priv->pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				moder_time = (rate - priv->pkt_rate_low) *
					(priv->rx_usecs_high - priv->rx_usecs_low) /
					(priv->pkt_rate_high - priv->pkt_rate_low) +
					priv->rx_usecs_low;
		} else {
			moder_time = priv->rx_usecs_low;
		}

		if (moder_time != priv->last_moder_time[ring]) {
			priv->last_moder_time[ring] = moder_time;
			cq = priv->rx_cq[ring];
			cq->moder_time = moder_time;
			cq->moder_cnt = priv->rx_frames;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err)
				en_err(priv, "Failed modifying moderation for cq:%d\n",
				       ring);
		}
		priv->last_moder_packets[ring] = rx_packets;
		priv->last_moder_bytes[ring] = rx_bytes;
	}

	priv->last_moder_jiffies = jiffies;
}
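/* Worked example of the linear interpolation above (illustrative
 * numbers, not necessarily the driver defaults): with pkt_rate_low =
 * 400k pps, pkt_rate_high = 450k pps, rx_usecs_low = 0 and
 * rx_usecs_high = 128, a measured rate of 425k pps sits halfway between
 * the two rate bounds and therefore yields moder_time = 64 usec.
 */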
static void mlx4_en_do_get_stats(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (priv->port_up) {
			err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
			if (err)
				en_dbg(HW, priv, "Could not update stats\n");

			mlx4_en_auto_moderation(priv);
		}

		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
		mlx4_en_do_set_mac(priv, priv->current_mac);
		mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
	}
	mutex_unlock(&mdev->state_lock);
}

/* mlx4_en_service_task - Run service task for tasks that need to be done
 * periodically
 */
static void mlx4_en_service_task(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 service_task);
	struct mlx4_en_dev *mdev = priv->mdev;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
			mlx4_en_ptp_overflow_check(mdev);

		mlx4_en_recover_from_oom(priv);
		queue_delayed_work(mdev->workqueue, &priv->service_task,
				   SERVICE_TASK_DELAY);
	}
	mutex_unlock(&mdev->state_lock);
}
static void mlx4_en_linkstate(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 linkstate_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int linkstate = priv->link_state;

	mutex_lock(&mdev->state_lock);
	/* If observable port state changed set carrier state and
	 * report to system log */
	if (priv->last_link_state != linkstate) {
		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
			en_info(priv, "Link Down\n");
			netif_carrier_off(priv->dev);
		} else {
			en_info(priv, "Link Up\n");
			netif_carrier_on(priv->dev);
		}
	}
	priv->last_link_state = linkstate;
	mutex_unlock(&mdev->state_lock);
}

static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
{
	struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
	int numa_node = priv->mdev->dev->numa_node;

	if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node),
			ring->affinity_mask);
	return 0;
}

static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
{
	free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
}
static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv,
				      int tx_ring_idx)
{
	struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[tx_ring_idx];
	int rr_index;

	rr_index = (priv->xdp_ring_num - priv->tx_ring_num) + tx_ring_idx;
	if (rr_index >= 0) {
		tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc;
		tx_ring->recycle_ring = priv->rx_ring[rr_index];
		en_dbg(DRV, priv,
		       "Set tx_ring[%d]->recycle_ring = rx_ring[%d]\n",
		       tx_ring_idx, rr_index);
	} else {
		tx_ring->recycle_ring = NULL;
	}
}
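/* Index arithmetic example (illustrative): the XDP TX rings occupy the
 * tail of the tx_ring array. With tx_ring_num = 12 and xdp_ring_num = 4,
 * rr_index = tx_ring_idx - 8, so tx rings 8..11 recycle pages straight
 * back into rx rings 0..3 while ordinary tx rings get no recycle ring.
 */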
int mlx4_en_start_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_ring *tx_ring;
	int rx_index = 0;
	int tx_index = 0;
	int err = 0;
	int i;
	int j = 0;
	u8 mc_list[16] = {0};

	if (priv->port_up) {
		en_dbg(DRV, priv, "start port called while port already up\n");
		return 0;
	}

	INIT_LIST_HEAD(&priv->mc_list);
	INIT_LIST_HEAD(&priv->curr_list);
	INIT_LIST_HEAD(&priv->ethtool_list);
	memset(&priv->ethtool_rules[0], 0,
	       sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);

	/* Calculate Rx buf size */
	dev->mtu = min(dev->mtu, priv->max_mtu);
	mlx4_en_calc_rx_buf(dev);
	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);

	/* Configure rx cq's and rings */
	err = mlx4_en_activate_rx_rings(priv);
	if (err) {
		en_err(priv, "Failed to activate RX rings\n");
		return err;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];

		err = mlx4_en_init_affinity_hint(priv, i);
		if (err) {
			en_err(priv, "Failed preparing IRQ affinity hint\n");
			goto cq_err;
		}

		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed activating Rx CQ\n");
			mlx4_en_free_affinity_hint(priv, i);
			goto cq_err;
		}

		for (j = 0; j < cq->size; j++) {
			struct mlx4_cqe *cqe = NULL;

			cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) +
			      priv->cqe_factor;
			cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK;
		}

		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters\n");
			mlx4_en_deactivate_cq(priv, cq);
			mlx4_en_free_affinity_hint(priv, i);
			goto cq_err;
		}
		mlx4_en_arm_cq(priv, cq);
		priv->rx_ring[i]->cqn = cq->mcq.cqn;
		++rx_index;
	}

	/* Set qp number */
	en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
	err = mlx4_en_get_qp(priv);
	if (err) {
		en_err(priv, "Failed getting eth qp\n");
		goto cq_err;
	}
	mdev->mac_removed[priv->port] = 0;

	priv->counter_index =
			mlx4_get_default_counter_index(mdev->dev, priv->port);

	err = mlx4_en_config_rss_steer(priv);
	if (err) {
		en_err(priv, "Failed configuring rss steering\n");
		goto mac_err;
	}

	err = mlx4_en_create_drop_qp(priv);
	if (err)
		goto rss_err;

	/* Configure tx cq's and rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		/* Configure cq */
		cq = priv->tx_cq[i];
		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed allocating Tx CQ\n");
			goto tx_err;
		}
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters\n");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
		cq->buf->wqe_index = cpu_to_be16(0xffff);

		/* Configure ring */
		tx_ring = priv->tx_ring[i];
		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
					       i / priv->num_tx_rings_p_up);
		if (err) {
			en_err(priv, "Failed allocating Tx ring\n");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		tx_ring->tx_queue = netdev_get_tx_queue(dev, i);

		mlx4_en_init_recycle_ring(priv, i);

		/* Arm CQ for TX completions */
		mlx4_en_arm_cq(priv, cq);

		/* Set initial ownership of all Tx TXBBs to SW (1) */
		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
			*((u32 *) (tx_ring->buf + j)) = 0xffffffff;
		++tx_index;
	}

	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto tx_err;
	}
	/* Set default qp number */
	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed setting default qp numbers\n");
		goto tx_err;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
		if (err) {
			en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
			       err);
			goto tx_err;
		}
	}

	/* Init port */
	en_dbg(HW, priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto tx_err;
	}

	/* Set Unicast and VXLAN steering rules */
	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0 &&
	    mlx4_en_set_rss_steer_rules(priv))
		mlx4_warn(mdev, "Failed setting steering rules\n");

	/* Attach rx QP to broadcast address */
	eth_broadcast_addr(&mc_list[10]);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
				  priv->port, 0, MLX4_PROT_ETH,
				  &priv->broadcast_id))
		mlx4_warn(mdev, "Failed Attaching Broadcast\n");

	/* Must redo promiscuous mode setup. */
	priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);

	/* Schedule multicast task to populate multicast list */
	queue_work(mdev->workqueue, &priv->rx_mode_task);

	if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
		udp_tunnel_get_rx_info(dev);

	priv->port_up = true;

	/* Process all completions if exist to prevent
	 * the queues freezing if they are full
	 */
	for (i = 0; i < priv->rx_ring_num; i++) {
		local_bh_disable();
		napi_schedule(&priv->rx_cq[i]->napi);
		local_bh_enable();
	}

	clear_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state);
	netif_tx_start_all_queues(dev);
	netif_device_attach(dev);

	return 0;

tx_err:
	while (tx_index--) {
		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
		mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
	}
	mlx4_en_destroy_drop_qp(priv);
rss_err:
	mlx4_en_release_rss_steer(priv);
mac_err:
	mlx4_en_put_qp(priv);
cq_err:
	while (rx_index--) {
		mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
		mlx4_en_free_affinity_hint(priv, rx_index);
	}
	for (i = 0; i < priv->rx_ring_num; i++)
		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);

	return err; /* need to close devices */
}
void mlx4_en_stop_port(struct net_device *dev, int detach)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_mc_list *mclist, *tmp;
	struct ethtool_flow_id *flow, *tmp_flow;
	int i;
	u8 mc_list[16] = {0};

	if (!priv->port_up) {
		en_dbg(DRV, priv, "stop port called while port already down\n");
		return;
	}

	/* close port */
	mlx4_CLOSE_PORT(mdev->dev, priv->port);

	/* Synchronize with tx routine */
	netif_tx_lock_bh(dev);
	if (detach)
		netif_device_detach(dev);
	netif_tx_stop_all_queues(dev);
	netif_tx_unlock_bh(dev);

	netif_tx_disable(dev);

	/* Set port as not active */
	priv->port_up = false;
	priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);

	/* Promiscuous mode */
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
				 MLX4_EN_FLAG_MC_PROMISC);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_ALL_DEFAULT);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_MC_DEFAULT);
	} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscuous mode */
		mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
					    priv->port);

		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
						      priv->port);
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
	}

	/* Detach All multicasts */
	eth_broadcast_addr(&mc_list[10]);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
			      MLX4_PROT_ETH, priv->broadcast_id);
	list_for_each_entry(mclist, &priv->curr_list, list) {
		memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
		mc_list[5] = priv->port;
		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
				      mc_list, MLX4_PROT_ETH, mclist->reg_id);
		if (mclist->tunnel_reg_id)
			mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id);
	}
	mlx4_en_clear_list(dev);
	list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
		list_del(&mclist->list);
		kfree(mclist);
	}

	/* Flush multicast filter */
	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);

	/* Remove flow steering rules for the port */
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		ASSERT_RTNL();
		list_for_each_entry_safe(flow, tmp_flow,
					 &priv->ethtool_list, list) {
			mlx4_flow_detach(mdev->dev, flow->id);
			list_del(&flow->list);
		}
	}

	mlx4_en_destroy_drop_qp(priv);

	/* Free TX Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
		mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
	}
	msleep(10);

	for (i = 0; i < priv->tx_ring_num; i++)
		mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);

	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
		mlx4_en_delete_rss_steer_rules(priv);

	/* Free RSS qps */
	mlx4_en_release_rss_steer(priv);

	/* Unregister Mac address for the port */
	mlx4_en_put_qp(priv);
	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN))
		mdev->mac_removed[priv->port] = 1;

	/* Free RX Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		struct mlx4_en_cq *cq = priv->rx_cq[i];

		napi_synchronize(&cq->napi);
		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
		mlx4_en_deactivate_cq(priv, cq);

		mlx4_en_free_affinity_hint(priv, i);
	}
}
static void mlx4_en_restart(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 restart_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

	rtnl_lock();
	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		mlx4_en_stop_port(dev, 1);
		if (mlx4_en_start_port(dev))
			en_err(priv, "Failed restarting port %d\n", priv->port);
	}
	mutex_unlock(&mdev->state_lock);
	rtnl_unlock();
}

static void mlx4_en_clear_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (!mlx4_is_slave(mdev->dev))
		if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
			en_dbg(HW, priv, "Failed dumping statistics\n");

	memset(&priv->pstats, 0, sizeof(priv->pstats));
	memset(&priv->pkstats, 0, sizeof(priv->pkstats));
	memset(&priv->port_stats, 0, sizeof(priv->port_stats));
	memset(&priv->rx_flowstats, 0, sizeof(priv->rx_flowstats));
	memset(&priv->tx_flowstats, 0, sizeof(priv->tx_flowstats));
	memset(&priv->rx_priority_flowstats, 0,
	       sizeof(priv->rx_priority_flowstats));
	memset(&priv->tx_priority_flowstats, 0,
	       sizeof(priv->tx_priority_flowstats));
	memset(&priv->pf_stats, 0, sizeof(priv->pf_stats));

	for (i = 0; i < priv->tx_ring_num; i++) {
		priv->tx_ring[i]->bytes = 0;
		priv->tx_ring[i]->packets = 0;
		priv->tx_ring[i]->tx_csum = 0;
		priv->tx_ring[i]->tx_dropped = 0;
		priv->tx_ring[i]->queue_stopped = 0;
		priv->tx_ring[i]->wake_queue = 0;
		priv->tx_ring[i]->tso_packets = 0;
		priv->tx_ring[i]->xmit_more = 0;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i]->bytes = 0;
		priv->rx_ring[i]->packets = 0;
		priv->rx_ring[i]->csum_ok = 0;
		priv->rx_ring[i]->csum_none = 0;
		priv->rx_ring[i]->csum_complete = 0;
	}
}
static int mlx4_en_open(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	mutex_lock(&mdev->state_lock);

	if (!mdev->device_up) {
		en_err(priv, "Cannot open - device down/disabled\n");
		err = -EBUSY;
		goto out;
	}

	/* Reset HW statistics and SW counters */
	mlx4_en_clear_stats(dev);

	err = mlx4_en_start_port(dev);
	if (err)
		en_err(priv, "Failed starting port:%d\n", priv->port);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}

static int mlx4_en_close(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(IFDOWN, priv, "Close port called\n");

	mutex_lock(&mdev->state_lock);

	mlx4_en_stop_port(dev, 0);
	netif_carrier_off(dev);

	mutex_unlock(&mdev->state_lock);
	return 0;
}

static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
	int i;

#ifdef CONFIG_RFS_ACCEL
	priv->dev->rx_cpu_rmap = NULL;
#endif

	for (i = 0; i < priv->tx_ring_num; i++) {
		if (priv->tx_ring && priv->tx_ring[i])
			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
		if (priv->tx_cq && priv->tx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i])
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
				priv->prof->rx_ring_size, priv->stride);
		if (priv->rx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}
}
static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
	struct mlx4_en_port_profile *prof = priv->prof;
	int i;
	int node;

	/* Create tx Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		node = cpu_to_node(i % num_online_cpus());
		if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
				      prof->tx_ring_size, i, TX, node))
			goto err;

		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
					   prof->tx_ring_size, TXBB_SIZE,
					   node, i))
			goto err;
	}

	/* Create rx Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		node = cpu_to_node(i % num_online_cpus());
		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
				      prof->rx_ring_size, i, RX, node))
			goto err;

		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
					   prof->rx_ring_size, priv->stride,
					   node))
			goto err;
	}

#ifdef CONFIG_RFS_ACCEL
	priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port);
#endif

	return 0;

err:
	en_err(priv, "Failed to allocate NIC resources\n");
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i])
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
						prof->rx_ring_size,
						priv->stride);
		if (priv->rx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}
	for (i = 0; i < priv->tx_ring_num; i++) {
		if (priv->tx_ring[i])
			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
		if (priv->tx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
	}
	return -ENOMEM;
}
static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
			     struct mlx4_en_priv *src,
			     struct mlx4_en_port_profile *prof)
{
	memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config,
	       sizeof(dst->hwtstamp_config));
	dst->num_tx_rings_p_up = src->mdev->profile.num_tx_rings_p_up;
	dst->tx_ring_num = prof->tx_ring_num;
	dst->rx_ring_num = prof->rx_ring_num;
	dst->flags = prof->flags;
	dst->mdev = src->mdev;
	dst->port = src->port;
	dst->dev = src->dev;
	dst->prof = prof;
	dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					 DS_SIZE * MLX4_EN_MAX_RX_FRAGS);

	dst->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
			       GFP_KERNEL);
	if (!dst->tx_ring)
		return -ENOMEM;

	dst->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
			     GFP_KERNEL);
	if (!dst->tx_cq) {
		kfree(dst->tx_ring);
		return -ENOMEM;
	}
	return 0;
}

static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
				struct mlx4_en_priv *src)
{
	memcpy(dst->rx_ring, src->rx_ring,
	       sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num);
	memcpy(dst->rx_cq, src->rx_cq,
	       sizeof(struct mlx4_en_cq *) * src->rx_ring_num);
	memcpy(&dst->hwtstamp_config, &src->hwtstamp_config,
	       sizeof(dst->hwtstamp_config));
	dst->tx_ring_num = src->tx_ring_num;
	dst->rx_ring_num = src->rx_ring_num;
	dst->tx_ring = src->tx_ring;
	dst->tx_cq = src->tx_cq;
	memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile));
}

int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
				struct mlx4_en_priv *tmp,
				struct mlx4_en_port_profile *prof)
{
	mlx4_en_copy_priv(tmp, priv, prof);

	if (mlx4_en_alloc_resources(tmp)) {
		en_warn(priv,
			"%s: Resource allocation failed, using previous configuration\n",
			__func__);
		kfree(tmp->tx_ring);
		kfree(tmp->tx_cq);
		return -ENOMEM;
	}
	return 0;
}

void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
				    struct mlx4_en_priv *tmp)
{
	mlx4_en_free_resources(priv);
	mlx4_en_update_priv(priv, tmp);
}
void mlx4_en_destroy_netdev(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

	/* Unregister device - this will close the port if it was up */
	if (priv->registered) {
		devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
							      priv->port));
		unregister_netdev(dev);
	}

	if (priv->allocated)
		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

	cancel_delayed_work(&priv->stats_task);
	cancel_delayed_work(&priv->service_task);
	/* flush any pending task for this netdev */
	flush_workqueue(mdev->workqueue);

	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
		mlx4_en_remove_timestamp(mdev);

	/* Detach the netdev so tasks would not attempt to access it */
	mutex_lock(&mdev->state_lock);
	mdev->pndev[priv->port] = NULL;
	mdev->upper[priv->port] = NULL;

#ifdef CONFIG_RFS_ACCEL
	mlx4_en_cleanup_filters(priv);
#endif

	mlx4_en_free_resources(priv);
	mutex_unlock(&mdev->state_lock);

	kfree(priv->tx_ring);
	kfree(priv->tx_cq);

	free_netdev(dev);
}
static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
	       dev->mtu, new_mtu);

	if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
		en_err(priv, "Bad MTU size:%d.\n", new_mtu);
		return -EPERM;
	}
	if (priv->xdp_ring_num && MLX4_EN_EFF_MTU(new_mtu) > FRAG_SZ0) {
		en_err(priv, "MTU size:%d requires frags but XDP running\n",
		       new_mtu);
		return -EOPNOTSUPP;
	}
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		mutex_lock(&mdev->state_lock);
		if (!mdev->device_up) {
			/* NIC is probably restarting - let restart task reset
			 * the port */
			en_dbg(DRV, priv, "Change MTU called with card down!?\n");
		} else {
			mlx4_en_stop_port(dev, 1);
			err = mlx4_en_start_port(dev);
			if (err) {
				en_err(priv, "Failed restarting port:%d\n",
				       priv->port);
				if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING,
						      &priv->state))
					queue_work(mdev->workqueue, &priv->restart_task);
			}
		}
		mutex_unlock(&mdev->state_lock);
	}
	return 0;
}
static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct hwtstamp_config config;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	/* device doesn't support time stamping */
	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
		return -EINVAL;

	/* TX HW timestamp */
	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	/* RX HW timestamp */
	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	if (mlx4_en_reset_config(dev, config, dev->features)) {
		config.tx_type = HWTSTAMP_TX_OFF;
		config.rx_filter = HWTSTAMP_FILTER_NONE;
	}

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config,
			    sizeof(priv->hwtstamp_config)) ? -EFAULT : 0;
}

static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlx4_en_hwtstamp_set(dev, ifr);
	case SIOCGHWTSTAMP:
		return mlx4_en_hwtstamp_get(dev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}
static netdev_features_t mlx4_en_fix_features(struct net_device *netdev,
					      netdev_features_t features)
{
	struct mlx4_en_priv *en_priv = netdev_priv(netdev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	/* Since there is no support for separate RX C-TAG/S-TAG vlan accel
	 * enable/disable make sure S-TAG flag is always in same state as
	 * C-TAG.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX &&
	    !(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
		features |= NETIF_F_HW_VLAN_STAG_RX;
	else
		features &= ~NETIF_F_HW_VLAN_STAG_RX;

	return features;
}
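/* Keeping STAG-RX slaved to CTAG-RX means a user request such as
 * "ethtool -K eth0 rxvlan off" implicitly toggles S-TAG stripping as
 * well; the hardware cannot strip only one of the two tag types.
 */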
static int mlx4_en_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);
	bool reset = false;
	int ret = 0;

	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXFCS)) {
		en_info(priv, "Turn %s RX-FCS\n",
			(features & NETIF_F_RXFCS) ? "ON" : "OFF");
		reset = true;
	}

	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXALL)) {
		u8 ignore_fcs_value = (features & NETIF_F_RXALL) ? 1 : 0;

		en_info(priv, "Turn %s RX-ALL\n",
			ignore_fcs_value ? "ON" : "OFF");
		ret = mlx4_SET_PORT_fcs_check(priv->mdev->dev,
					      priv->port, ignore_fcs_value);
		if (ret)
			return ret;
	}

	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
		en_info(priv, "Turn %s RX vlan strip offload\n",
			(features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF");
		reset = true;
	}

	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX))
		en_info(priv, "Turn %s TX vlan strip offload\n",
			(features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");

	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX))
		en_info(priv, "Turn %s TX S-VLAN strip offload\n",
			(features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF");

	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) {
		en_info(priv, "Turn %s loopback\n",
			(features & NETIF_F_LOOPBACK) ? "ON" : "OFF");
		mlx4_en_update_loopback_state(netdev, features);
	}

	if (reset) {
		ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config,
					   features);
		if (ret)
			return ret;
	}

	return 0;
}
static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;
	u64 mac_u64 = mlx4_mac_to_u64(mac);

	if (is_multicast_ether_addr(mac))
		return -EINVAL;

	return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
}

static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
			       __be16 vlan_proto)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos,
				vlan_proto);
}

static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
			       int max_tx_rate)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_set_vf_rate(mdev->dev, en_priv->port, vf, min_tx_rate,
				max_tx_rate);
}

static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
}

static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
}

static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
}

static int mlx4_en_get_vf_stats(struct net_device *dev, int vf,
				struct ifla_vf_stats *vf_stats)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_get_vf_stats(mdev->dev, en_priv->port, vf, vf_stats);
}
#define PORT_ID_BYTE_LEN 8
static int mlx4_en_get_phys_port_id(struct net_device *dev,
				    struct netdev_phys_item_id *ppid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_dev *mdev = priv->mdev->dev;
	int i;
	u64 phys_port_id = mdev->caps.phys_port_id[priv->port];

	if (!phys_port_id)
		return -EOPNOTSUPP;

	ppid->id_len = sizeof(phys_port_id);
	for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
		ppid->id[i] = phys_port_id & 0xff;
		phys_port_id >>= 8;
	}
	return 0;
}
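/* Example: a phys_port_id of 0x0002c90300fedcba is exposed to userspace
 * as the byte string 00:02:c9:03:00:fe:dc:ba, i.e. the loop above
 * stores the u64 most-significant byte first.
 */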
static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
{
	int ret;
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 vxlan_add_task);

	ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port);
	if (ret)
		goto out;

	ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
				  VXLAN_STEER_BY_OUTER_MAC, 1);
out:
	if (ret) {
		en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
		return;
	}

	/* set offloads */
	priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				      NETIF_F_RXCSUM |
				      NETIF_F_TSO | NETIF_F_TSO6 |
				      NETIF_F_GSO_UDP_TUNNEL |
				      NETIF_F_GSO_UDP_TUNNEL_CSUM |
				      NETIF_F_GSO_PARTIAL;
}

static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
{
	int ret;
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 vxlan_del_task);
	/* unset offloads */
	priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					NETIF_F_RXCSUM |
					NETIF_F_TSO | NETIF_F_TSO6 |
					NETIF_F_GSO_UDP_TUNNEL |
					NETIF_F_GSO_UDP_TUNNEL_CSUM |
					NETIF_F_GSO_PARTIAL);

	ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
				  VXLAN_STEER_BY_OUTER_MAC, 0);
	if (ret)
		en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);

	priv->vxlan_port = 0;
}
static void mlx4_en_add_vxlan_port(struct net_device *dev,
				   struct udp_tunnel_info *ti)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	__be16 port = ti->port;
	__be16 current_port;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (ti->sa_family != AF_INET)
		return;

	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
		return;

	current_port = priv->vxlan_port;
	if (current_port && current_port != port) {
		en_warn(priv, "vxlan port %d configured, can't add port %d\n",
			ntohs(current_port), ntohs(port));
		return;
	}

	priv->vxlan_port = port;
	queue_work(priv->mdev->workqueue, &priv->vxlan_add_task);
}
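/* This callback fires from the udp_tunnel infrastructure when a VXLAN
 * socket is opened, e.g. after something like
 * "ip link add vxlan0 type vxlan id 42 dev eth0 dstport 4789" brings a
 * tunnel up. The hardware can only steer by a single outer UDP port,
 * hence the single priv->vxlan_port slot and the warning above.
 */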
static void mlx4_en_del_vxlan_port(struct net_device *dev,
				   struct udp_tunnel_info *ti)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	__be16 port = ti->port;
	__be16 current_port;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (ti->sa_family != AF_INET)
		return;

	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
		return;

	current_port = priv->vxlan_port;
	if (current_port != port) {
		en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port));
		return;
	}

	queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
}
static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
						struct net_device *dev,
						netdev_features_t features)
{
	features = vlan_features_check(skb, features);
	features = vxlan_features_check(skb, features);

	/* The ConnectX-3 doesn't support outer IPv6 checksums but it does
	 * support inner IPv6 checksums and segmentation so we need to
	 * strip that feature if this is an IPv6 encapsulated frame.
	 */
	if (skb->encapsulation &&
	    (skb->ip_summed == CHECKSUM_PARTIAL)) {
		struct mlx4_en_priv *priv = netdev_priv(dev);

		if (!priv->vxlan_port ||
		    (ip_hdr(skb)->version != 4) ||
		    (udp_hdr(skb)->dest != priv->vxlan_port))
			features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
	}

	return features;
}
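/* Example: a VXLAN packet with an outer IPv6 header reaches this hook
 * with skb->encapsulation set and ip_hdr(skb)->version != 4, so the
 * checksum/GSO features are masked off and the stack falls back to
 * software checksumming and segmentation for that packet.
 */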
static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[queue_index];
	struct mlx4_update_qp_params params;
	int err;

	if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT))
		return -EOPNOTSUPP;

	/* rate provided to us in Mbs, check if it fits into 12 bits, if not use Gbs */
	if (maxrate >> 12) {
		params.rate_unit = MLX4_QP_RATE_LIMIT_GBS;
		params.rate_val = maxrate / 1000;
	} else if (maxrate) {
		params.rate_unit = MLX4_QP_RATE_LIMIT_MBS;
		params.rate_val = maxrate;
	} else { /* zero serves to revoke the QP rate-limitation */
		params.rate_unit = 0;
		params.rate_val = 0;
	}

	err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT,
			     &params);
	return err;
}
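/* Example: maxrate = 5000 (Mb/s) does not fit in 12 bits (max 4095),
 * so the unit switches to Gb/s and rate_val becomes 5000 / 1000 = 5.
 * A maxrate of 0 clears the limit entirely.
 */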
static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct bpf_prog *old_prog;
	int xdp_ring_num;
	int port_up = 0;
	int err;
	int i;

	xdp_ring_num = prog ? ALIGN(priv->rx_ring_num, MLX4_EN_NUM_UP) : 0;

	/* No need to reconfigure buffers when simply swapping the
	 * program for a new one.
	 */
	if (priv->xdp_ring_num == xdp_ring_num) {
		if (prog) {
			prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
			if (IS_ERR(prog))
				return PTR_ERR(prog);
		}
		mutex_lock(&mdev->state_lock);
		for (i = 0; i < priv->rx_ring_num; i++) {
			old_prog = rcu_dereference_protected(
					priv->rx_ring[i]->xdp_prog,
					lockdep_is_held(&mdev->state_lock));
			rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog);
			if (old_prog)
				bpf_prog_put(old_prog);
		}
		mutex_unlock(&mdev->state_lock);
		return 0;
	}

	if (priv->num_frags > 1) {
		en_err(priv, "Cannot set XDP if MTU requires multiple frags\n");
		return -EOPNOTSUPP;
	}

	if (priv->tx_ring_num < xdp_ring_num + MLX4_EN_NUM_UP) {
		en_err(priv,
		       "Minimum %d tx channels required to run XDP\n",
		       (xdp_ring_num + MLX4_EN_NUM_UP) / MLX4_EN_NUM_UP);
		return -EINVAL;
	}

	if (prog) {
		prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
		if (IS_ERR(prog))
			return PTR_ERR(prog);
	}

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	priv->xdp_ring_num = xdp_ring_num;
	netif_set_real_num_tx_queues(dev, priv->tx_ring_num -
					  priv->xdp_ring_num);

	for (i = 0; i < priv->rx_ring_num; i++) {
		old_prog = rcu_dereference_protected(
				priv->rx_ring[i]->xdp_prog,
				lockdep_is_held(&mdev->state_lock));
		rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog);
		if (old_prog)
			bpf_prog_put(old_prog);
	}

	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err) {
			en_err(priv, "Failed starting port %d for XDP change\n",
			       priv->port);
			if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING,
					      &priv->state))
				queue_work(mdev->workqueue, &priv->restart_task);
		}
	}

	mutex_unlock(&mdev->state_lock);
	return 0;
}
static bool mlx4_xdp_attached(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	return !!priv->xdp_ring_num;
}

static int mlx4_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return mlx4_xdp_set(dev, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_attached = mlx4_xdp_attached(dev);
		return 0;
	default:
		return -EINVAL;
	}
}
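/* For reference, an XDP program is typically attached from userspace
 * with iproute2, e.g. "ip link set dev eth0 xdp obj prog.o", which
 * reaches this driver through the ndo_xdp hook registered below.
 */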
static const struct net_device_ops mlx4_netdev_ops = {
	.ndo_open		= mlx4_en_open,
	.ndo_stop		= mlx4_en_close,
	.ndo_start_xmit		= mlx4_en_xmit,
	.ndo_select_queue	= mlx4_en_select_queue,
	.ndo_get_stats64	= mlx4_en_get_stats64,
	.ndo_set_rx_mode	= mlx4_en_set_rx_mode,
	.ndo_set_mac_address	= mlx4_en_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= mlx4_en_change_mtu,
	.ndo_do_ioctl		= mlx4_en_ioctl,
	.ndo_tx_timeout		= mlx4_en_tx_timeout,
	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mlx4_en_netpoll,
#endif
	.ndo_set_features	= mlx4_en_set_features,
	.ndo_fix_features	= mlx4_en_fix_features,
	.ndo_setup_tc		= __mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
#endif
	.ndo_get_phys_port_id	= mlx4_en_get_phys_port_id,
	.ndo_udp_tunnel_add	= mlx4_en_add_vxlan_port,
	.ndo_udp_tunnel_del	= mlx4_en_del_vxlan_port,
	.ndo_features_check	= mlx4_en_features_check,
	.ndo_set_tx_maxrate	= mlx4_en_set_tx_maxrate,
	.ndo_xdp		= mlx4_xdp,
};
static const struct net_device_ops mlx4_netdev_ops_master = {
	.ndo_open		= mlx4_en_open,
	.ndo_stop		= mlx4_en_close,
	.ndo_start_xmit		= mlx4_en_xmit,
	.ndo_select_queue	= mlx4_en_select_queue,
	.ndo_get_stats64	= mlx4_en_get_stats64,
	.ndo_set_rx_mode	= mlx4_en_set_rx_mode,
	.ndo_set_mac_address	= mlx4_en_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= mlx4_en_change_mtu,
	.ndo_tx_timeout		= mlx4_en_tx_timeout,
	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
	.ndo_set_vf_mac		= mlx4_en_set_vf_mac,
	.ndo_set_vf_vlan	= mlx4_en_set_vf_vlan,
	.ndo_set_vf_rate	= mlx4_en_set_vf_rate,
	.ndo_set_vf_spoofchk	= mlx4_en_set_vf_spoofchk,
	.ndo_set_vf_link_state	= mlx4_en_set_vf_link_state,
	.ndo_get_vf_stats	= mlx4_en_get_vf_stats,
	.ndo_get_vf_config	= mlx4_en_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mlx4_en_netpoll,
#endif
	.ndo_set_features	= mlx4_en_set_features,
	.ndo_fix_features	= mlx4_en_fix_features,
	.ndo_setup_tc		= __mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
#endif
	.ndo_get_phys_port_id	= mlx4_en_get_phys_port_id,
	.ndo_udp_tunnel_add	= mlx4_en_add_vxlan_port,
	.ndo_udp_tunnel_del	= mlx4_en_del_vxlan_port,
	.ndo_features_check	= mlx4_en_features_check,
	.ndo_set_tx_maxrate	= mlx4_en_set_tx_maxrate,
	.ndo_xdp		= mlx4_xdp,
};
struct mlx4_en_bond {
	struct work_struct work;
	struct mlx4_en_priv *priv;
	int is_bonded;
	struct mlx4_port_map port_map;
};
static void mlx4_en_bond_work(struct work_struct *work)
{
	struct mlx4_en_bond *bond = container_of(work,
						 struct mlx4_en_bond,
						 work);
	int err = 0;
	struct mlx4_dev *dev = bond->priv->mdev->dev;

	if (bond->is_bonded) {
		if (!mlx4_is_bonded(dev)) {
			err = mlx4_bond(dev);
			if (err)
				en_err(bond->priv, "Fail to bond device\n");
		}
		if (!err) {
			err = mlx4_port_map_set(dev, &bond->port_map);
			if (err)
				en_err(bond->priv, "Fail to set port map [%d][%d]: %d\n",
				       bond->port_map.port1,
				       bond->port_map.port2,
				       err);
		}
	} else if (mlx4_is_bonded(dev)) {
		err = mlx4_unbond(dev);
		if (err)
			en_err(bond->priv, "Fail to unbond device\n");
	}
	dev_put(bond->priv->dev);
	kfree(bond);
}
static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded,
				   u8 v2p_p1, u8 v2p_p2)
{
	struct mlx4_en_bond *bond = NULL;

	bond = kzalloc(sizeof(*bond), GFP_ATOMIC);
	if (!bond)
		return -ENOMEM;

	INIT_WORK(&bond->work, mlx4_en_bond_work);
	bond->priv = priv;
	bond->is_bonded = is_bonded;
	bond->port_map.port1 = v2p_p1;
	bond->port_map.port2 = v2p_p2;
	dev_hold(priv->dev);
	queue_work(priv->mdev->workqueue, &bond->work);
	return 0;
}
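/* GFP_ATOMIC keeps the allocation from blocking in the notifier path;
 * the potentially slow bond/unbond firmware commands are deferred to
 * the workqueue instead. dev_hold() here pairs with the dev_put() in
 * mlx4_en_bond_work() so the netdev cannot vanish while the work is
 * queued.
 */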
int mlx4_en_netdev_event(struct notifier_block *this,
			 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	u8 port = 0;
	struct mlx4_en_dev *mdev;
	struct mlx4_dev *dev;
	int i, num_eth_ports = 0;
	bool do_bond = true;
	struct mlx4_en_priv *priv;
	u8 v2p_port1 = 0;
	u8 v2p_port2 = 0;

	if (!net_eq(dev_net(ndev), &init_net))
		return NOTIFY_DONE;

	mdev = container_of(this, struct mlx4_en_dev, nb);
	dev = mdev->dev;

	/* Go into this mode only when two network devices set on two ports
	 * of the same mlx4 device are slaves of the same bonding master
	 */
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		++num_eth_ports;
		if (!port && (mdev->pndev[i] == ndev))
			port = i;
		mdev->upper[i] = mdev->pndev[i] ?
			netdev_master_upper_dev_get(mdev->pndev[i]) : NULL;
		/* condition not met: network device is a slave */
		if (!mdev->upper[i])
			do_bond = false;
		if (num_eth_ports < 2)
			continue;
		/* condition not met: same master */
		if (mdev->upper[i] != mdev->upper[i - 1])
			do_bond = false;
	}
	/* condition not met: 2 slaves */
	do_bond = (num_eth_ports == 2) ? do_bond : false;

	/* handle only events that come with enough info */
	if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port)
		return NOTIFY_DONE;

	priv = netdev_priv(ndev);
	if (do_bond) {
		struct netdev_notifier_bonding_info *notifier_info = ptr;
		struct netdev_bonding_info *bonding_info =
			&notifier_info->bonding_info;

		/* required mode 1, 2 or 4 */
		if ((bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) &&
		    (bonding_info->master.bond_mode != BOND_MODE_XOR) &&
		    (bonding_info->master.bond_mode != BOND_MODE_8023AD))
			do_bond = false;

		/* require exactly 2 slaves */
		if (bonding_info->master.num_slaves != 2)
			do_bond = false;

		/* calc v2p */
		if (do_bond) {
			if (bonding_info->master.bond_mode ==
			    BOND_MODE_ACTIVEBACKUP) {
				/* in active-backup mode virtual ports are
				 * mapped to the physical port of the active
				 * slave */
				if (bonding_info->slave.state ==
				    BOND_STATE_BACKUP) {
					if (port == 1) {
						v2p_port1 = 2;
						v2p_port2 = 2;
					} else {
						v2p_port1 = 1;
						v2p_port2 = 1;
					}
				} else { /* BOND_STATE_ACTIVE */
					if (port == 1) {
						v2p_port1 = 1;
						v2p_port2 = 1;
					} else {
						v2p_port1 = 2;
						v2p_port2 = 2;
					}
				}
			} else { /* Active-Active */
				/* in active-active mode a virtual port is
				 * mapped to the native physical port if and only
				 * if the physical port is up */
				__s8 link = bonding_info->slave.link;

				if (port == 1)
					v2p_port2 = 2;
				else
					v2p_port1 = 1;
				if ((link == BOND_LINK_UP) ||
				    (link == BOND_LINK_FAIL)) {
					if (port == 1)
						v2p_port1 = 1;
					else
						v2p_port2 = 2;
				} else { /* BOND_LINK_DOWN || BOND_LINK_BACK */
					if (port == 1)
						v2p_port1 = 2;
					else
						v2p_port2 = 1;
				}
			}
		}
	}

	mlx4_en_queue_bond_work(priv, do_bond,
				v2p_port1, v2p_port2);

	return NOTIFY_DONE;
}
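/* Example v2p outcome: in active-backup mode, while the port-1 slave is
 * active both virtual ports map to physical port 1 (port_map [1][1]);
 * when that slave drops to backup, both flip to port 2 ([2][2]).
 */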
void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev,
				     struct mlx4_en_stats_bitmap *stats_bitmap,
				     u8 rx_ppp, u8 rx_pause,
				     u8 tx_ppp, u8 tx_pause)
{
	int last_i = NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PF_STATS;

	if (!mlx4_is_slave(dev) &&
	    (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)) {
		mutex_lock(&stats_bitmap->mutex);
		bitmap_clear(stats_bitmap->bitmap, last_i, NUM_FLOW_STATS);

		if (rx_ppp)
			bitmap_set(stats_bitmap->bitmap, last_i,
				   NUM_FLOW_PRIORITY_STATS_RX);
		last_i += NUM_FLOW_PRIORITY_STATS_RX;

		if (rx_pause && !(rx_ppp))
			bitmap_set(stats_bitmap->bitmap, last_i,
				   NUM_FLOW_STATS_RX);
		last_i += NUM_FLOW_STATS_RX;

		if (tx_ppp)
			bitmap_set(stats_bitmap->bitmap, last_i,
				   NUM_FLOW_PRIORITY_STATS_TX);
		last_i += NUM_FLOW_PRIORITY_STATS_TX;

		if (tx_pause && !(tx_ppp))
			bitmap_set(stats_bitmap->bitmap, last_i,
				   NUM_FLOW_STATS_TX);
		last_i += NUM_FLOW_STATS_TX;

		mutex_unlock(&stats_bitmap->mutex);
	}
}
void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
			      struct mlx4_en_stats_bitmap *stats_bitmap,
			      u8 rx_ppp, u8 rx_pause,
			      u8 tx_ppp, u8 tx_pause)
{
	int last_i = 0;

	mutex_init(&stats_bitmap->mutex);
	bitmap_zero(stats_bitmap->bitmap, NUM_ALL_STATS);

	if (mlx4_is_slave(dev)) {
		bitmap_set(stats_bitmap->bitmap, last_i +
			   MLX4_FIND_NETDEV_STAT(rx_packets), 1);
		bitmap_set(stats_bitmap->bitmap, last_i +
			   MLX4_FIND_NETDEV_STAT(tx_packets), 1);
		bitmap_set(stats_bitmap->bitmap, last_i +
			   MLX4_FIND_NETDEV_STAT(rx_bytes), 1);
		bitmap_set(stats_bitmap->bitmap, last_i +
			   MLX4_FIND_NETDEV_STAT(tx_bytes), 1);
		bitmap_set(stats_bitmap->bitmap, last_i +
			   MLX4_FIND_NETDEV_STAT(rx_dropped), 1);
		bitmap_set(stats_bitmap->bitmap, last_i +
			   MLX4_FIND_NETDEV_STAT(tx_dropped), 1);
	} else {
		bitmap_set(stats_bitmap->bitmap, last_i, NUM_MAIN_STATS);
	}
	last_i += NUM_MAIN_STATS;

	bitmap_set(stats_bitmap->bitmap, last_i, NUM_PORT_STATS);
	last_i += NUM_PORT_STATS;

	if (mlx4_is_master(dev))
		bitmap_set(stats_bitmap->bitmap, last_i,
			   NUM_PF_STATS);
	last_i += NUM_PF_STATS;

	mlx4_en_update_pfc_stats_bitmap(dev, stats_bitmap,
					rx_ppp, rx_pause,
					tx_ppp, tx_pause);
	last_i += NUM_FLOW_STATS;

	if (!mlx4_is_slave(dev))
		bitmap_set(stats_bitmap->bitmap, last_i, NUM_PKT_STATS);
}
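/* The bitmap is laid out in the same order the counters are dumped:
 * main stats, then port, PF, flow (PFC/pause) and packet stats. A VF
 * therefore reports only the handful of netdev counters set above,
 * while the PF exposes the full set.
 */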
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof)
{
	struct net_device *dev;
	struct mlx4_en_priv *priv;
	int i;
	int err;

	dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
				 MAX_TX_RINGS, MAX_RX_RINGS);
	if (dev == NULL)
		return -ENOMEM;

	netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
	netif_set_real_num_rx_queues(dev, prof->rx_ring_num);

	SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev);
	dev->dev_port = port - 1;
	/*
	 * Initialize driver private data
	 */
	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(struct mlx4_en_priv));
	priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
	spin_lock_init(&priv->stats_lock);
	INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
	INIT_WORK(&priv->restart_task, mlx4_en_restart);
	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
	INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
	INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
	INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
#ifdef CONFIG_RFS_ACCEL
	INIT_LIST_HEAD(&priv->filters);
	spin_lock_init(&priv->filters_lock);
#endif

	priv->dev = dev;
	priv->mdev = mdev;
	priv->ddev = &mdev->pdev->dev;
	priv->prof = prof;
	priv->port = port;
	priv->port_up = false;
	priv->flags = prof->flags;
	priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME;
	priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
				       MLX4_WQE_CTRL_SOLICITED);
	priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
	priv->tx_ring_num = prof->tx_ring_num;
	priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
	netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key));

	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
				GFP_KERNEL);
	if (!priv->tx_ring) {
		err = -ENOMEM;
		goto out;
	}
	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
			      GFP_KERNEL);
	if (!priv->tx_cq) {
		err = -ENOMEM;
		goto out;
	}
	priv->rx_ring_num = prof->rx_ring_num;
	priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
	priv->cqe_size = mdev->dev->caps.cqe_size;
	priv->mac_index = -1;
	priv->msg_enable = MLX4_EN_MSG_LEVEL;
#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev)) {
		u8 prio;

		for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; ++prio) {
			priv->ets.prio_tc[prio] = prio;
			priv->ets.tc_tsa[prio] = IEEE_8021QAZ_TSA_VENDOR;
		}

		priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST |
			DCB_CAP_DCBX_VER_IEEE;
		priv->flags |= MLX4_EN_DCB_ENABLED;
		priv->cee_config.pfc_state = false;

		for (i = 0; i < MLX4_EN_NUM_UP; i++)
			priv->cee_config.dcb_pfc[i] = pfc_disabled;

		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
			dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
		} else {
			en_info(priv, "enabling only PFC DCB ops\n");
			dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
		}
	}
#endif
	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
		INIT_HLIST_HEAD(&priv->mac_hash[i]);

	/* Query for default mac and max mtu */
	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];

	if (mdev->dev->caps.rx_checksum_flags_port[priv->port] &
	    MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP)
		priv->flags |= MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP;

	/* Set default MAC */
	dev->addr_len = ETH_ALEN;
	mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
	if (!is_valid_ether_addr(dev->dev_addr)) {
		en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
		       priv->port, dev->dev_addr);
		err = -EINVAL;
		goto out;
	} else if (mlx4_is_slave(priv->mdev->dev) &&
		   (priv->mdev->dev->port_random_macs & 1 << priv->port)) {
		/* Random MAC was assigned in mlx4_slave_cap
		 * in mlx4_core module
		 */
		dev->addr_assign_type |= NET_ADDR_RANDOM;
		en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
	}

	memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac));
	priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					  DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
	err = mlx4_en_alloc_resources(priv);
	if (err)
		goto out;

	/* Initialize time stamping config */
	priv->hwtstamp_config.flags = 0;
	priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
	priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;

	/* Allocate page for receive rings */
	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
				 MLX4_EN_PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed to allocate page for rx qps\n");
		goto out;
	}
	priv->allocated = 1;
	/*
	 * Initialize netdev entry points
	 */
	if (mlx4_is_master(priv->mdev->dev))
		dev->netdev_ops = &mlx4_netdev_ops_master;
	else
		dev->netdev_ops = &mlx4_netdev_ops;
	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);

	dev->ethtool_ops = &mlx4_en_ethtool_ops;
	/*
	 * Set driver features
	 */
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (mdev->LSO_support)
		dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;

	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
	dev->features = dev->hw_features | NETIF_F_HIGHDMA |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			NETIF_F_HW_VLAN_CTAG_FILTER;
	dev->hw_features |= NETIF_F_LOOPBACK |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;

	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
		dev->features |= NETIF_F_HW_VLAN_STAG_RX |
				 NETIF_F_HW_VLAN_STAG_FILTER;
		dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
	}

	if (mlx4_is_slave(mdev->dev)) {
		bool vlan_offload_disabled;
		int phv;

		err = get_phv_bit(mdev->dev, port, &phv);
		if (!err && phv) {
			dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
			priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
		}
		err = mlx4_get_is_vlan_offload_disabled(mdev->dev, port,
							&vlan_offload_disabled);
		if (!err && vlan_offload_disabled) {
			dev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
					      NETIF_F_HW_VLAN_CTAG_RX |
					      NETIF_F_HW_VLAN_STAG_TX |
					      NETIF_F_HW_VLAN_STAG_RX);
			dev->features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
					   NETIF_F_HW_VLAN_CTAG_RX |
					   NETIF_F_HW_VLAN_STAG_TX |
					   NETIF_F_HW_VLAN_STAG_RX);
		}
	} else {
		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
		    !(mdev->dev->caps.flags2 &
		      MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
			dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
	}

	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
		dev->hw_features |= NETIF_F_RXFCS;

	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS)
		dev->hw_features |= NETIF_F_RXALL;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED &&
	    mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
		dev->hw_features |= NETIF_F_NTUPLE;

	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
		dev->priv_flags |= IFF_UNICAST_FLT;

	/* Setting a default hash function value */
	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) {
		priv->rss_hash_fn = ETH_RSS_HASH_TOP;
	} else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) {
		priv->rss_hash_fn = ETH_RSS_HASH_XOR;
	} else {
		en_warn(priv,
			"No RSS hash capabilities exposed, using Toeplitz\n");
		priv->rss_hash_fn = ETH_RSS_HASH_TOP;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
				    NETIF_F_GSO_UDP_TUNNEL_CSUM |
				    NETIF_F_GSO_PARTIAL;
		dev->features |= NETIF_F_GSO_UDP_TUNNEL |
				 NETIF_F_GSO_UDP_TUNNEL_CSUM |
				 NETIF_F_GSO_PARTIAL;
		dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
	}
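	/* NETIF_F_GSO_UDP_TUNNEL_CSUM is only usable through GSO_PARTIAL
	 * here: the device cannot compute the outer UDP checksum itself,
	 * so the stack pre-computes everything except the inner part and
	 * the hardware finishes the segment (see gso_partial_features
	 * above).
	 */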
	mdev->pndev[port] = dev;
	mdev->upper[port] = NULL;

	netif_carrier_off(dev);
	mlx4_en_set_default_moderation(priv);

	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);

	mlx4_en_update_loopback_state(priv->dev, priv->dev->features);

	/* Configure port */
	mlx4_en_calc_rx_buf(dev);
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    prof->tx_pause, prof->tx_ppp,
				    prof->rx_pause, prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto out;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
		if (err) {
			en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
			       err);
			goto out;
		}
	}

	/* Init port */
	en_warn(priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto out;
	}
	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);

	/* Initialize time stamp mechanism */
	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
		mlx4_en_init_timestamp(mdev);

	queue_delayed_work(mdev->workqueue, &priv->service_task,
			   SERVICE_TASK_DELAY);

	mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap,
				 mdev->profile.prof[priv->port].rx_ppp,
				 mdev->profile.prof[priv->port].rx_pause,
				 mdev->profile.prof[priv->port].tx_ppp,
				 mdev->profile.prof[priv->port].tx_pause);

	err = register_netdev(dev);
	if (err) {
		en_err(priv, "Netdev registration failed for port %d\n", port);
		goto out;
	}

	priv->registered = 1;
	devlink_port_type_eth_set(mlx4_get_devlink_port(mdev->dev, priv->port),
				  dev);

	return 0;

out:
	mlx4_en_destroy_netdev(dev);
	return err;
}
int mlx4_en_reset_config(struct net_device *dev,
			 struct hwtstamp_config ts_config,
			 netdev_features_t features)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_port_profile new_prof;
	struct mlx4_en_priv *tmp;
	int port_up = 0;
	int err = 0;

	if (priv->hwtstamp_config.tx_type == ts_config.tx_type &&
	    priv->hwtstamp_config.rx_filter == ts_config.rx_filter &&
	    !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
	    !DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS))
		return 0; /* Nothing to change */

	if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
	    (features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) {
		en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n");
		return -EINVAL;
	}

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	mutex_lock(&mdev->state_lock);

	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
	memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));

	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
	if (err)
		goto out;

	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n",
		ts_config.rx_filter,
		!!(features & NETIF_F_HW_VLAN_CTAG_RX));

	mlx4_en_safe_replace_resources(priv, tmp);

	if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
		if (features & NETIF_F_HW_VLAN_CTAG_RX)
			dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
		else
			dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
	} else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) {
		/* RX time-stamping is OFF, update the RX vlan offload
		 * to the latest wanted state
		 */
		if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX)
			dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
		else
			dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
	}

	if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) {
		if (features & NETIF_F_RXFCS)
			dev->features |= NETIF_F_RXFCS;
		else
			dev->features &= ~NETIF_F_RXFCS;
	}

	/* RX vlan offload and RX time-stamping can't co-exist!
	 * Regardless of the caller's choice,
	 * turn off RX vlan offload if time-stamping is ON.
	 */
	if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) {
		if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
			en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n");
		dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
	}

	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

	err = mlx4_en_moderation_update(priv);
out:
	mutex_unlock(&mdev->state_lock);
	kfree(tmp);
	if (!err)
		netdev_features_change(dev);
	return err;
}