/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/hash.h>

#include <net/busy_poll.h>
#include <net/vxlan.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int i;
	unsigned int offset = 0;

	if (up && up != MLX4_EN_NUM_UP)
		return -EINVAL;

	netdev_set_num_tc(dev, up);

	/* Partition Tx queues evenly amongst UP's */
	for (i = 0; i < up; i++) {
		netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
		offset += priv->num_tx_rings_p_up;
	}

	return 0;
}

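/*
 * Example (illustrative numbers): with priv->num_tx_rings_p_up == 8 and
 * up == 2, the loop above maps TC 0 to Tx queues 0..7 and TC 1 to Tx
 * queues 8..15, i.e. each user priority gets an equal, contiguous slice
 * of the Tx queues.
 */
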
#ifdef CONFIG_NET_RX_BUSY_POLL
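/*
 * Busy-poll receive path: a low-latency socket calls into the driver to
 * reap RX completions directly instead of waiting for the interrupt.
 * The small fixed budget (4 CQEs below) keeps each poll attempt short;
 * the LL_FLUSH_* return values tell the caller whether polling was even
 * possible (port down, or the CQ already owned by NAPI).
 */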
/* must be called with local_bh_disable()d */
static int mlx4_en_low_latency_recv(struct napi_struct *napi)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
	int done;

	if (!priv->port_up)
		return LL_FLUSH_FAILED;

	if (!mlx4_en_cq_lock_poll(cq))
		return LL_FLUSH_BUSY;

	done = mlx4_en_process_rx_cq(dev, cq, 4);
	if (likely(done))
		rx_ring->cleaned += done;
	else
		rx_ring->misses++;

	mlx4_en_cq_unlock_poll(cq);

	return done;
}
#endif	/* CONFIG_NET_RX_BUSY_POLL */

#ifdef CONFIG_RFS_ACCEL

struct mlx4_en_filter {
	struct list_head next;
	struct work_struct work;

	u8     ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;

	int rxq_index;
	struct mlx4_en_priv *priv;
	u32 flow_id;			/* RFS infrastructure id */
	int id;				/* mlx4_en driver id */
	u64 reg_id;			/* Flow steering API id */
	u8 activated;			/* Used to prevent expiry before filter
					 * is attached
					 */
	struct hlist_node filter_chain;
};

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);

static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_UDP:
		return MLX4_NET_TRANS_RULE_ID_UDP;
	case IPPROTO_TCP:
		return MLX4_NET_TRANS_RULE_ID_TCP;
	default:
		return MLX4_NET_TRANS_RULE_NUM;
	}
}

static void mlx4_en_filter_work(struct work_struct *work)
{
	struct mlx4_en_filter *filter = container_of(work,
						     struct mlx4_en_filter,
						     work);
	struct mlx4_en_priv *priv = filter->priv;
	struct mlx4_spec_list spec_tcp_udp = {
		.id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
		{
			.tcp_udp = {
				.dst_port = filter->dst_port,
				.dst_port_msk = (__force __be16)-1,
				.src_port = filter->src_port,
				.src_port_msk = (__force __be16)-1,
			},
		},
	};
	struct mlx4_spec_list spec_ip = {
		.id = MLX4_NET_TRANS_RULE_ID_IPV4,
		{
			.ipv4 = {
				.dst_ip = filter->dst_ip,
				.dst_ip_msk = (__force __be32)-1,
				.src_ip = filter->src_ip,
				.src_ip_msk = (__force __be32)-1,
			},
		},
	};
	struct mlx4_spec_list spec_eth = {
		.id = MLX4_NET_TRANS_RULE_ID_ETH,
	};
	struct mlx4_net_trans_rule rule = {
		.list = LIST_HEAD_INIT(rule.list),
		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
		.exclusive = 1,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_REGULAR,
		.port = priv->port,
		.priority = MLX4_DOMAIN_RFS,
	};
	int rc;
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
		en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
			filter->ip_proto);
		goto ignore;
	}
	list_add_tail(&spec_eth.list, &rule.list);
	list_add_tail(&spec_ip.list, &rule.list);
	list_add_tail(&spec_tcp_udp.list, &rule.list);

	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	filter->activated = 0;

	if (filter->reg_id) {
		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
		if (rc && rc != -ENOENT)
			en_err(priv, "Error detaching flow. rc = %d\n", rc);
	}

	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
	if (rc)
		en_err(priv, "Error attaching flow. err = %d\n", rc);

ignore:
	mlx4_en_filter_rfs_expire(priv);

	filter->activated = 1;
}

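/*
 * The bucket key folds the 4-tuple into one word: the two ports are
 * combined (dst shifted by 2 so equal src/dst ports still differ) and
 * XORed with src_ip ^ dst_ip, then hash_long() reduces the result to a
 * MLX4_EN_FILTER_HASH_SHIFT-bit bucket index.
 */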
static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		   __be16 src_port, __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);
	l ^= (__force unsigned long)(src_ip ^ dst_ip);

	bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

	return &priv->filter_hash[bucket_idx];
}

static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
		     __be32 dst_ip, u8 ip_proto, __be16 src_port,
		     __be16 dst_port, u32 flow_id)
{
	struct mlx4_en_filter *filter = NULL;

	filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
	if (!filter)
		return NULL;

	filter->priv = priv;
	filter->rxq_index = rxq_index;
	INIT_WORK(&filter->work, mlx4_en_filter_work);

	filter->src_ip = src_ip;
	filter->dst_ip = dst_ip;
	filter->ip_proto = ip_proto;
	filter->src_port = src_port;
	filter->dst_port = dst_port;

	filter->flow_id = flow_id;

	filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

	list_add_tail(&filter->next, &priv->filters);
	hlist_add_head(&filter->filter_chain,
		       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
					  dst_port));

	return filter;
}

static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
	struct mlx4_en_priv *priv = filter->priv;
	int rc;

	list_del(&filter->next);

	rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
	if (rc && rc != -ENOENT)
		en_err(priv, "Error detaching flow. rc = %d\n", rc);

	kfree(filter);
}

static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		    u8 ip_proto, __be16 src_port, __be16 dst_port)
{
	struct mlx4_en_filter *filter;
	struct mlx4_en_filter *ret = NULL;

	hlist_for_each_entry(filter,
			     filter_hash_bucket(priv, src_ip, dst_ip,
						src_port, dst_port),
			     filter_chain) {
		if (filter->src_ip == src_ip &&
		    filter->dst_ip == dst_ip &&
		    filter->ip_proto == ip_proto &&
		    filter->src_port == src_port &&
		    filter->dst_port == dst_port) {
			ret = filter;
			break;
		}
	}

	return ret;
}

static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct mlx4_en_priv *priv = netdev_priv(net_dev);
	struct mlx4_en_filter *filter;
	const struct iphdr *ip;
	const __be16 *ports;
	u8 ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;
	int nhoff = skb_network_offset(skb);
	int ret = 0;

	if (skb->encapsulation)
		return -EPROTONOSUPPORT;

	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;

	if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
		return -EPROTONOSUPPORT;
	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	ip_proto = ip->protocol;
	src_ip = ip->saddr;
	dst_ip = ip->daddr;
	src_port = ports[0];
	dst_port = ports[1];

	spin_lock_bh(&priv->filters_lock);
	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
				     src_port, dst_port);
	if (filter) {
		if (filter->rxq_index == rxq_index)
			goto out;

		filter->rxq_index = rxq_index;
	} else {
		filter = mlx4_en_filter_alloc(priv, rxq_index,
					      src_ip, dst_ip, ip_proto,
					      src_port, dst_port, flow_id);
		if (!filter) {
			ret = -ENOMEM;
			goto err;
		}
	}

	queue_work(priv->mdev->workqueue, &filter->work);

out:
	ret = filter->id;
err:
	spin_unlock_bh(&priv->filters_lock);

	return ret;
}

void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter, *tmp;
	LIST_HEAD(del_list);

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		list_move(&filter->next, &del_list);
		hlist_del(&filter->filter_chain);
	}
	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next) {
		cancel_work_sync(&filter->work);
		mlx4_en_filter_free(filter);
	}
}

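/*
 * Expiry below is quota-bound: at most MLX4_EN_FILTER_EXPIRY_QUOTA
 * filters are examined per invocation, and the filter list is rotated
 * afterwards so the next invocation resumes scanning after the last
 * filter this one looked at.
 */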
static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
	LIST_HEAD(del_list);
	int i = 0;

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
			break;

		if (filter->activated &&
		    !work_pending(&filter->work) &&
		    rps_may_expire_flow(priv->dev,
					filter->rxq_index, filter->flow_id,
					filter->id)) {
			list_move(&filter->next, &del_list);
			hlist_del(&filter->filter_chain);
		} else
			last_filter = filter;

		i++;
	}

	if (last_filter && (&last_filter->next != priv->filters.next))
		list_move(&priv->filters, &last_filter->next);

	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next)
		mlx4_en_filter_free(filter);
}
#endif	/* CONFIG_RFS_ACCEL */

static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx);
	if (err)
		en_dbg(HW, priv, "Failed adding vlan %d\n", vid);
	mutex_unlock(&mdev->state_lock);

	return 0;
}

static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	mlx4_unregister_vlan(mdev->dev, priv->port, vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);

	return 0;
}

static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
{
	int i;

	for (i = ETH_ALEN - 1; i >= 0; --i) {
		dst_mac[i] = src_mac & 0xff;
		src_mac >>= 8;
	}
	memset(&dst_mac[ETH_ALEN], 0, 2);
}

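/*
 * Example (illustrative value): src_mac 0x0002c9000001 becomes
 * dst_mac = 00:02:c9:00:00:01 - the least significant byte of the u64
 * lands in dst_mac[5] - and the two padding bytes past ETH_ALEN are
 * zeroed.
 */
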
static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
				    int qpn, u64 *reg_id)
{
	int err;

	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
	    priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
		return 0; /* do nothing */

	err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
				    MLX4_DOMAIN_NIC, reg_id);
	if (err) {
		en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
		return err;
	}
	en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id);
	return 0;
}

static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
				unsigned char *mac, int *qpn, u64 *reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = *qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		struct mlx4_spec_list spec_eth = { {NULL} };
		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

		struct mlx4_net_trans_rule rule = {
			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
			.exclusive = 0,
			.allow_loopback = 1,
			.promisc_mode = MLX4_FS_REGULAR,
			.priority = MLX4_DOMAIN_NIC,
		};

		rule.port = priv->port;
		rule.qpn = *qpn;
		INIT_LIST_HEAD(&rule.list);

		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		list_add_tail(&spec_eth.list, &rule.list);

		err = mlx4_flow_attach(dev, &rule, reg_id);
		break;
	}
	default:
		return -EINVAL;
	}
	if (err)
		en_warn(priv, "Failed Attaching Unicast\n");

	return err;
}

static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
				     unsigned char *mac, int qpn, u64 reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		mlx4_flow_detach(dev, reg_id);
		break;
	}
	default:
		en_err(priv, "Invalid steering mode.\n");
	}
}

static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int index = 0;
	int err = 0;
	int *qpn = &priv->base_qpn;
	u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);

	en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
	       priv->dev->dev_addr);
	index = mlx4_register_mac(dev, priv->port, mac);
	if (index < 0) {
		err = index;
		en_err(priv, "Failed adding MAC: %pM\n",
		       priv->dev->dev_addr);
		return err;
	}

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		int base_qpn = mlx4_get_base_qpn(dev, priv->port);
		*qpn = base_qpn + index;
		return 0;
	}

	err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP);
	en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
	if (err) {
		en_err(priv, "Failed to reserve qp for mac registration\n");
		mlx4_unregister_mac(dev, priv->port, mac);
		return err;
	}

	return 0;
}

static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int qpn = priv->base_qpn;

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
		en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
		       priv->dev->dev_addr);
		mlx4_unregister_mac(dev, priv->port, mac);
	} else {
		en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
		       priv->port, qpn);
		mlx4_qp_release_range(dev, qpn, 1);
		priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
	}
}

static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
			       unsigned char *new_mac, unsigned char *prev_mac)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err = 0;
	u64 new_mac_u64 = mlx4_mac_to_u64(new_mac);

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
		struct hlist_head *bucket;
		unsigned int mac_hash;
		struct mlx4_mac_entry *entry;
		struct hlist_node *tmp;
		u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac);

		bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
				mlx4_en_uc_steer_release(priv, entry->mac,
							 qpn, entry->reg_id);
				mlx4_unregister_mac(dev, priv->port,
						    prev_mac_u64);
				hlist_del_rcu(&entry->hlist);
				memcpy(entry->mac, new_mac, ETH_ALEN);
				entry->reg_id = 0;
				mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
				hlist_add_head_rcu(&entry->hlist,
						   &priv->mac_hash[mac_hash]);
				mlx4_register_mac(dev, priv->port, new_mac_u64);
				err = mlx4_en_uc_steer_add(priv, new_mac,
							   &qpn,
							   &entry->reg_id);
				if (err)
					return err;
				if (priv->tunnel_reg_id) {
					mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
					priv->tunnel_reg_id = 0;
				}
				err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn,
							       &priv->tunnel_reg_id);
				return err;
			}
		}
		return -EINVAL;
	}

	return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
}

static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv,
			      unsigned char new_mac[ETH_ALEN + 2])
{
	int err = 0;

	if (priv->port_up) {
		/* Remove old MAC and insert the new one */
		err = mlx4_en_replace_mac(priv, priv->base_qpn,
					  new_mac, priv->current_mac);
		if (err)
			en_err(priv, "Failed changing HW MAC address\n");
	} else
		en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");

	if (!err)
		memcpy(priv->current_mac, new_mac, sizeof(priv->current_mac));

	return err;
}

static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sockaddr *saddr = addr;
	unsigned char new_mac[ETH_ALEN + 2];
	int err;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	mutex_lock(&mdev->state_lock);
	memcpy(new_mac, saddr->sa_data, ETH_ALEN);
	err = mlx4_en_do_set_mac(priv, new_mac);
	if (!err)
		memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
	mutex_unlock(&mdev->state_lock);

	return err;
}

static void mlx4_en_clear_list(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_mc_list *tmp, *mc_to_del;

	list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
		list_del(&mc_to_del->list);
		kfree(mc_to_del);
	}
}

static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	struct mlx4_en_mc_list *tmp;

	mlx4_en_clear_list(dev);
	netdev_for_each_mc_addr(ha, dev) {
		tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
		if (!tmp) {
			mlx4_en_clear_list(dev);
			return;
		}
		memcpy(tmp->addr, ha->addr, ETH_ALEN);
		list_add_tail(&tmp->list, &priv->mc_list);
	}
}

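/*
 * update_mclist_flags() computes a diff between the cached list (src)
 * and the list of currently attached addresses (dst): entries present
 * only in dst are marked MCLIST_REM, entries present only in src are
 * copied into dst and marked MCLIST_ADD, and common entries are marked
 * MCLIST_NONE.
 */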
static void update_mclist_flags(struct mlx4_en_priv *priv,
				struct list_head *dst,
				struct list_head *src)
{
	struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
	bool found;

	/* Find all the entries that should be removed from dst,
	 * These are the entries that are not found in src
	 */
	list_for_each_entry(dst_tmp, dst, list) {
		found = false;
		list_for_each_entry(src_tmp, src, list) {
			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
				found = true;
				break;
			}
		}
		if (!found)
			dst_tmp->action = MCLIST_REM;
	}

	/* Add entries that exist in src but not in dst
	 * mark them as need to add
	 */
	list_for_each_entry(src_tmp, src, list) {
		found = false;
		list_for_each_entry(dst_tmp, dst, list) {
			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
				dst_tmp->action = MCLIST_NONE;
				found = true;
				break;
			}
		}
		if (!found) {
			new_mc = kmemdup(src_tmp,
					 sizeof(struct mlx4_en_mc_list),
					 GFP_KERNEL);
			if (!new_mc)
				return;

			new_mc->action = MCLIST_ADD;
			list_add_tail(&new_mc->list, dst);
		}
	}
}

static void mlx4_en_set_rx_mode(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}

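/*
 * Promiscuous setup depends on the device steering mode: DMFS
 * (device-managed flow steering) toggles a catch-all rule, B0 steering
 * registers/unregisters the base QP as unicast/multicast promisc, and
 * A0 steering reprograms the port's default-QP calculation.
 */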
static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
				     struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
		if (netif_msg_rx_status(priv))
			en_warn(priv, "Entering promiscuous mode\n");
		priv->flags |= MLX4_EN_FLAG_PROMISC;

		/* Enable promiscuous mode */
		switch (mdev->dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			err = mlx4_flow_steer_promisc_add(mdev->dev,
							  priv->port,
							  priv->base_qpn,
							  MLX4_FS_ALL_DEFAULT);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			break;

		case MLX4_STEERING_MODE_B0:
			err = mlx4_unicast_promisc_add(mdev->dev,
						       priv->base_qpn,
						       priv->port);
			if (err)
				en_err(priv, "Failed enabling unicast promiscuous mode\n");

			/* Add the default qp number as multicast
			 * promisc
			 */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				if (err)
					en_err(priv, "Failed enabling multicast promiscuous mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}
			break;

		case MLX4_STEERING_MODE_A0:
			err = mlx4_SET_PORT_qpn_calc(mdev->dev,
						     priv->port,
						     priv->base_qpn,
						     1);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			break;
		}

		/* Disable port multicast filter (unconditionally) */
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");
	}
}

static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
				       struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (netif_msg_rx_status(priv))
		en_warn(priv, "Leaving promiscuous mode\n");
	priv->flags &= ~MLX4_EN_FLAG_PROMISC;

	/* Disable promiscuous mode */
	switch (mdev->dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		err = mlx4_flow_steer_promisc_remove(mdev->dev,
						     priv->port,
						     MLX4_FS_ALL_DEFAULT);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		break;

	case MLX4_STEERING_MODE_B0:
		err = mlx4_unicast_promisc_remove(mdev->dev,
						  priv->base_qpn,
						  priv->port);
		if (err)
			en_err(priv, "Failed disabling unicast promiscuous mode\n");
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev,
							    priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
		break;

	case MLX4_STEERING_MODE_A0:
		err = mlx4_SET_PORT_qpn_calc(mdev->dev,
					     priv->port,
					     priv->base_qpn, 0);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		break;
	}
}

static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct mlx4_en_mc_list *mclist, *tmp;
	u64 mcast_addr = 0;
	u8 mc_list[16] = {0};
	int err = 0;

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_add(mdev->dev,
								  priv->port,
								  priv->base_qpn,
								  MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_remove(mdev->dev,
								     priv->port,
								     MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_remove(mdev->dev,
								    priv->base_qpn,
								    priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore
		 */
		netif_addr_lock_bh(dev);
		mlx4_en_cache_mclist(dev);
		netif_addr_unlock_bh(dev);
		list_for_each_entry(mclist, &priv->mc_list, list) {
			mcast_addr = mlx4_mac_to_u64(mclist->addr);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");

		update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
		list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
			if (mclist->action == MCLIST_REM) {
				/* detach this address and delete from list */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				mc_list[5] = priv->port;
				err = mlx4_multicast_detach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    MLX4_PROT_ETH,
							    mclist->reg_id);
				if (err)
					en_err(priv, "Failed to detach multicast address\n");

				if (mclist->tunnel_reg_id) {
					err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id);
					if (err)
						en_err(priv, "Failed to detach multicast address\n");
				}

				/* remove from list */
				list_del(&mclist->list);
				kfree(mclist);
			} else if (mclist->action == MCLIST_ADD) {
				/* attach the address */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				/* needed for B0 steering support */
				mc_list[5] = priv->port;
				err = mlx4_multicast_attach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    priv->port, 0,
							    MLX4_PROT_ETH,
							    &mclist->reg_id);
				if (err)
					en_err(priv, "Failed to attach multicast address\n");

				err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
							       &mclist->tunnel_reg_id);
				if (err)
					en_err(priv, "Failed to attach multicast address\n");
			}
		}
	}
}

static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct netdev_hw_addr *ha;
	struct mlx4_mac_entry *entry;
	struct hlist_node *tmp;
	bool found;
	u64 mac;
	int err = 0;
	struct hlist_head *bucket;
	unsigned int i;
	int removed = 0;
	u32 prev_flags;

	/* Note that we do not need to protect our mac_hash traversal with rcu,
	 * since all modification code is protected by mdev->state_lock
	 */

	/* find what to remove */
	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			found = false;
			netdev_for_each_uc_addr(ha, dev) {
				if (ether_addr_equal_64bits(entry->mac,
							    ha->addr)) {
					found = true;
					break;
				}
			}

			/* MAC address of the port is not in uc list */
			if (ether_addr_equal_64bits(entry->mac,
						    priv->current_mac))
				found = true;

			if (!found) {
				mac = mlx4_mac_to_u64(entry->mac);
				mlx4_en_uc_steer_release(priv, entry->mac,
							 priv->base_qpn,
							 entry->reg_id);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);

				hlist_del_rcu(&entry->hlist);
				kfree_rcu(entry, rcu);
				en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
				       entry->mac, priv->port);
				++removed;
			}
		}
	}

	/* if we didn't remove anything, there is no use in trying to add
	 * again once we are in a forced promisc mode state
	 */
	if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
		return;

	prev_flags = priv->flags;
	priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;

	/* find what to add */
	netdev_for_each_uc_addr(ha, dev) {
		found = false;
		bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry(entry, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
				found = true;
				break;
			}
		}

		if (!found) {
			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry) {
				en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
				       ha->addr, priv->port);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			mac = mlx4_mac_to_u64(ha->addr);
			memcpy(entry->mac, ha->addr, ETH_ALEN);
			err = mlx4_register_mac(mdev->dev, priv->port, mac);
			if (err < 0) {
				en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			err = mlx4_en_uc_steer_add(priv, ha->addr,
						   &priv->base_qpn,
						   &entry->reg_id);
			if (err) {
				en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			} else {
				unsigned int mac_hash;
				en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
				       ha->addr, priv->port);
				mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				hlist_add_head_rcu(&entry->hlist, bucket);
			}
		}
	}

	if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Forcing promiscuous mode on port:%d\n",
			priv->port);
	} else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
			priv->port);
	}
}

static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 rx_mode_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
		goto out;
	}

	if (!netif_carrier_ok(dev)) {
		if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
			if (priv->port_state.link_state) {
				priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
				netif_carrier_on(dev);
				en_dbg(LINK, priv, "Link Up\n");
			}
		}
	}

	if (dev->priv_flags & IFF_UNICAST_FLT)
		mlx4_en_do_uc_filter(priv, dev, mdev);

	/* Promiscuous mode: disable all filters */
	if ((dev->flags & IFF_PROMISC) ||
	    (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
		mlx4_en_set_promisc_mode(priv, mdev);
		goto out;
	}

	/* Not in promiscuous mode */
	if (priv->flags & MLX4_EN_FLAG_PROMISC)
		mlx4_en_clear_promisc_mode(priv, mdev);

	mlx4_en_do_multicast(priv, dev, mdev);
out:
	mutex_unlock(&mdev->state_lock);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mlx4_en_netpoll(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_cq *cq;
	int i;

	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];
		napi_schedule(&cq->napi);
	}
}
#endif

static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv)
{
	u64 reg_id;
	int err = 0;
	int *qpn = &priv->base_qpn;
	struct mlx4_mac_entry *entry;

	err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
	if (err)
		return err;

	err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
				       &priv->tunnel_reg_id);
	if (err)
		goto tunnel_err;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto alloc_err;
	}

	memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
	memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
	entry->reg_id = reg_id;
	hlist_add_head_rcu(&entry->hlist,
			   &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);

	return 0;

alloc_err:
	if (priv->tunnel_reg_id)
		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);

tunnel_err:
	mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
	return err;
}

static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv)
{
	u64 mac;
	unsigned int i;
	int qpn = priv->base_qpn;
	struct hlist_head *bucket;
	struct hlist_node *tmp;
	struct mlx4_mac_entry *entry;

	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			mac = mlx4_mac_to_u64(entry->mac);
			en_dbg(DRV, priv, "Registering MAC:%pM for deleting\n",
			       entry->mac);
			mlx4_en_uc_steer_release(priv, entry->mac,
						 qpn, entry->reg_id);

			mlx4_unregister_mac(priv->mdev->dev, priv->port, mac);
			hlist_del_rcu(&entry->hlist);
			kfree_rcu(entry, rcu);
		}
	}

	if (priv->tunnel_reg_id) {
		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
		priv->tunnel_reg_id = 0;
	}
}

static void mlx4_en_tx_timeout(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (netif_msg_timer(priv))
		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

	for (i = 0; i < priv->tx_ring_num; i++) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
			continue;
		en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
			i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn,
			priv->tx_ring[i]->cons, priv->tx_ring[i]->prod);
	}

	priv->port_stats.tx_timeout++;
	if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) {
		en_dbg(DRV, priv, "Scheduling port restart\n");
		queue_work(mdev->workqueue, &priv->restart_task);
	}
}

static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->stats_lock);
	memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
	spin_unlock_bh(&priv->stats_lock);

	return &priv->ret_stats;
}

static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coalescing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
	       priv->dev->mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		priv->last_moder_packets[i] = 0;
		priv->last_moder_bytes[i] = 0;
	}

	for (i = 0; i < priv->tx_ring_num; i++) {
		cq = priv->tx_cq[i];
		cq->moder_cnt = priv->tx_frames;
		cq->moder_time = priv->tx_usecs;
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_jiffies = 0;
	priv->last_moder_tx_packets = 0;
}

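/*
 * Adaptive moderation interpolates the CQ moderation time linearly
 * between rx_usecs_low and rx_usecs_high according to the observed
 * packet rate:
 *
 *	moder_time = (rate - pkt_rate_low) *
 *		     (rx_usecs_high - rx_usecs_low) /
 *		     (pkt_rate_high - pkt_rate_low) + rx_usecs_low
 *
 * Worked example (illustrative numbers): with pkt_rate_low = 400k pps,
 * pkt_rate_high = 450k pps, rx_usecs_low = 0 and rx_usecs_high = 128,
 * an observed rate of 425k pps gives 25k * 128 / 50k = 64 usec.
 */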
static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long rx_pkt_diff;
	int moder_time;
	int ring, err;

	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		spin_lock_bh(&priv->stats_lock);
		rx_packets = priv->rx_ring[ring]->packets;
		rx_bytes = priv->rx_ring[ring]->bytes;
		spin_unlock_bh(&priv->stats_lock);

		rx_pkt_diff = ((unsigned long) (rx_packets -
				priv->last_moder_packets[ring]));
		packets = rx_pkt_diff;
		rate = packets * HZ / period;
		avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
				priv->last_moder_bytes[ring])) / packets : 0;

		/* Apply auto-moderation only when packet rate
		 * exceeds a rate at which it matters */
		if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
		    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
			if (rate < priv->pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate > priv->pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				moder_time = (rate - priv->pkt_rate_low) *
					(priv->rx_usecs_high - priv->rx_usecs_low) /
					(priv->pkt_rate_high - priv->pkt_rate_low) +
					priv->rx_usecs_low;
		} else {
			moder_time = priv->rx_usecs_low;
		}

		if (moder_time != priv->last_moder_time[ring]) {
			priv->last_moder_time[ring] = moder_time;
			cq = priv->rx_cq[ring];
			cq->moder_time = moder_time;
			cq->moder_cnt = priv->rx_frames;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err)
				en_err(priv, "Failed modifying moderation for cq:%d\n",
				       ring);
		}
		priv->last_moder_packets[ring] = rx_packets;
		priv->last_moder_bytes[ring] = rx_bytes;
	}

	priv->last_moder_jiffies = jiffies;
}

static void mlx4_en_do_get_stats(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (priv->port_up) {
			err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
			if (err)
				en_dbg(HW, priv, "Could not update stats\n");

			mlx4_en_auto_moderation(priv);
		}

		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
		mlx4_en_do_set_mac(priv, priv->current_mac);
		mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
	}
	mutex_unlock(&mdev->state_lock);
}

/* mlx4_en_service_task - Run service task for tasks that need to be done
 * periodically
 */
static void mlx4_en_service_task(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 service_task);
	struct mlx4_en_dev *mdev = priv->mdev;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
			mlx4_en_ptp_overflow_check(mdev);

		mlx4_en_recover_from_oom(priv);
		queue_delayed_work(mdev->workqueue, &priv->service_task,
				   SERVICE_TASK_DELAY);
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_linkstate(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 linkstate_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int linkstate = priv->link_state;

	mutex_lock(&mdev->state_lock);
	/* If observable port state changed set carrier state and
	 * report to system log */
	if (priv->last_link_state != linkstate) {
		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
			en_info(priv, "Link Down\n");
			netif_carrier_off(priv->dev);
		} else {
			en_info(priv, "Link Up\n");
			netif_carrier_on(priv->dev);
		}
	}
	priv->last_link_state = linkstate;
	mutex_unlock(&mdev->state_lock);
}

static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
{
	struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
	int numa_node = priv->mdev->dev->numa_node;

	if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node),
			ring->affinity_mask);
	return 0;
}

static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
{
	free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
}

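/*
 * Port bring-up sequence: activate RX rings and CQs, acquire the base
 * RX QP and configure RSS steering, activate TX CQs and rings, program
 * port-wide settings (MTU/pause, default QP, VXLAN), INIT the port,
 * install steering rules, attach the broadcast address, then re-run
 * the RX-mode task and wake the TX queues.
 */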
int mlx4_en_start_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_ring *tx_ring;
	int rx_index = 0;
	int tx_index = 0;
	int err = 0;
	int i;
	int j;
	u8 mc_list[16] = {0};

	if (priv->port_up) {
		en_dbg(DRV, priv, "start port called while port already up\n");
		return 0;
	}

	INIT_LIST_HEAD(&priv->mc_list);
	INIT_LIST_HEAD(&priv->curr_list);
	INIT_LIST_HEAD(&priv->ethtool_list);
	memset(&priv->ethtool_rules[0], 0,
	       sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);

	/* Calculate Rx buf size */
	dev->mtu = min(dev->mtu, priv->max_mtu);
	mlx4_en_calc_rx_buf(dev);
	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);

	/* Configure rx cq's and rings */
	err = mlx4_en_activate_rx_rings(priv);
	if (err) {
		en_err(priv, "Failed to activate RX rings\n");
		return err;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];

		mlx4_en_cq_init_lock(cq);

		err = mlx4_en_init_affinity_hint(priv, i);
		if (err) {
			en_err(priv, "Failed preparing IRQ affinity hint\n");
			goto cq_err;
		}

		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed activating Rx CQ\n");
			mlx4_en_free_affinity_hint(priv, i);
			goto cq_err;
		}

		for (j = 0; j < cq->size; j++) {
			struct mlx4_cqe *cqe = NULL;

			cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) +
			      priv->cqe_factor;
			cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK;
		}

		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters\n");
			mlx4_en_deactivate_cq(priv, cq);
			mlx4_en_free_affinity_hint(priv, i);
			goto cq_err;
		}
		mlx4_en_arm_cq(priv, cq);
		priv->rx_ring[i]->cqn = cq->mcq.cqn;
		++rx_index;
	}

	/* Set qp number */
	en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
	err = mlx4_en_get_qp(priv);
	if (err) {
		en_err(priv, "Failed getting eth qp\n");
		goto cq_err;
	}
	mdev->mac_removed[priv->port] = 0;

	priv->counter_index =
			mlx4_get_default_counter_index(mdev->dev, priv->port);

	err = mlx4_en_config_rss_steer(priv);
	if (err) {
		en_err(priv, "Failed configuring rss steering\n");
		goto mac_err;
	}

	err = mlx4_en_create_drop_qp(priv);
	if (err)
		goto rss_err;

	/* Configure tx cq's and rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		/* Configure cq */
		cq = priv->tx_cq[i];
		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed allocating Tx CQ\n");
			goto tx_err;
		}
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters\n");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
		cq->buf->wqe_index = cpu_to_be16(0xffff);

		/* Configure ring */
		tx_ring = priv->tx_ring[i];
		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
					       i / priv->num_tx_rings_p_up);
		if (err) {
			en_err(priv, "Failed allocating Tx ring\n");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		tx_ring->tx_queue = netdev_get_tx_queue(dev, i);

		/* Arm CQ for TX completions */
		mlx4_en_arm_cq(priv, cq);

		/* Set initial ownership of all Tx TXBBs to SW (1) */
		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
			*((u32 *) (tx_ring->buf + j)) = 0xffffffff;
		++tx_index;
	}

	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto tx_err;
	}
	/* Set default qp number */
	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed setting default qp numbers\n");
		goto tx_err;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
		if (err) {
			en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
			       err);
			goto tx_err;
		}
	}

	/* Init port */
	en_dbg(HW, priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto tx_err;
	}

	/* Set Unicast and VXLAN steering rules */
	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0 &&
	    mlx4_en_set_rss_steer_rules(priv))
		mlx4_warn(mdev, "Failed setting steering rules\n");

	/* Attach rx QP to broadcast address */
	eth_broadcast_addr(&mc_list[10]);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
				  priv->port, 0, MLX4_PROT_ETH,
				  &priv->broadcast_id))
		mlx4_warn(mdev, "Failed Attaching Broadcast\n");

	/* Must redo promiscuous mode setup. */
	priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);

	/* Schedule multicast task to populate multicast list */
	queue_work(mdev->workqueue, &priv->rx_mode_task);

#ifdef CONFIG_MLX4_EN_VXLAN
	if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
		vxlan_get_rx_port(dev);
#endif
	priv->port_up = true;

	/* Process all completions if exist to prevent
	 * the queues freezing if they are full
	 */
	for (i = 0; i < priv->rx_ring_num; i++)
		napi_schedule(&priv->rx_cq[i]->napi);

	clear_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state);
	netif_tx_start_all_queues(dev);
	netif_device_attach(dev);

	return 0;

tx_err:
	while (tx_index--) {
		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
		mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
	}
	mlx4_en_destroy_drop_qp(priv);
rss_err:
	mlx4_en_release_rss_steer(priv);
mac_err:
	mlx4_en_put_qp(priv);
cq_err:
	while (rx_index--) {
		mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
		mlx4_en_free_affinity_hint(priv, rx_index);
	}
	for (i = 0; i < priv->rx_ring_num; i++)
		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);

	return err; /* need to close devices */
}

void mlx4_en_stop_port(struct net_device *dev, int detach)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_mc_list *mclist, *tmp;
	struct ethtool_flow_id *flow, *tmp_flow;
	int i;
	u8 mc_list[16] = {0};

	if (!priv->port_up) {
		en_dbg(DRV, priv, "stop port called while port already down\n");
		return;
	}

	/* close port*/
	mlx4_CLOSE_PORT(mdev->dev, priv->port);

	/* Synchronize with tx routine */
	netif_tx_lock_bh(dev);
	if (detach)
		netif_device_detach(dev);
	netif_tx_stop_all_queues(dev);
	netif_tx_unlock_bh(dev);

	netif_tx_disable(dev);

	/* Set port as not active */
	priv->port_up = false;
	priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);

	/* Promiscuous mode */
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
				 MLX4_EN_FLAG_MC_PROMISC);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_ALL_DEFAULT);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_MC_DEFAULT);
	} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscuous mode */
		mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
					    priv->port);

		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
						      priv->port);
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
	}

	/* Detach All multicasts */
	eth_broadcast_addr(&mc_list[10]);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
			      MLX4_PROT_ETH, priv->broadcast_id);
	list_for_each_entry(mclist, &priv->curr_list, list) {
		memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
		mc_list[5] = priv->port;
		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
				      mc_list, MLX4_PROT_ETH, mclist->reg_id);
		if (mclist->tunnel_reg_id)
			mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id);
	}
	mlx4_en_clear_list(dev);
	list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
		list_del(&mclist->list);
		kfree(mclist);
	}

	/* Flush multicast filter */
	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);

	/* Remove flow steering rules for the port*/
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		list_for_each_entry_safe(flow, tmp_flow,
					 &priv->ethtool_list, list) {
			mlx4_flow_detach(mdev->dev, flow->id);
			list_del(&flow->list);
		}
	}

	mlx4_en_destroy_drop_qp(priv);

	/* Deactivate tx rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
		mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
	}

	for (i = 0; i < priv->tx_ring_num; i++)
		mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);

	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
		mlx4_en_delete_rss_steer_rules(priv);

	/* Free RSS qps */
	mlx4_en_release_rss_steer(priv);

	/* Unregister Mac address for the port */
	mlx4_en_put_qp(priv);
	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN))
		mdev->mac_removed[priv->port] = 1;

	/* Free RX Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		struct mlx4_en_cq *cq = priv->rx_cq[i];

		while (!mlx4_en_cq_lock_napi(cq)) {
			pr_info("CQ %d locked\n", i);
			mdelay(1);
		}

		napi_synchronize(&cq->napi);
		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
		mlx4_en_deactivate_cq(priv, cq);

		mlx4_en_free_affinity_hint(priv, i);
	}
}

static void mlx4_en_restart(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 restart_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		mlx4_en_stop_port(dev, 1);
		if (mlx4_en_start_port(dev))
			en_err(priv, "Failed restarting port %d\n", priv->port);
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_clear_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
		en_dbg(HW, priv, "Failed dumping statistics\n");

	memset(&priv->stats, 0, sizeof(priv->stats));
	memset(&priv->pstats, 0, sizeof(priv->pstats));
	memset(&priv->pkstats, 0, sizeof(priv->pkstats));
	memset(&priv->port_stats, 0, sizeof(priv->port_stats));
	memset(&priv->rx_flowstats, 0, sizeof(priv->rx_flowstats));
	memset(&priv->tx_flowstats, 0, sizeof(priv->tx_flowstats));
	memset(&priv->rx_priority_flowstats, 0,
	       sizeof(priv->rx_priority_flowstats));
	memset(&priv->tx_priority_flowstats, 0,
	       sizeof(priv->tx_priority_flowstats));
	memset(&priv->pf_stats, 0, sizeof(priv->pf_stats));

	for (i = 0; i < priv->tx_ring_num; i++) {
		priv->tx_ring[i]->bytes = 0;
		priv->tx_ring[i]->packets = 0;
		priv->tx_ring[i]->tx_csum = 0;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i]->bytes = 0;
		priv->rx_ring[i]->packets = 0;
		priv->rx_ring[i]->csum_ok = 0;
		priv->rx_ring[i]->csum_none = 0;
		priv->rx_ring[i]->csum_complete = 0;
	}
}

static int mlx4_en_open(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	mutex_lock(&mdev->state_lock);

	if (!mdev->device_up) {
		en_err(priv, "Cannot open - device down/disabled\n");
		err = -EBUSY;
		goto out;
	}

	/* Reset HW statistics and SW counters */
	mlx4_en_clear_stats(dev);

	err = mlx4_en_start_port(dev);
	if (err)
		en_err(priv, "Failed starting port:%d\n", priv->port);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}

static int mlx4_en_close(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(IFDOWN, priv, "Close port called\n");

	mutex_lock(&mdev->state_lock);

	mlx4_en_stop_port(dev, 0);
	netif_carrier_off(dev);

	mutex_unlock(&mdev->state_lock);
	return 0;
}

void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
	int i;

#ifdef CONFIG_RFS_ACCEL
	priv->dev->rx_cpu_rmap = NULL;
#endif

	for (i = 0; i < priv->tx_ring_num; i++) {
		if (priv->tx_ring && priv->tx_ring[i])
			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
		if (priv->tx_cq && priv->tx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i])
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
						priv->prof->rx_ring_size, priv->stride);
		if (priv->rx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}
}

int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
	struct mlx4_en_port_profile *prof = priv->prof;
	int i;
	int node;

	/* Create tx Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		node = cpu_to_node(i % num_online_cpus());
		if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
				      prof->tx_ring_size, i, TX, node))
			goto err;

		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
					   prof->tx_ring_size, TXBB_SIZE,
					   node, i))
			goto err;
	}

	/* Create rx Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		node = cpu_to_node(i % num_online_cpus());
		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
				      prof->rx_ring_size, i, RX, node))
			goto err;

		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
					   prof->rx_ring_size, priv->stride,
					   node))
			goto err;
	}

#ifdef CONFIG_RFS_ACCEL
	priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port);
#endif

	return 0;

err:
	en_err(priv, "Failed to allocate NIC resources\n");
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i])
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
						prof->rx_ring_size,
						priv->stride);
		if (priv->rx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}
	for (i = 0; i < priv->tx_ring_num; i++) {
		if (priv->tx_ring[i])
			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
		if (priv->tx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
	}
	return -ENOMEM;
}

void mlx4_en_destroy_netdev(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

	/* Unregister device - this will close the port if it was up */
	if (priv->registered)
		unregister_netdev(dev);

	if (priv->allocated)
		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

	cancel_delayed_work(&priv->stats_task);
	cancel_delayed_work(&priv->service_task);
	/* flush any pending task for this netdev */
	flush_workqueue(mdev->workqueue);

	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
		mlx4_en_remove_timestamp(mdev);

	/* Detach the netdev so tasks would not attempt to access it */
	mutex_lock(&mdev->state_lock);
	mdev->pndev[priv->port] = NULL;
	mdev->upper[priv->port] = NULL;
	mutex_unlock(&mdev->state_lock);

	mlx4_en_free_resources(priv);

	kfree(priv->tx_ring);
	kfree(priv->tx_cq);

	free_netdev(dev);
}

static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
	       dev->mtu, new_mtu);

	if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
		en_err(priv, "Bad MTU size:%d.\n", new_mtu);
		return -EPERM;
	}
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		mutex_lock(&mdev->state_lock);
		if (!mdev->device_up) {
			/* NIC is probably restarting - let restart task reset
			 * the port */
			en_dbg(DRV, priv, "Change MTU called with card down!?\n");
		} else {
			mlx4_en_stop_port(dev, 1);
			err = mlx4_en_start_port(dev);
			if (err) {
				en_err(priv, "Failed restarting port:%d\n",
				       priv->port);
				if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING,
						      &priv->state))
					queue_work(mdev->workqueue, &priv->restart_task);
			}
		}
		mutex_unlock(&mdev->state_lock);
	}
	return 0;
}

static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct hwtstamp_config config;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	/* device doesn't support time stamping */
	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
		return -EINVAL;

	/* TX HW timestamp */
	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	/* RX HW timestamp */
	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	if (mlx4_en_reset_config(dev, config, dev->features)) {
		config.tx_type = HWTSTAMP_TX_OFF;
		config.rx_filter = HWTSTAMP_FILTER_NONE;
	}

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config,
			    sizeof(priv->hwtstamp_config)) ? -EFAULT : 0;
}

static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlx4_en_hwtstamp_set(dev, ifr);
	case SIOCGHWTSTAMP:
		return mlx4_en_hwtstamp_get(dev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

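/*
 * Illustrative userspace sketch (not part of the driver): hardware
 * timestamping is enabled through the standard SIOCSHWTSTAMP ioctl
 * handled above. Interface name and socket fd are hypothetical.
 *
 *	struct hwtstamp_config cfg = { 0 };
 *	struct ifreq ifr;
 *
 *	cfg.tx_type = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_ALL;
 *	snprintf(ifr.ifr_name, IFNAMSIZ, "eth0");
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * On return, cfg.rx_filter reports what was actually applied; this
 * driver coalesces all PTP filters to HWTSTAMP_FILTER_ALL.
 */
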
static netdev_features_t mlx4_en_fix_features(struct net_device *netdev,
					      netdev_features_t features)
{
	struct mlx4_en_priv *en_priv = netdev_priv(netdev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	/* Since there is no support for separate RX C-TAG/S-TAG vlan accel
	 * enable/disable make sure S-TAG flag is always in same state as
	 * C-TAG.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX &&
	    !(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
		features |= NETIF_F_HW_VLAN_STAG_RX;
	else
		features &= ~NETIF_F_HW_VLAN_STAG_RX;

	return features;
}

static int mlx4_en_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);
	bool reset = false;
	int ret = 0;

	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXFCS)) {
		en_info(priv, "Turn %s RX-FCS\n",
			(features & NETIF_F_RXFCS) ? "ON" : "OFF");
		reset = true;
	}

	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXALL)) {
		u8 ignore_fcs_value = (features & NETIF_F_RXALL) ? 1 : 0;

		en_info(priv, "Turn %s RX-ALL\n",
			ignore_fcs_value ? "ON" : "OFF");
		ret = mlx4_SET_PORT_fcs_check(priv->mdev->dev,
					      priv->port, ignore_fcs_value);
		if (ret)
			return ret;
	}

	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
		en_info(priv, "Turn %s RX vlan strip offload\n",
			(features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF");
		reset = true;
	}

	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX))
		en_info(priv, "Turn %s TX vlan strip offload\n",
			(features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");

	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX))
		en_info(priv, "Turn %s TX S-VLAN strip offload\n",
			(features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF");

	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) {
		en_info(priv, "Turn %s loopback\n",
			(features & NETIF_F_LOOPBACK) ? "ON" : "OFF");
		mlx4_en_update_loopback_state(netdev, features);
	}

	if (reset) {
		ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config,
					   features);
		if (ret)
			return ret;
	}

	return 0;
}

2301 static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
2303 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2304 struct mlx4_en_dev *mdev = en_priv->mdev;
2305 u64 mac_u64 = mlx4_mac_to_u64(mac);
2307 if (is_multicast_ether_addr(mac))
2310 return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
2313 static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
2315 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2316 struct mlx4_en_dev *mdev = en_priv->mdev;
2318 return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos);
static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
			       int max_tx_rate)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_set_vf_rate(mdev->dev, en_priv->port, vf, min_tx_rate,
				max_tx_rate);
}
static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
}
static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
}
static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
}
static int mlx4_en_get_vf_stats(struct net_device *dev, int vf,
				struct ifla_vf_stats *vf_stats)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_get_vf_stats(mdev->dev, en_priv->port, vf, vf_stats);
}
2364 #define PORT_ID_BYTE_LEN 8
static int mlx4_en_get_phys_port_id(struct net_device *dev,
				    struct netdev_phys_item_id *ppid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_dev *mdev = priv->mdev->dev;
	int i;
	u64 phys_port_id = mdev->caps.phys_port_id[priv->port];

	if (!phys_port_id)
		return -EOPNOTSUPP;

	ppid->id_len = sizeof(phys_port_id);
	for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
		ppid->id[i] = phys_port_id & 0xff;
		phys_port_id >>= 8;
	}
	return 0;
}
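/* Worked example of the packing loop above: for a hypothetical
 * phys_port_id of 0x1122334455667788, the loop peels off the least
 * significant byte first and stores it at the highest index, yielding
 * ppid->id[] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 },
 * i.e. the id is exposed in big-endian (network) byte order.
 */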
2384 #ifdef CONFIG_MLX4_EN_VXLAN
static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
{
	int ret;
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 vxlan_add_task);

	ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port);
	if (!ret)
		ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
					  VXLAN_STEER_BY_OUTER_MAC, 1);
	if (ret) {
		en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
		return;
	}

	/* set offloads */
	priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
				      NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
}
static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
{
	int ret;
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 vxlan_del_task);
	/* unset offloads */
	priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
					NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);

	ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
				  VXLAN_STEER_BY_OUTER_MAC, 0);
	if (ret)
		en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);

	priv->vxlan_port = 0;
}
static void mlx4_en_add_vxlan_port(struct net_device *dev,
				   sa_family_t sa_family, __be16 port)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	__be16 current_port;

	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
		return;

	if (sa_family == AF_INET6)
		return;

	current_port = priv->vxlan_port;
	if (current_port && current_port != port) {
		en_warn(priv, "vxlan port %d configured, can't add port %d\n",
			ntohs(current_port), ntohs(port));
		return;
	}

	priv->vxlan_port = port;
	queue_work(priv->mdev->workqueue, &priv->vxlan_add_task);
}
static void mlx4_en_del_vxlan_port(struct net_device *dev,
				   sa_family_t sa_family, __be16 port)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	__be16 current_port;

	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
		return;

	if (sa_family == AF_INET6)
		return;

	current_port = priv->vxlan_port;
	if (current_port != port) {
		en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port));
		return;
	}

	queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
}
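/* Design note on the two callbacks above: they only validate the request
 * and record the port, deferring the real work to mdev->workqueue. The
 * firmware commands behind mlx4_config_vxlan_port() and
 * mlx4_SET_PORT_VXLAN() may sleep, while these notifiers can be invoked
 * from contexts where sleeping is not allowed.
 */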
static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
						struct net_device *dev,
						netdev_features_t features)
{
	features = vlan_features_check(skb, features);
	return vxlan_features_check(skb, features);
}
#endif
static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[queue_index];
	struct mlx4_update_qp_params params;
	int err;

	if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT))
		return -EOPNOTSUPP;
	/* The rate is given to us in Mb/s; if it does not fit into 12 bits,
	 * fall back to Gb/s granularity.
	 */
2489 if (maxrate >> 12) {
2490 params.rate_unit = MLX4_QP_RATE_LIMIT_GBS;
2491 params.rate_val = maxrate / 1000;
2492 } else if (maxrate) {
2493 params.rate_unit = MLX4_QP_RATE_LIMIT_MBS;
2494 params.rate_val = maxrate;
2495 } else { /* zero serves to revoke the QP rate-limitation */
2496 params.rate_unit = 0;
2497 params.rate_val = 0;
	}

	err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT,
			     &params);
	return err;
}
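/* Worked example of the unit selection above: maxrate = 2500 (Mb/s) gives
 * 2500 >> 12 == 0, so it is programmed as 2500 in Mb/s units; maxrate =
 * 10000 gives 10000 >> 12 != 0, so it is programmed as 10000 / 1000 = 10
 * in Gb/s units; maxrate = 0 clears both fields, revoking the limit.
 */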
2505 static const struct net_device_ops mlx4_netdev_ops = {
2506 .ndo_open = mlx4_en_open,
2507 .ndo_stop = mlx4_en_close,
2508 .ndo_start_xmit = mlx4_en_xmit,
2509 .ndo_select_queue = mlx4_en_select_queue,
2510 .ndo_get_stats = mlx4_en_get_stats,
2511 .ndo_set_rx_mode = mlx4_en_set_rx_mode,
2512 .ndo_set_mac_address = mlx4_en_set_mac,
2513 .ndo_validate_addr = eth_validate_addr,
2514 .ndo_change_mtu = mlx4_en_change_mtu,
2515 .ndo_do_ioctl = mlx4_en_ioctl,
2516 .ndo_tx_timeout = mlx4_en_tx_timeout,
2517 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
2518 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
2519 #ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = mlx4_en_netpoll,
#endif
2522 .ndo_set_features = mlx4_en_set_features,
2523 .ndo_fix_features = mlx4_en_fix_features,
2524 .ndo_setup_tc = mlx4_en_setup_tc,
2525 #ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
2528 #ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll = mlx4_en_low_latency_recv,
#endif
2531 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
2532 #ifdef CONFIG_MLX4_EN_VXLAN
2533 .ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
2534 .ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
	.ndo_features_check = mlx4_en_features_check,
#endif
	.ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
};
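/* The _master variant below additionally wires up the ndo_set_vf_* and
 * ndo_get_vf_* SR-IOV callbacks, which are meaningful only when this
 * function acts as the PF for the port.
 */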
2540 static const struct net_device_ops mlx4_netdev_ops_master = {
2541 .ndo_open = mlx4_en_open,
2542 .ndo_stop = mlx4_en_close,
2543 .ndo_start_xmit = mlx4_en_xmit,
2544 .ndo_select_queue = mlx4_en_select_queue,
2545 .ndo_get_stats = mlx4_en_get_stats,
2546 .ndo_set_rx_mode = mlx4_en_set_rx_mode,
2547 .ndo_set_mac_address = mlx4_en_set_mac,
2548 .ndo_validate_addr = eth_validate_addr,
2549 .ndo_change_mtu = mlx4_en_change_mtu,
2550 .ndo_tx_timeout = mlx4_en_tx_timeout,
2551 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
2552 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
2553 .ndo_set_vf_mac = mlx4_en_set_vf_mac,
2554 .ndo_set_vf_vlan = mlx4_en_set_vf_vlan,
2555 .ndo_set_vf_rate = mlx4_en_set_vf_rate,
2556 .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk,
2557 .ndo_set_vf_link_state = mlx4_en_set_vf_link_state,
2558 .ndo_get_vf_stats = mlx4_en_get_vf_stats,
2559 .ndo_get_vf_config = mlx4_en_get_vf_config,
2560 #ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = mlx4_en_netpoll,
#endif
2563 .ndo_set_features = mlx4_en_set_features,
2564 .ndo_fix_features = mlx4_en_fix_features,
2565 .ndo_setup_tc = mlx4_en_setup_tc,
2566 #ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
2569 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
2570 #ifdef CONFIG_MLX4_EN_VXLAN
2571 .ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
2572 .ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
	.ndo_features_check = mlx4_en_features_check,
#endif
	.ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
};
2578 struct mlx4_en_bond {
2579 struct work_struct work;
2580 struct mlx4_en_priv *priv;
	bool is_bonded;
	struct mlx4_port_map port_map;
};
static void mlx4_en_bond_work(struct work_struct *work)
{
	struct mlx4_en_bond *bond = container_of(work,
						 struct mlx4_en_bond,
						 work);
	int err = 0;
	struct mlx4_dev *dev = bond->priv->mdev->dev;

	if (bond->is_bonded) {
		if (!mlx4_is_bonded(dev)) {
			err = mlx4_bond(dev);
			if (err)
				en_err(bond->priv, "Failed to bond device\n");
		}
		if (!err) {
			err = mlx4_port_map_set(dev, &bond->port_map);
			if (err)
				en_err(bond->priv, "Failed to set port map [%d][%d]: %d\n",
				       bond->port_map.port1,
				       bond->port_map.port2,
				       err);
		}
	} else if (mlx4_is_bonded(dev)) {
		err = mlx4_unbond(dev);
		if (err)
			en_err(bond->priv, "Failed to unbond device\n");
	}
	dev_put(bond->priv->dev);
	kfree(bond);
}
static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded,
				   u8 v2p_p1, u8 v2p_p2)
{
	struct mlx4_en_bond *bond = NULL;

	bond = kzalloc(sizeof(*bond), GFP_ATOMIC);
	if (!bond)
		return -ENOMEM;

	INIT_WORK(&bond->work, mlx4_en_bond_work);
	bond->priv = priv;
	bond->is_bonded = is_bonded;
	bond->port_map.port1 = v2p_p1;
	bond->port_map.port2 = v2p_p2;
	dev_hold(priv->dev);
	queue_work(priv->mdev->workqueue, &bond->work);
	return 0;
}
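/* Design note: GFP_ATOMIC is used above because this helper runs from the
 * netdev notifier path, where blocking on the allocator is undesirable.
 * The dev_hold() pairs with the dev_put() at the end of
 * mlx4_en_bond_work(), keeping the net_device alive until the deferred
 * (un)bond completes.
 */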
int mlx4_en_netdev_event(struct notifier_block *this,
			 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	u8 port = 0;
	struct mlx4_en_dev *mdev;
	struct mlx4_dev *dev;
	int i, num_eth_ports = 0;
	bool do_bond = true;
	struct mlx4_en_priv *priv;
	u8 v2p_port1 = 0;
	u8 v2p_port2 = 0;

	if (!net_eq(dev_net(ndev), &init_net))
		return NOTIFY_DONE;
	mdev = container_of(this, struct mlx4_en_dev, nb);
	dev = mdev->dev;

	/* Go into this mode only when two network devices set on two ports
	 * of the same mlx4 device are slaves of the same bonding master
	 */
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		++num_eth_ports;
		if (!port && (mdev->pndev[i] == ndev))
			port = i;
		mdev->upper[i] = mdev->pndev[i] ?
			netdev_master_upper_dev_get(mdev->pndev[i]) : NULL;
		/* condition not met: network device is a slave */
		if (!mdev->upper[i])
			do_bond = false;
		if (num_eth_ports < 2)
			continue;
		/* condition not met: same master */
		if (mdev->upper[i] != mdev->upper[i-1])
			do_bond = false;
	}

	/* condition not met: 2 slaves */
	do_bond = (num_eth_ports == 2) ? do_bond : false;
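	/* To summarize the loop above, bonding mode is attempted only when:
	 * (a) every mlx4 Ethernet port has an upper (bonding master) device,
	 * (b) all ports share the same master, and (c) the device exposes
	 * exactly two Ethernet ports; failing any one condition clears
	 * do_bond.
	 */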
2675 /* handle only events that come with enough info */
	if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port)
		return NOTIFY_DONE;
2679 priv = netdev_priv(ndev);
2681 struct netdev_notifier_bonding_info *notifier_info = ptr;
		struct netdev_bonding_info *bonding_info =
			&notifier_info->bonding_info;
		/* required mode 1, 2 or 4 */
		if ((bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) &&
		    (bonding_info->master.bond_mode != BOND_MODE_XOR) &&
		    (bonding_info->master.bond_mode != BOND_MODE_8023AD))
			do_bond = false;

		/* require exactly 2 slaves */
		if (bonding_info->master.num_slaves != 2)
			do_bond = false;
2697 if (bonding_info->master.bond_mode ==
2698 BOND_MODE_ACTIVEBACKUP) {
			/* in active-backup mode virtual ports are
			 * mapped to the physical port of the active
			 * slave */
			if (bonding_info->slave.state ==
			    BOND_STATE_BACKUP) {
2711 } else { /* BOND_STATE_ACTIVE */
2720 } else { /* Active-Active */
2721 /* in active-active mode a virtual port is
2722 * mapped to the native physical port if and only
2723 * if the physical port is up */
2724 __s8 link = bonding_info->slave.link;
2730 if ((link == BOND_LINK_UP) ||
2731 (link == BOND_LINK_FAIL)) {
2736 } else { /* BOND_LINK_DOWN || BOND_LINK_BACK */
	mlx4_en_queue_bond_work(priv, do_bond,
				v2p_port1, v2p_port2);

	return NOTIFY_DONE;
}
2752 void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev,
2753 struct mlx4_en_stats_bitmap *stats_bitmap,
2754 u8 rx_ppp, u8 rx_pause,
2755 u8 tx_ppp, u8 tx_pause)
{
	int last_i = NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PF_STATS;
2759 if (!mlx4_is_slave(dev) &&
2760 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)) {
2761 mutex_lock(&stats_bitmap->mutex);
2762 bitmap_clear(stats_bitmap->bitmap, last_i, NUM_FLOW_STATS);
		if (rx_ppp)
			bitmap_set(stats_bitmap->bitmap, last_i,
				   NUM_FLOW_PRIORITY_STATS_RX);
		last_i += NUM_FLOW_PRIORITY_STATS_RX;

		if (rx_pause && !(rx_ppp))
			bitmap_set(stats_bitmap->bitmap, last_i,
				   NUM_FLOW_STATS_RX);
		last_i += NUM_FLOW_STATS_RX;

		if (tx_ppp)
			bitmap_set(stats_bitmap->bitmap, last_i,
				   NUM_FLOW_PRIORITY_STATS_TX);
		last_i += NUM_FLOW_PRIORITY_STATS_TX;

		if (tx_pause && !(tx_ppp))
			bitmap_set(stats_bitmap->bitmap, last_i,
				   NUM_FLOW_STATS_TX);
		last_i += NUM_FLOW_STATS_TX;

		mutex_unlock(&stats_bitmap->mutex);
	}
}
2788 void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
2789 struct mlx4_en_stats_bitmap *stats_bitmap,
2790 u8 rx_ppp, u8 rx_pause,
2791 u8 tx_ppp, u8 tx_pause)
{
	int last_i = 0;

	mutex_init(&stats_bitmap->mutex);
2796 bitmap_zero(stats_bitmap->bitmap, NUM_ALL_STATS);
2798 if (mlx4_is_slave(dev)) {
2799 bitmap_set(stats_bitmap->bitmap, last_i +
2800 MLX4_FIND_NETDEV_STAT(rx_packets), 1);
2801 bitmap_set(stats_bitmap->bitmap, last_i +
2802 MLX4_FIND_NETDEV_STAT(tx_packets), 1);
2803 bitmap_set(stats_bitmap->bitmap, last_i +
2804 MLX4_FIND_NETDEV_STAT(rx_bytes), 1);
2805 bitmap_set(stats_bitmap->bitmap, last_i +
2806 MLX4_FIND_NETDEV_STAT(tx_bytes), 1);
2807 bitmap_set(stats_bitmap->bitmap, last_i +
2808 MLX4_FIND_NETDEV_STAT(rx_dropped), 1);
2809 bitmap_set(stats_bitmap->bitmap, last_i +
			   MLX4_FIND_NETDEV_STAT(tx_dropped), 1);
	} else {
		bitmap_set(stats_bitmap->bitmap, last_i, NUM_MAIN_STATS);
	}
	last_i += NUM_MAIN_STATS;
2816 bitmap_set(stats_bitmap->bitmap, last_i, NUM_PORT_STATS);
2817 last_i += NUM_PORT_STATS;
	if (mlx4_is_master(dev))
		bitmap_set(stats_bitmap->bitmap, last_i,
			   NUM_PF_STATS);
	last_i += NUM_PF_STATS;
	mlx4_en_update_pfc_stats_bitmap(dev, stats_bitmap,
					rx_ppp, rx_pause,
					tx_ppp, tx_pause);
	last_i += NUM_FLOW_STATS;
	if (!mlx4_is_slave(dev))
		bitmap_set(stats_bitmap->bitmap, last_i, NUM_PKT_STATS);
}
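/* Resulting bitmap layout (one bit per exported counter), in order:
 * MAIN | PORT | PF | FLOW_PRIORITY_RX | FLOW_RX | FLOW_PRIORITY_TX |
 * FLOW_TX | PKT. Slaves expose only the basic netdev counters out of the
 * MAIN block, the PF block is set only on the master, and the FLOW blocks
 * are filled in by mlx4_en_update_pfc_stats_bitmap() above.
 */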
2833 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2834 struct mlx4_en_port_profile *prof)
{
	struct net_device *dev;
	struct mlx4_en_priv *priv;
	int i;
	int err;
	dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
				 MAX_TX_RINGS, MAX_RX_RINGS);
	if (dev == NULL)
		return -ENOMEM;
2846 netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
2847 netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
2849 SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev);
2850 dev->dev_port = port - 1;
	/*
	 * Initialize driver private data
	 */
2856 priv = netdev_priv(dev);
2857 memset(priv, 0, sizeof(struct mlx4_en_priv));
2858 priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
2859 spin_lock_init(&priv->stats_lock);
2860 INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
2861 INIT_WORK(&priv->restart_task, mlx4_en_restart);
2862 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
2863 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
2864 INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
2865 #ifdef CONFIG_MLX4_EN_VXLAN
2866 INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
	INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
#endif
2869 #ifdef CONFIG_RFS_ACCEL
2870 INIT_LIST_HEAD(&priv->filters);
2871 spin_lock_init(&priv->filters_lock);
#endif

	priv->dev = dev;
	priv->mdev = mdev;
	priv->ddev = &mdev->pdev->dev;
	priv->prof = prof;
	priv->port = port;
2879 priv->port_up = false;
2880 priv->flags = prof->flags;
2881 priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME;
2882 priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
2883 MLX4_WQE_CTRL_SOLICITED);
2884 priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
2885 priv->tx_ring_num = prof->tx_ring_num;
2886 priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
2887 netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key));
	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
				GFP_KERNEL);
	if (!priv->tx_ring) {
		err = -ENOMEM;
		goto out;
	}
	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
			      GFP_KERNEL);
	if (!priv->tx_cq) {
		err = -ENOMEM;
		goto out;
	}
2901 priv->rx_ring_num = prof->rx_ring_num;
2902 priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
2903 priv->cqe_size = mdev->dev->caps.cqe_size;
2904 priv->mac_index = -1;
2905 priv->msg_enable = MLX4_EN_MSG_LEVEL;
2906 #ifdef CONFIG_MLX4_EN_DCB
2907 if (!mlx4_is_slave(priv->mdev->dev)) {
2908 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
			dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
		} else {
			en_info(priv, "enabling only PFC DCB ops\n");
			dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
		}
	}
#endif
2917 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
2918 INIT_HLIST_HEAD(&priv->mac_hash[i]);
2920 /* Query for default mac and max mtu */
2921 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
2923 if (mdev->dev->caps.rx_checksum_flags_port[priv->port] &
2924 MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP)
2925 priv->flags |= MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP;
2927 /* Set default MAC */
2928 dev->addr_len = ETH_ALEN;
2929 mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
	if (!is_valid_ether_addr(dev->dev_addr)) {
		en_err(priv, "Port: %d, invalid MAC burned: %pM, quitting\n",
		       priv->port, dev->dev_addr);
		err = -EINVAL;
		goto out;
	} else if (mlx4_is_slave(priv->mdev->dev) &&
		   (priv->mdev->dev->port_random_macs & 1 << priv->port)) {
		/* Random MAC was assigned in mlx4_slave_cap
		 * in mlx4_core module
		 */
		dev->addr_assign_type |= NET_ADDR_RANDOM;
		en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
	}
2944 memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac));
2946 priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
2947 DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
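	/* Worked example of the stride computation above, using illustrative
	 * values (not taken from this file): with sizeof(struct
	 * mlx4_en_rx_desc) == 16, DS_SIZE == 16 and MLX4_EN_MAX_RX_FRAGS
	 * == 4, the sum is 16 + 16 * 4 = 80, and roundup_pow_of_two(80)
	 * = 128 bytes per RX descriptor.
	 */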
	err = mlx4_en_alloc_resources(priv);
	if (err)
		goto out;
2952 /* Initialize time stamping config */
2953 priv->hwtstamp_config.flags = 0;
2954 priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
2955 priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
2957 /* Allocate page for receive rings */
2958 err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
2959 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
2961 en_err(priv, "Failed to allocate page for rx qps\n");
2964 priv->allocated = 1;
	/*
	 * Initialize netdev entry points
	 */
	if (mlx4_is_master(priv->mdev->dev))
		dev->netdev_ops = &mlx4_netdev_ops_master;
	else
		dev->netdev_ops = &mlx4_netdev_ops;
2973 dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
2974 netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
2975 netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
2977 dev->ethtool_ops = &mlx4_en_ethtool_ops;
	/*
	 * Set driver features
	 */
2982 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2983 if (mdev->LSO_support)
2984 dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
2986 dev->vlan_features = dev->hw_features;
2988 dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
2989 dev->features = dev->hw_features | NETIF_F_HIGHDMA |
2990 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
2991 NETIF_F_HW_VLAN_CTAG_FILTER;
2992 dev->hw_features |= NETIF_F_LOOPBACK |
2993 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
2995 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
2996 dev->features |= NETIF_F_HW_VLAN_STAG_RX |
2997 NETIF_F_HW_VLAN_STAG_FILTER;
		dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
	}
	if (mlx4_is_slave(mdev->dev)) {
		int phv;

		err = get_phv_bit(mdev->dev, port, &phv);
		if (!err && phv) {
			dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
			priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
		}
	} else {
		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
		    !(mdev->dev->caps.flags2 &
		      MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
			dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
	}
3016 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
3017 dev->hw_features |= NETIF_F_RXFCS;
3019 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS)
3020 dev->hw_features |= NETIF_F_RXALL;
3022 if (mdev->dev->caps.steering_mode ==
3023 MLX4_STEERING_MODE_DEVICE_MANAGED &&
3024 mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
3025 dev->hw_features |= NETIF_F_NTUPLE;
3027 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
3028 dev->priv_flags |= IFF_UNICAST_FLT;
3030 /* Setting a default hash function value */
3031 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) {
3032 priv->rss_hash_fn = ETH_RSS_HASH_TOP;
3033 } else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) {
		priv->rss_hash_fn = ETH_RSS_HASH_XOR;
	} else {
		en_warn(priv,
			"No RSS hash capabilities exposed, using Toeplitz\n");
		priv->rss_hash_fn = ETH_RSS_HASH_TOP;
	}
3041 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
3042 dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
		dev->features |= NETIF_F_GSO_UDP_TUNNEL;
	}
3046 mdev->pndev[port] = dev;
3047 mdev->upper[port] = NULL;
3049 netif_carrier_off(dev);
3050 mlx4_en_set_default_moderation(priv);
3052 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
3053 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
3055 mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
3057 /* Configure port */
3058 mlx4_en_calc_rx_buf(dev);
3059 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
3060 priv->rx_skb_size + ETH_FCS_LEN,
3061 prof->tx_pause, prof->tx_ppp,
				    prof->rx_pause, prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto out;
	}
3069 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
3070 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
3072 en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
3079 en_warn(priv, "Initializing port\n");
3080 err = mlx4_INIT_PORT(mdev->dev, priv->port);
3082 en_err(priv, "Failed Initializing port\n");
3085 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
3087 /* Initialize time stamp mechanism */
3088 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
3089 mlx4_en_init_timestamp(mdev);
3091 queue_delayed_work(mdev->workqueue, &priv->service_task,
3092 SERVICE_TASK_DELAY);
3094 mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap,
3095 mdev->profile.prof[priv->port].rx_ppp,
3096 mdev->profile.prof[priv->port].rx_pause,
3097 mdev->profile.prof[priv->port].tx_ppp,
3098 mdev->profile.prof[priv->port].tx_pause);
	err = register_netdev(dev);
	if (err) {
		en_err(priv, "Netdev registration failed for port %d\n", port);
		goto out;
	}
	priv->registered = 1;
	return 0;

out:
	mlx4_en_destroy_netdev(dev);
	return err;
}
int mlx4_en_reset_config(struct net_device *dev,
			 struct hwtstamp_config ts_config,
			 netdev_features_t features)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int port_up = 0;
	int err = 0;
3124 if (priv->hwtstamp_config.tx_type == ts_config.tx_type &&
3125 priv->hwtstamp_config.rx_filter == ts_config.rx_filter &&
3126 !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
3127 !DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS))
3128 return 0; /* Nothing to change */
3130 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
3131 (features & NETIF_F_HW_VLAN_CTAG_RX) &&
3132 (priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) {
3133 en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n");
3137 mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	mlx4_en_free_resources(priv);
3145 en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n",
3146 ts_config.rx_filter, !!(features & NETIF_F_HW_VLAN_CTAG_RX));
3148 priv->hwtstamp_config.tx_type = ts_config.tx_type;
3149 priv->hwtstamp_config.rx_filter = ts_config.rx_filter;
3151 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
3152 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3153 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
		else
			dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3156 } else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) {
3157 /* RX time-stamping is OFF, update the RX vlan offload
		 * to the latest wanted state
		 */
3160 if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX)
3161 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
		else
			dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
	}
3166 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) {
3167 if (features & NETIF_F_RXFCS)
3168 dev->features |= NETIF_F_RXFCS;
		else
			dev->features &= ~NETIF_F_RXFCS;
	}
	/* RX vlan offload and RX time-stamping can't co-exist!
	 * Regardless of the caller's choice, turn off RX vlan offload
	 * if RX time-stamping is ON.
	 */
3177 if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) {
3178 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
3179 en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n");
		dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
	}
	err = mlx4_en_alloc_resources(priv);
	if (err) {
		en_err(priv, "Failed reallocating port resources\n");
		goto out;
	}
	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

	err = mlx4_en_moderation_update(priv);
out:
	mutex_unlock(&mdev->state_lock);
	netdev_features_change(dev);
	return err;
}