/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/udp_tunnel.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>

#include <linux/qed/qed_if.h>
#include "qede.h"

struct qede_arfs_tuple {
	union {
		__be32 src_ipv4;
		struct in6_addr src_ipv6;
	};
	union {
		__be32 dst_ipv4;
		struct in6_addr dst_ipv6;
	};
	__be16	src_port;
	__be16	dst_port;
	__be16	eth_proto;
	u8	ip_proto;
};

struct qede_arfs_fltr_node {
#define QEDE_FLTR_VALID	 0
	unsigned long state;

	/* pointer to aRFS packet buffer */
	void *data;

	/* dma map address of aRFS packet buffer */
	dma_addr_t mapping;

	/* length of aRFS packet buffer */
	int buf_len;

	/* tuples to hold from aRFS packet buffer */
	struct qede_arfs_tuple tuple;

	u32 flow_id;
	u16 sw_id;
	u16 rxq_id;
	u16 next_rxq_id;
	bool filter_op;
	bool used;
	u8 fw_rc;
	struct hlist_node node;
};

struct qede_arfs {
#define QEDE_ARFS_BUCKET_HEAD(edev, idx) (&(edev)->arfs->arfs_hl_head[idx])
#define QEDE_ARFS_POLL_COUNT	100
#define QEDE_RFS_FLW_BITSHIFT	(4)
#define QEDE_RFS_FLW_MASK	((1 << QEDE_RFS_FLW_BITSHIFT) - 1)
	struct hlist_head	arfs_hl_head[1 << QEDE_RFS_FLW_BITSHIFT];

	/* lock for filter list access */
	spinlock_t		arfs_list_lock;
	unsigned long		*arfs_fltr_bmap;
	int			filter_count;
	bool			enable;
};

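/* aRFS filters are spread across (1 << QEDE_RFS_FLW_BITSHIFT) buckets,
 * indexed by the low bits of the skb flow hash (see qede_rx_flow_steer()).
 * Rules added via ethtool all reside in bucket 0.
 */
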
static void qede_configure_arfs_fltr(struct qede_dev *edev,
				     struct qede_arfs_fltr_node *n,
				     u16 rxq_id, bool add_fltr)
{
	const struct qed_eth_ops *op = edev->ops;

	if (n->used)
		return;

	DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
		   "%s arfs filter flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
		   add_fltr ? "Adding" : "Deleting",
		   n->flow_id, n->sw_id, ntohs(n->tuple.src_port),
		   ntohs(n->tuple.dst_port), rxq_id);

	n->used = true;
	n->filter_op = add_fltr;
	op->ntuple_filter_config(edev->cdev, n, n->mapping, n->buf_len, 0,
				 rxq_id, add_fltr);
}

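/* Note: ntuple_filter_config() is asynchronous; qed invokes
 * qede_arfs_filter_op() with the firmware result once the request
 * completes, and that callback is what clears n->used again.
 */
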
static void
qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr)
{
	kfree(fltr->data);
	clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap);
	kfree(fltr);
}

static int
qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev,
				      struct qede_arfs_fltr_node *fltr,
				      u16 bucket_idx)
{
	fltr->mapping = dma_map_single(&edev->pdev->dev, fltr->data,
				       fltr->buf_len, DMA_TO_DEVICE);
	if (dma_mapping_error(&edev->pdev->dev, fltr->mapping)) {
		DP_NOTICE(edev, "Failed to map DMA memory for rule\n");
		qede_free_arfs_filter(edev, fltr);
		return -ENOMEM;
	}

	INIT_HLIST_NODE(&fltr->node);
	hlist_add_head(&fltr->node,
		       QEDE_ARFS_BUCKET_HEAD(edev, bucket_idx));
	edev->arfs->filter_count++;

	if (edev->arfs->filter_count == 1 && !edev->arfs->enable) {
		edev->ops->configure_arfs_searcher(edev->cdev, true);
		edev->arfs->enable = true;
	}

	return 0;
}

static void
qede_dequeue_fltr_and_config_searcher(struct qede_dev *edev,
				      struct qede_arfs_fltr_node *fltr)
{
	hlist_del(&fltr->node);
	dma_unmap_single(&edev->pdev->dev, fltr->mapping,
			 fltr->buf_len, DMA_TO_DEVICE);

	qede_free_arfs_filter(edev, fltr);
	edev->arfs->filter_count--;

	if (!edev->arfs->filter_count && edev->arfs->enable) {
		edev->arfs->enable = false;
		edev->ops->configure_arfs_searcher(edev->cdev, false);
	}
}

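/* Completion callback from qed once firmware has processed an ntuple
 * filter request. Runs outside qede_lock, hence it relies solely on
 * arfs_list_lock for synchronization.
 */
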
void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc)
{
	struct qede_arfs_fltr_node *fltr = filter;
	struct qede_dev *edev = dev;

	fltr->fw_rc = fw_rc;

	if (fw_rc) {
		DP_NOTICE(edev,
			  "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
			  fw_rc, fltr->flow_id, fltr->sw_id,
			  ntohs(fltr->tuple.src_port),
			  ntohs(fltr->tuple.dst_port), fltr->rxq_id);

		spin_lock_bh(&edev->arfs->arfs_list_lock);

		fltr->used = false;
		clear_bit(QEDE_FLTR_VALID, &fltr->state);

		spin_unlock_bh(&edev->arfs->arfs_list_lock);
		return;
	}

	spin_lock_bh(&edev->arfs->arfs_list_lock);

	fltr->used = false;

	if (fltr->filter_op) {
		set_bit(QEDE_FLTR_VALID, &fltr->state);
		if (fltr->rxq_id != fltr->next_rxq_id)
			qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id,
						 false);
	} else {
		clear_bit(QEDE_FLTR_VALID, &fltr->state);
		if (fltr->rxq_id != fltr->next_rxq_id) {
			fltr->rxq_id = fltr->next_rxq_id;
			qede_configure_arfs_fltr(edev, fltr,
						 fltr->rxq_id, true);
		}
	}

	spin_unlock_bh(&edev->arfs->arfs_list_lock);
}

/* Should be called while qede_lock is held */
void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
{
	int i;

	for (i = 0; i <= QEDE_RFS_FLW_MASK; i++) {
		struct hlist_node *temp;
		struct hlist_head *head;
		struct qede_arfs_fltr_node *fltr;

		head = &edev->arfs->arfs_hl_head[i];

		hlist_for_each_entry_safe(fltr, temp, head, node) {
			bool del = false;

			if (edev->state != QEDE_STATE_OPEN)
				del = true;

			spin_lock_bh(&edev->arfs->arfs_list_lock);

			if ((!test_bit(QEDE_FLTR_VALID, &fltr->state) &&
			     !fltr->used) || free_fltr) {
				qede_dequeue_fltr_and_config_searcher(edev,
								      fltr);
			} else {
				bool flow_exp = false;
#ifdef CONFIG_RFS_ACCEL
				flow_exp = rps_may_expire_flow(edev->ndev,
							       fltr->rxq_id,
							       fltr->flow_id,
							       fltr->sw_id);
#endif
				if ((flow_exp || del) && !free_fltr)
					qede_configure_arfs_fltr(edev, fltr,
								 fltr->rxq_id,
								 false);
			}

			spin_unlock_bh(&edev->arfs->arfs_list_lock);
		}
	}

	spin_lock_bh(&edev->arfs->arfs_list_lock);

	if (!edev->arfs->filter_count) {
		if (edev->arfs->enable) {
			edev->arfs->enable = false;
			edev->ops->configure_arfs_searcher(edev->cdev, false);
		}
#ifdef CONFIG_RFS_ACCEL
	} else {
		set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
		schedule_delayed_work(&edev->sp_task,
				      QEDE_SP_TASK_POLL_DELAY);
#endif
	}

	spin_unlock_bh(&edev->arfs->arfs_list_lock);
}

/* This function waits until all aRFS filters get deleted and freed.
 * On timeout it frees all filters forcefully.
 */
void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev)
{
	int count = QEDE_ARFS_POLL_COUNT;

	while (count) {
		qede_process_arfs_filters(edev, false);

		if (!edev->arfs->filter_count)
			break;

		msleep(100);
		count--;
	}

	if (!count) {
		DP_NOTICE(edev, "Timeout in polling for arfs filter free\n");

		/* Something is terribly wrong, free forcefully */
		qede_process_arfs_filters(edev, true);
	}
}

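/* Allocate the aRFS bookkeeping structures: the hash bucket heads, the
 * sw_id bitmap, and (when RFS acceleration is compiled in) the IRQ CPU
 * rmap the core uses to steer flows toward the right Rx interrupt.
 */
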
int qede_alloc_arfs(struct qede_dev *edev)
{
	int i;

	edev->arfs = vzalloc(sizeof(*edev->arfs));
	if (!edev->arfs)
		return -ENOMEM;

	spin_lock_init(&edev->arfs->arfs_list_lock);

	for (i = 0; i <= QEDE_RFS_FLW_MASK; i++)
		INIT_HLIST_HEAD(QEDE_ARFS_BUCKET_HEAD(edev, i));

	edev->arfs->arfs_fltr_bmap = vzalloc(BITS_TO_LONGS(QEDE_RFS_MAX_FLTR) *
					     sizeof(long));
	if (!edev->arfs->arfs_fltr_bmap) {
		vfree(edev->arfs);
		edev->arfs = NULL;
		return -ENOMEM;
	}

#ifdef CONFIG_RFS_ACCEL
	edev->ndev->rx_cpu_rmap = alloc_irq_cpu_rmap(QEDE_RSS_COUNT(edev));
	if (!edev->ndev->rx_cpu_rmap) {
		vfree(edev->arfs->arfs_fltr_bmap);
		edev->arfs->arfs_fltr_bmap = NULL;
		vfree(edev->arfs);
		edev->arfs = NULL;
		return -ENOMEM;
	}
#endif
	return 0;
}

void qede_free_arfs(struct qede_dev *edev)
{
	if (!edev->arfs)
		return;

#ifdef CONFIG_RFS_ACCEL
	if (edev->ndev->rx_cpu_rmap)
		free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);

	edev->ndev->rx_cpu_rmap = NULL;
#endif
	vfree(edev->arfs->arfs_fltr_bmap);
	edev->arfs->arfs_fltr_bmap = NULL;
	vfree(edev->arfs);
	edev->arfs = NULL;
}

#ifdef CONFIG_RFS_ACCEL
static bool qede_compare_ip_addr(struct qede_arfs_fltr_node *tpos,
				 const struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		if (tpos->tuple.src_ipv4 == ip_hdr(skb)->saddr &&
		    tpos->tuple.dst_ipv4 == ip_hdr(skb)->daddr)
			return true;
		else
			return false;
	} else {
		struct in6_addr *src = &tpos->tuple.src_ipv6;
		u8 size = sizeof(struct in6_addr);

		if (!memcmp(src, &ipv6_hdr(skb)->saddr, size) &&
		    !memcmp(&tpos->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr, size))
			return true;
		else
			return false;
	}
}

static struct qede_arfs_fltr_node *
qede_arfs_htbl_key_search(struct hlist_head *h, const struct sk_buff *skb,
			  __be16 src_port, __be16 dst_port, u8 ip_proto)
{
	struct qede_arfs_fltr_node *tpos;

	hlist_for_each_entry(tpos, h, node)
		if (tpos->tuple.ip_proto == ip_proto &&
		    tpos->tuple.eth_proto == skb->protocol &&
		    qede_compare_ip_addr(tpos, skb) &&
		    tpos->tuple.src_port == src_port &&
		    tpos->tuple.dst_port == dst_port)
			return tpos;

	return NULL;
}

static struct qede_arfs_fltr_node *
qede_alloc_filter(struct qede_dev *edev, int min_hlen)
{
	struct qede_arfs_fltr_node *n;
	int bit_id;

	bit_id = find_first_zero_bit(edev->arfs->arfs_fltr_bmap,
				     QEDE_RFS_MAX_FLTR);

	if (bit_id >= QEDE_RFS_MAX_FLTR)
		return NULL;

	n = kzalloc(sizeof(*n), GFP_ATOMIC);
	if (!n)
		return NULL;

	n->data = kzalloc(min_hlen, GFP_ATOMIC);
	if (!n->data) {
		kfree(n);
		return NULL;
	}

	n->sw_id = (u16)bit_id;
	set_bit(bit_id, edev->arfs->arfs_fltr_bmap);
	return n;
}

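/* .ndo_rx_flow_steer() - as with other implementations of this hook, the
 * return value is expected to be the filter ID (later passed back to us
 * via rps_may_expire_flow()) on success, or a negative errno.
 */
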
int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
		       u16 rxq_index, u32 flow_id)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_arfs_fltr_node *n;
	int min_hlen, rc, tp_offset;
	struct ethhdr *eth;
	__be16 *ports;
	u16 tbl_idx;
	u8 ip_proto;

	if (skb->encapsulation)
		return -EPROTONOSUPPORT;

	if (skb->protocol != htons(ETH_P_IP) &&
	    skb->protocol != htons(ETH_P_IPV6))
		return -EPROTONOSUPPORT;

	if (skb->protocol == htons(ETH_P_IP)) {
		ip_proto = ip_hdr(skb)->protocol;
		tp_offset = sizeof(struct iphdr);
	} else {
		ip_proto = ipv6_hdr(skb)->nexthdr;
		tp_offset = sizeof(struct ipv6hdr);
	}

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
		return -EPROTONOSUPPORT;

	ports = (__be16 *)(skb->data + tp_offset);
	tbl_idx = skb_get_hash_raw(skb) & QEDE_RFS_FLW_MASK;

	spin_lock_bh(&edev->arfs->arfs_list_lock);

	n = qede_arfs_htbl_key_search(QEDE_ARFS_BUCKET_HEAD(edev, tbl_idx),
				      skb, ports[0], ports[1], ip_proto);
	if (n) {
		/* Filter match */
		n->next_rxq_id = rxq_index;

		if (test_bit(QEDE_FLTR_VALID, &n->state)) {
			if (n->rxq_id != rxq_index)
				qede_configure_arfs_fltr(edev, n, n->rxq_id,
							 false);
		} else {
			if (!n->used) {
				n->rxq_id = rxq_index;
				qede_configure_arfs_fltr(edev, n, n->rxq_id,
							 true);
			}
		}

		rc = n->sw_id;
		goto ret_unlock;
	}

	min_hlen = ETH_HLEN + skb_headlen(skb);

	n = qede_alloc_filter(edev, min_hlen);
	if (!n) {
		rc = -ENOMEM;
		goto ret_unlock;
	}

	n->buf_len = min_hlen;
	n->rxq_id = rxq_index;
	n->next_rxq_id = rxq_index;
	n->tuple.src_port = ports[0];
	n->tuple.dst_port = ports[1];
	n->flow_id = flow_id;

	if (skb->protocol == htons(ETH_P_IP)) {
		n->tuple.src_ipv4 = ip_hdr(skb)->saddr;
		n->tuple.dst_ipv4 = ip_hdr(skb)->daddr;
	} else {
		memcpy(&n->tuple.src_ipv6, &ipv6_hdr(skb)->saddr,
		       sizeof(struct in6_addr));
		memcpy(&n->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr,
		       sizeof(struct in6_addr));
	}

	eth = (struct ethhdr *)n->data;
	eth->h_proto = skb->protocol;
	n->tuple.eth_proto = skb->protocol;
	n->tuple.ip_proto = ip_proto;
	memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb));

	rc = qede_enqueue_fltr_and_config_searcher(edev, n, tbl_idx);
	if (rc)
		goto ret_unlock;

	qede_configure_arfs_fltr(edev, n, n->rxq_id, true);

	spin_unlock_bh(&edev->arfs->arfs_list_lock);

	set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);

	return n->sw_id;

ret_unlock:
	spin_unlock_bh(&edev->arfs->arfs_list_lock);
	return rc;
}
#endif

void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port)
{
	struct qede_dev *edev = dev;

	if (edev->vxlan_dst_port != vxlan_port)
		edev->vxlan_dst_port = 0;

	if (edev->geneve_dst_port != geneve_port)
		edev->geneve_dst_port = 0;
}

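/* qed callback; a forced MAC (e.g. for a VF) always takes effect, while a
 * mere hint is honored only if no valid MAC has been set yet.
 */
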
void qede_force_mac(void *dev, u8 *mac, bool forced)
{
	struct qede_dev *edev = dev;

	__qede_lock(edev);

	/* MAC hints take effect only if we haven't set one already */
	if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced) {
		__qede_unlock(edev);
		return;
	}

	ether_addr_copy(edev->ndev->dev_addr, mac);
	__qede_unlock(edev);
}

void qede_fill_rss_params(struct qede_dev *edev,
			  struct qed_update_vport_rss_params *rss, u8 *update)
{
	bool need_reset = false;
	int i;

	if (QEDE_RSS_COUNT(edev) <= 1) {
		memset(rss, 0, sizeof(*rss));
		*update = 0;
		return;
	}

	/* Need to validate current RSS config uses valid entries */
	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
		if (edev->rss_ind_table[i] >= QEDE_RSS_COUNT(edev)) {
			need_reset = true;
			break;
		}
	}

	if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) || need_reset) {
		for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
			u16 indir_val, val;

			val = QEDE_RSS_COUNT(edev);
			indir_val = ethtool_rxfh_indir_default(i, val);
			edev->rss_ind_table[i] = indir_val;
		}
		edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
	}

	/* Now that we have the queue-indirection, prepare the handles */
	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
		u16 idx = QEDE_RX_QUEUE_IDX(edev, edev->rss_ind_table[i]);

		rss->rss_ind_table[i] = edev->fp_array[idx].rxq->handle;
	}

	if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
		netdev_rss_key_fill(edev->rss_key, sizeof(edev->rss_key));
		edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
	}
	memcpy(rss->rss_key, edev->rss_key, sizeof(rss->rss_key));

	if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
		edev->rss_caps = QED_RSS_IPV4 | QED_RSS_IPV6 |
		    QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
		edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
	}
	rss->rss_caps = edev->rss_caps;

	*update = 1;
}

static int qede_set_ucast_rx_mac(struct qede_dev *edev,
				 enum qed_filter_xcast_params_type opcode,
				 unsigned char mac[ETH_ALEN])
{
	struct qed_filter_params filter_cmd;

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	filter_cmd.type = QED_FILTER_TYPE_UCAST;
	filter_cmd.filter.ucast.type = opcode;
	filter_cmd.filter.ucast.mac_valid = 1;
	ether_addr_copy(filter_cmd.filter.ucast.mac, mac);

	return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
				  enum qed_filter_xcast_params_type opcode,
				  u16 vid)
{
	struct qed_filter_params filter_cmd;

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	filter_cmd.type = QED_FILTER_TYPE_UCAST;
	filter_cmd.filter.ucast.type = opcode;
	filter_cmd.filter.ucast.vlan_valid = 1;
	filter_cmd.filter.ucast.vlan = vid;

	return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

static int qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
{
	struct qed_update_vport_params *params;
	int rc;

	/* Proceed only if action actually needs to be performed */
	if (edev->accept_any_vlan == action)
		return 0;

	params = vzalloc(sizeof(*params));
	if (!params)
		return -ENOMEM;

	params->vport_id = 0;
	params->accept_any_vlan = action;
	params->update_accept_any_vlan_flg = 1;

	rc = edev->ops->vport_update(edev->cdev, params);
	if (rc) {
		DP_ERR(edev, "Failed to %s accept-any-vlan\n",
		       action ? "enable" : "disable");
	} else {
		DP_INFO(edev, "%s accept-any-vlan\n",
			action ? "enabled" : "disabled");
		edev->accept_any_vlan = action;
	}

	vfree(params);
	return rc;
}

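/* .ndo_vlan_rx_add_vid() - VLAN IDs are cached in edev->vlan_list so they
 * can be replayed to the device when the interface (re)opens.
 */
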
int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_vlan *vlan, *tmp;
	int rc = 0;

	DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan) {
		DP_INFO(edev, "Failed to allocate struct for vlan\n");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&vlan->list);
	vlan->vid = vid;
	vlan->configured = false;

	/* Verify vlan isn't already configured */
	list_for_each_entry(tmp, &edev->vlan_list, list) {
		if (tmp->vid == vlan->vid) {
			DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
				   "vlan already configured\n");
			kfree(vlan);
			return -EEXIST;
		}
	}

	/* If interface is down, cache this VLAN ID and return */
	__qede_lock(edev);
	if (edev->state != QEDE_STATE_OPEN) {
		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "Interface is down, VLAN %d will be configured when interface is up\n",
			   vid);
		if (vid != 0)
			edev->non_configured_vlans++;
		list_add(&vlan->list, &edev->vlan_list);
		goto out;
	}

	/* Check for the filter limit.
	 * Note - vlan0 has a reserved filter and can be added without
	 * worrying about quota
	 */
	if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
	    (vid == 0)) {
		rc = qede_set_ucast_rx_vlan(edev,
					    QED_FILTER_XCAST_TYPE_ADD,
					    vlan->vid);
		if (rc) {
			DP_ERR(edev, "Failed to configure VLAN %d\n",
			       vlan->vid);
			kfree(vlan);
			goto out;
		}
		vlan->configured = true;

		/* vlan0 filter isn't consuming out of our quota */
		if (vlan->vid != 0)
			edev->configured_vlans++;
	} else {
		/* Out of quota; Activate accept-any-VLAN mode */
		if (!edev->non_configured_vlans) {
			rc = qede_config_accept_any_vlan(edev, true);
			if (rc) {
				kfree(vlan);
				goto out;
			}
		}

		edev->non_configured_vlans++;
	}

	list_add(&vlan->list, &edev->vlan_list);

out:
	__qede_unlock(edev);
	return rc;
}

static void qede_del_vlan_from_list(struct qede_dev *edev,
				    struct qede_vlan *vlan)
{
	/* vlan0 filter isn't consuming out of our quota */
	if (vlan->vid != 0) {
		if (vlan->configured)
			edev->configured_vlans--;
		else
			edev->non_configured_vlans--;
	}

	list_del(&vlan->list);
	kfree(vlan);
}

int qede_configure_vlan_filters(struct qede_dev *edev)
{
	int rc = 0, real_rc = 0, accept_any_vlan = 0;
	struct qed_dev_eth_info *dev_info;
	struct qede_vlan *vlan = NULL;

	if (list_empty(&edev->vlan_list))
		return 0;

	dev_info = &edev->dev_info;

	/* Configure non-configured vlans */
	list_for_each_entry(vlan, &edev->vlan_list, list) {
		if (vlan->configured)
			continue;

		/* We have used all our credits, now enable accept_any_vlan */
		if ((vlan->vid != 0) &&
		    (edev->configured_vlans == dev_info->num_vlan_filters)) {
			accept_any_vlan = 1;
			continue;
		}

		DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);

		rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
					    vlan->vid);
		if (rc) {
			DP_ERR(edev, "Failed to configure VLAN %u\n",
			       vlan->vid);
			real_rc = rc;
			continue;
		}

		vlan->configured = true;
		/* vlan0 filter doesn't consume our VLAN filter's quota */
		if (vlan->vid != 0) {
			edev->non_configured_vlans--;
			edev->configured_vlans++;
		}
	}

	/* enable accept_any_vlan mode if we have more VLANs than credits,
	 * or remove accept_any_vlan mode if we've actually removed
	 * a non-configured vlan, and all remaining vlans are truly configured.
	 */
	if (accept_any_vlan)
		rc = qede_config_accept_any_vlan(edev, true);
	else if (!edev->non_configured_vlans)
		rc = qede_config_accept_any_vlan(edev, false);

	if (rc && !real_rc)
		real_rc = rc;

	return real_rc;
}

int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_vlan *vlan = NULL;
	int rc = 0;

	DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);

	/* Find whether entry exists */
	__qede_lock(edev);
	list_for_each_entry(vlan, &edev->vlan_list, list)
		if (vlan->vid == vid)
			break;

	if (!vlan || (vlan->vid != vid)) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "Vlan isn't configured\n");
		goto out;
	}

	if (edev->state != QEDE_STATE_OPEN) {
		/* As interface is already down, we don't have a VPORT
		 * instance to remove vlan filter. So just update vlan list
		 */
		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "Interface is down, removing VLAN from list only\n");
		qede_del_vlan_from_list(edev, vlan);
		goto out;
	}

	/* Remove vlan */
	if (vlan->configured) {
		rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL,
					    vid);
		if (rc) {
			DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
			goto out;
		}
	}

	qede_del_vlan_from_list(edev, vlan);

	/* We have removed a VLAN - try to see if we can
	 * configure non-configured VLAN from the list.
	 */
	rc = qede_configure_vlan_filters(edev);

out:
	__qede_unlock(edev);
	return rc;
}

void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
{
	struct qede_vlan *vlan = NULL;

	if (list_empty(&edev->vlan_list))
		return;

	list_for_each_entry(vlan, &edev->vlan_list, list) {
		if (!vlan->configured)
			continue;

		vlan->configured = false;

		/* vlan0 filter isn't consuming out of our quota */
		if (vlan->vid != 0) {
			edev->non_configured_vlans++;
			edev->configured_vlans--;
		}

		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "marked vlan %d as non-configured\n", vlan->vid);
	}

	edev->accept_any_vlan = false;
}

static void qede_set_features_reload(struct qede_dev *edev,
				     struct qede_reload_args *args)
{
	edev->ndev->features = args->u.features;
}

int qede_set_features(struct net_device *dev, netdev_features_t features)
{
	struct qede_dev *edev = netdev_priv(dev);
	netdev_features_t changes = features ^ dev->features;
	bool need_reload = false;

	/* No action needed if hardware GRO is disabled during driver load */
	if (changes & NETIF_F_GRO) {
		if (dev->features & NETIF_F_GRO)
			need_reload = !edev->gro_disable;
		else
			need_reload = edev->gro_disable;
	}

	if (need_reload) {
		struct qede_reload_args args;

		args.u.features = features;
		args.func = &qede_set_features_reload;

		/* Make sure that we definitely need to reload.
		 * In case of an eBPF attached program, there will be no FW
		 * aggregations, so no need to actually reload.
		 */
		__qede_lock(edev);
		if (edev->xdp_prog)
			args.func(edev, &args);
		else
			qede_reload(edev, &args, true);
		__qede_unlock(edev);

		return 1;
	}

	return 0;
}

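/* .ndo_udp_tunnel_add() - the device tracks a single VXLAN and a single
 * GENEVE UDP port at a time; additional ports are silently ignored.
 */
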
void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_tunn_params tunn_params;
	u16 t_port = ntohs(ti->port);
	int rc;

	memset(&tunn_params, 0, sizeof(tunn_params));

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (!edev->dev_info.common.vxlan_enable)
			return;

		if (edev->vxlan_dst_port)
			return;

		tunn_params.update_vxlan_port = 1;
		tunn_params.vxlan_port = t_port;

		__qede_lock(edev);
		rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
		__qede_unlock(edev);

		if (!rc) {
			edev->vxlan_dst_port = t_port;
			DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
				   t_port);
		} else {
			DP_NOTICE(edev, "Failed to add vxlan UDP port=%d\n",
				  t_port);
		}

		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (!edev->dev_info.common.geneve_enable)
			return;

		if (edev->geneve_dst_port)
			return;

		tunn_params.update_geneve_port = 1;
		tunn_params.geneve_port = t_port;

		__qede_lock(edev);
		rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
		__qede_unlock(edev);

		if (!rc) {
			edev->geneve_dst_port = t_port;
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "Added geneve port=%d\n", t_port);
		} else {
			DP_NOTICE(edev, "Failed to add geneve UDP port=%d\n",
				  t_port);
		}

		break;
	default:
		return;
	}
}

void qede_udp_tunnel_del(struct net_device *dev,
			 struct udp_tunnel_info *ti)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_tunn_params tunn_params;
	u16 t_port = ntohs(ti->port);

	memset(&tunn_params, 0, sizeof(tunn_params));

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (t_port != edev->vxlan_dst_port)
			return;

		tunn_params.update_vxlan_port = 1;
		tunn_params.vxlan_port = 0;

		__qede_lock(edev);
		edev->ops->tunn_config(edev->cdev, &tunn_params);
		__qede_unlock(edev);

		edev->vxlan_dst_port = 0;

		DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
			   t_port);

		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (t_port != edev->geneve_dst_port)
			return;

		tunn_params.update_geneve_port = 1;
		tunn_params.geneve_port = 0;

		__qede_lock(edev);
		edev->ops->tunn_config(edev->cdev, &tunn_params);
		__qede_unlock(edev);

		edev->geneve_dst_port = 0;

		DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
			   t_port);
		break;
	default:
		return;
	}
}

static void qede_xdp_reload_func(struct qede_dev *edev,
				 struct qede_reload_args *args)
{
	struct bpf_prog *old;

	old = xchg(&edev->xdp_prog, args->u.new_prog);
	if (old)
		bpf_prog_put(old);
}

static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
{
	struct qede_reload_args args;

	/* If we're called, there was already a bpf reference increment */
	args.func = &qede_xdp_reload_func;
	args.u.new_prog = prog;
	qede_reload(edev, &args, false);

	return 0;
}

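/* .ndo_xdp() - dispatch XDP setup/query commands */
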
int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
	struct qede_dev *edev = netdev_priv(dev);

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return qede_xdp_set(edev, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_attached = !!edev->xdp_prog;
		xdp->prog_id = edev->xdp_prog ? edev->xdp_prog->aux->id : 0;
		return 0;
	default:
		return -EINVAL;
	}
}

static int qede_set_mcast_rx_mac(struct qede_dev *edev,
				 enum qed_filter_xcast_params_type opcode,
				 unsigned char *mac, int num_macs)
{
	struct qed_filter_params filter_cmd;
	int i;

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	filter_cmd.type = QED_FILTER_TYPE_MCAST;
	filter_cmd.filter.mcast.type = opcode;
	filter_cmd.filter.mcast.num = num_macs;

	for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
		ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);

	return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

int qede_set_mac_addr(struct net_device *ndev, void *p)
{
	struct qede_dev *edev = netdev_priv(ndev);
	struct sockaddr *addr = p;
	int rc = 0;

	/* Make sure the state doesn't transition while changing the MAC.
	 * Also, all flows accessing the dev_addr field are doing that under
	 * this lock.
	 */
	__qede_lock(edev);

	if (!is_valid_ether_addr(addr->sa_data)) {
		DP_NOTICE(edev, "The MAC address is not valid\n");
		rc = -EFAULT;
		goto out;
	}

	if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
		DP_NOTICE(edev, "qed prevents setting MAC %pM\n",
			  addr->sa_data);
		rc = -EINVAL;
		goto out;
	}

	if (edev->state == QEDE_STATE_OPEN) {
		/* Remove the previous primary mac */
		rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
					   ndev->dev_addr);
		if (rc)
			goto out;
	}

	ether_addr_copy(ndev->dev_addr, addr->sa_data);
	DP_INFO(edev, "Setting device MAC to %pM\n", addr->sa_data);

	if (edev->state != QEDE_STATE_OPEN) {
		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "The device is currently down\n");
		goto out;
	}

	edev->ops->common->update_mac(edev->cdev, ndev->dev_addr);

	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
				   ndev->dev_addr);
out:
	__qede_unlock(edev);
	return rc;
}

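/* The device supports up to 64 multicast filters; with more multicast
 * addresses (or IFF_ALLMULTI) the Rx mode falls back to multicast-promisc.
 */
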
static int
qede_configure_mcast_filtering(struct net_device *ndev,
			       enum qed_filter_rx_mode_type *accept_flags)
{
	struct qede_dev *edev = netdev_priv(ndev);
	unsigned char *mc_macs, *temp;
	struct netdev_hw_addr *ha;
	int rc = 0, mc_count;
	size_t size;

	size = 64 * ETH_ALEN;

	mc_macs = kzalloc(size, GFP_KERNEL);
	if (!mc_macs) {
		DP_NOTICE(edev,
			  "Failed to allocate memory for multicast MACs\n");
		rc = -ENOMEM;
		goto exit;
	}

	temp = mc_macs;

	/* Remove all previously configured MAC filters */
	rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
				   mc_macs, 1);
	if (rc)
		goto exit;

	netif_addr_lock_bh(ndev);

	mc_count = netdev_mc_count(ndev);
	if (mc_count <= 64) {
		netdev_for_each_mc_addr(ha, ndev) {
			ether_addr_copy(temp, ha->addr);
			temp += ETH_ALEN;
		}
	}

	netif_addr_unlock_bh(ndev);

	/* Check for all multicast @@@TBD resource allocation */
	if ((ndev->flags & IFF_ALLMULTI) || (mc_count > 64)) {
		if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
			*accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
	} else {
		/* Add all multicast MAC filters */
		rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
					   mc_macs, mc_count);
	}

exit:
	kfree(mc_macs);
	return rc;
}

void qede_set_rx_mode(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);
}

/* Must be called with qede_lock held */
void qede_config_rx_mode(struct net_device *ndev)
{
	enum qed_filter_rx_mode_type accept_flags;
	struct qede_dev *edev = netdev_priv(ndev);
	struct qed_filter_params rx_mode;
	unsigned char *uc_macs, *temp;
	struct netdev_hw_addr *ha;
	int rc, uc_count;
	size_t size;

	netif_addr_lock_bh(ndev);

	uc_count = netdev_uc_count(ndev);
	size = uc_count * ETH_ALEN;

	uc_macs = kzalloc(size, GFP_ATOMIC);
	if (!uc_macs) {
		DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
		netif_addr_unlock_bh(ndev);
		return;
	}

	temp = uc_macs;
	netdev_for_each_uc_addr(ha, ndev) {
		ether_addr_copy(temp, ha->addr);
		temp += ETH_ALEN;
	}

	netif_addr_unlock_bh(ndev);

	/* Configure the struct for the Rx mode */
	memset(&rx_mode, 0, sizeof(struct qed_filter_params));
	rx_mode.type = QED_FILTER_TYPE_RX_MODE;

	/* Remove all previous unicast secondary macs and multicast macs
	 * (configure / leave the primary mac)
	 */
	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
				   edev->ndev->dev_addr);
	if (rc)
		goto out;

	/* Check for promiscuous */
	if (ndev->flags & IFF_PROMISC)
		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
	else
		accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR;

	/* Configure all filters regardless, in case promisc is rejected */
	if (uc_count < edev->dev_info.num_mac_filters) {
		int i;

		temp = uc_macs;
		for (i = 0; i < uc_count; i++) {
			rc = qede_set_ucast_rx_mac(edev,
						   QED_FILTER_XCAST_TYPE_ADD,
						   temp);
			if (rc)
				goto out;

			temp += ETH_ALEN;
		}
	} else {
		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
	}

	rc = qede_configure_mcast_filtering(ndev, &accept_flags);
	if (rc)
		goto out;

	/* take care of VLAN mode */
	if (ndev->flags & IFF_PROMISC) {
		qede_config_accept_any_vlan(edev, true);
	} else if (!edev->non_configured_vlans) {
		/* It's possible that accept_any_vlan mode is set due to a
		 * previous setting of IFF_PROMISC. If vlan credits are
		 * sufficient, disable accept_any_vlan.
		 */
		qede_config_accept_any_vlan(edev, false);
	}

	rx_mode.filter.accept_flags = accept_flags;
	edev->ops->filter_config(edev->cdev, &rx_mode);
out:
	kfree(uc_macs);
}

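/* The functions below back the ethtool rxnfc (ntuple) interface. User rules
 * share the aRFS machinery and are all kept in bucket 0; e.g. a rule added
 * with an (illustrative) invocation such as:
 *   ethtool -N <iface> flow-type tcp4 src-ip <sip> dst-ip <dip> \
 *           src-port <sport> dst-port <dport> action <rxq> loc <id>
 * ends up in qede_add_cls_rule() below.
 */
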
static struct qede_arfs_fltr_node *
qede_get_arfs_fltr_by_loc(struct hlist_head *head, u32 location)
{
	struct qede_arfs_fltr_node *fltr;

	hlist_for_each_entry(fltr, head, node)
		if (location == fltr->sw_id)
			return fltr;

	return NULL;
}

static bool
qede_compare_user_flow_ips(struct qede_arfs_fltr_node *tpos,
			   struct ethtool_rx_flow_spec *fsp,
			   __be16 proto)
{
	if (proto == htons(ETH_P_IP)) {
		struct ethtool_tcpip4_spec *ip;

		ip = &fsp->h_u.tcp_ip4_spec;

		if (tpos->tuple.src_ipv4 == ip->ip4src &&
		    tpos->tuple.dst_ipv4 == ip->ip4dst)
			return true;
		else
			return false;
	} else {
		struct ethtool_tcpip6_spec *ip6;
		struct in6_addr *src;

		ip6 = &fsp->h_u.tcp_ip6_spec;
		src = &tpos->tuple.src_ipv6;

		if (!memcmp(src, &ip6->ip6src, sizeof(struct in6_addr)) &&
		    !memcmp(&tpos->tuple.dst_ipv6, &ip6->ip6dst,
			    sizeof(struct in6_addr)))
			return true;
		else
			return false;
	}

	return false;
}

int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info,
			  u32 *rule_locs)
{
	struct qede_arfs_fltr_node *fltr;
	struct hlist_head *head;
	int cnt = 0, rc = 0;

	info->data = QEDE_RFS_MAX_FLTR;

	__qede_lock(edev);

	if (!edev->arfs) {
		rc = -EPERM;
		goto unlock;
	}

	head = QEDE_ARFS_BUCKET_HEAD(edev, 0);

	hlist_for_each_entry(fltr, head, node) {
		if (cnt == info->rule_cnt) {
			rc = -EMSGSIZE;
			goto unlock;
		}

		rule_locs[cnt] = fltr->sw_id;
		cnt++;
	}

	info->rule_cnt = cnt;

unlock:
	__qede_unlock(edev);
	return rc;
}

int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp = &cmd->fs;
	struct qede_arfs_fltr_node *fltr = NULL;
	int rc = 0;

	cmd->data = QEDE_RFS_MAX_FLTR;

	__qede_lock(edev);

	if (!edev->arfs) {
		rc = -EPERM;
		goto unlock;
	}

	fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0),
					 fsp->location);
	if (!fltr) {
		DP_NOTICE(edev, "Rule not found - location=0x%x\n",
			  fsp->location);
		rc = -EINVAL;
		goto unlock;
	}

	if (fltr->tuple.eth_proto == htons(ETH_P_IP)) {
		if (fltr->tuple.ip_proto == IPPROTO_TCP)
			fsp->flow_type = TCP_V4_FLOW;
		else
			fsp->flow_type = UDP_V4_FLOW;

		fsp->h_u.tcp_ip4_spec.psrc = fltr->tuple.src_port;
		fsp->h_u.tcp_ip4_spec.pdst = fltr->tuple.dst_port;
		fsp->h_u.tcp_ip4_spec.ip4src = fltr->tuple.src_ipv4;
		fsp->h_u.tcp_ip4_spec.ip4dst = fltr->tuple.dst_ipv4;
	} else {
		if (fltr->tuple.ip_proto == IPPROTO_TCP)
			fsp->flow_type = TCP_V6_FLOW;
		else
			fsp->flow_type = UDP_V6_FLOW;
		fsp->h_u.tcp_ip6_spec.psrc = fltr->tuple.src_port;
		fsp->h_u.tcp_ip6_spec.pdst = fltr->tuple.dst_port;
		memcpy(&fsp->h_u.tcp_ip6_spec.ip6src,
		       &fltr->tuple.src_ipv6, sizeof(struct in6_addr));
		memcpy(&fsp->h_u.tcp_ip6_spec.ip6dst,
		       &fltr->tuple.dst_ipv6, sizeof(struct in6_addr));
	}

	fsp->ring_cookie = fltr->rxq_id;

unlock:
	__qede_unlock(edev);
	return rc;
}

static int
qede_validate_and_check_flow_exist(struct qede_dev *edev,
				   struct ethtool_rx_flow_spec *fsp,
				   int *min_hlen)
{
	__be16 src_port = 0x0, dst_port = 0x0;
	struct qede_arfs_fltr_node *fltr;
	struct hlist_node *temp;
	struct hlist_head *head;
	__be16 eth_proto;
	u8 ip_proto;

	if (fsp->location >= QEDE_RFS_MAX_FLTR ||
	    fsp->ring_cookie >= QEDE_RSS_COUNT(edev))
		return -EINVAL;

	if (fsp->flow_type == TCP_V4_FLOW) {
		*min_hlen += sizeof(struct iphdr) +
				sizeof(struct tcphdr);
		eth_proto = htons(ETH_P_IP);
		ip_proto = IPPROTO_TCP;
	} else if (fsp->flow_type == UDP_V4_FLOW) {
		*min_hlen += sizeof(struct iphdr) +
				sizeof(struct udphdr);
		eth_proto = htons(ETH_P_IP);
		ip_proto = IPPROTO_UDP;
	} else if (fsp->flow_type == TCP_V6_FLOW) {
		*min_hlen += sizeof(struct ipv6hdr) +
				sizeof(struct tcphdr);
		eth_proto = htons(ETH_P_IPV6);
		ip_proto = IPPROTO_TCP;
	} else if (fsp->flow_type == UDP_V6_FLOW) {
		*min_hlen += sizeof(struct ipv6hdr) +
				sizeof(struct udphdr);
		eth_proto = htons(ETH_P_IPV6);
		ip_proto = IPPROTO_UDP;
	} else {
		DP_NOTICE(edev, "Unsupported flow type = 0x%x\n",
			  fsp->flow_type);
		return -EPROTONOSUPPORT;
	}

	if (eth_proto == htons(ETH_P_IP)) {
		src_port = fsp->h_u.tcp_ip4_spec.psrc;
		dst_port = fsp->h_u.tcp_ip4_spec.pdst;
	} else {
		src_port = fsp->h_u.tcp_ip6_spec.psrc;
		dst_port = fsp->h_u.tcp_ip6_spec.pdst;
	}

	head = QEDE_ARFS_BUCKET_HEAD(edev, 0);
	hlist_for_each_entry_safe(fltr, temp, head, node) {
		if ((fltr->tuple.ip_proto == ip_proto &&
		     fltr->tuple.eth_proto == eth_proto &&
		     qede_compare_user_flow_ips(fltr, fsp, eth_proto) &&
		     fltr->tuple.src_port == src_port &&
		     fltr->tuple.dst_port == dst_port) ||
		    fltr->sw_id == fsp->location)
			return -EEXIST;
	}

	return 0;
}

static int
qede_poll_arfs_filter_config(struct qede_dev *edev,
			     struct qede_arfs_fltr_node *fltr)
{
	int count = QEDE_ARFS_POLL_COUNT;

	while (fltr->used && count) {
		msleep(20);
		count--;
	}

	if (count == 0 || fltr->fw_rc) {
		qede_dequeue_fltr_and_config_searcher(edev, fltr);
		return -EIO;
	}

	return fltr->fw_rc;
}

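/* Add an ethtool ntuple rule: builds a minimal template packet (Ethernet +
 * IP + L4 ports) in n->data for the firmware to match against, then reuses
 * the aRFS enqueue/configure path and polls for the firmware verdict.
 */
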
int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
{
	struct ethtool_rx_flow_spec *fsp = &info->fs;
	struct qede_arfs_fltr_node *n;
	int min_hlen = ETH_HLEN, rc;
	struct ethhdr *eth;
	struct iphdr *ip;
	__be16 *ports;

	__qede_lock(edev);

	if (!edev->arfs) {
		rc = -EPERM;
		goto unlock;
	}

	rc = qede_validate_and_check_flow_exist(edev, fsp, &min_hlen);
	if (rc)
		goto unlock;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n) {
		rc = -ENOMEM;
		goto unlock;
	}

	n->data = kzalloc(min_hlen, GFP_KERNEL);
	if (!n->data) {
		kfree(n);
		rc = -ENOMEM;
		goto unlock;
	}

	n->sw_id = fsp->location;
	set_bit(n->sw_id, edev->arfs->arfs_fltr_bmap);
	n->buf_len = min_hlen;
	n->rxq_id = fsp->ring_cookie;
	n->next_rxq_id = n->rxq_id;
	eth = (struct ethhdr *)n->data;

	if (info->fs.flow_type == TCP_V4_FLOW ||
	    info->fs.flow_type == UDP_V4_FLOW) {
		ports = (__be16 *)(n->data + ETH_HLEN +
				   sizeof(struct iphdr));
		eth->h_proto = htons(ETH_P_IP);
		n->tuple.eth_proto = htons(ETH_P_IP);
		n->tuple.src_ipv4 = info->fs.h_u.tcp_ip4_spec.ip4src;
		n->tuple.dst_ipv4 = info->fs.h_u.tcp_ip4_spec.ip4dst;
		n->tuple.src_port = info->fs.h_u.tcp_ip4_spec.psrc;
		n->tuple.dst_port = info->fs.h_u.tcp_ip4_spec.pdst;
		ports[0] = n->tuple.src_port;
		ports[1] = n->tuple.dst_port;
		ip = (struct iphdr *)(n->data + ETH_HLEN);
		ip->saddr = info->fs.h_u.tcp_ip4_spec.ip4src;
		ip->daddr = info->fs.h_u.tcp_ip4_spec.ip4dst;
		ip->version = 0x4;
		ip->ihl = 0x5;

		if (info->fs.flow_type == TCP_V4_FLOW) {
			n->tuple.ip_proto = IPPROTO_TCP;
			ip->protocol = IPPROTO_TCP;
		} else {
			n->tuple.ip_proto = IPPROTO_UDP;
			ip->protocol = IPPROTO_UDP;
		}
		ip->tot_len = cpu_to_be16(min_hlen - ETH_HLEN);
	} else {
		struct ipv6hdr *ip6;

		ip6 = (struct ipv6hdr *)(n->data + ETH_HLEN);
		ports = (__be16 *)(n->data + ETH_HLEN +
				   sizeof(struct ipv6hdr));
		eth->h_proto = htons(ETH_P_IPV6);
		n->tuple.eth_proto = htons(ETH_P_IPV6);
		memcpy(&n->tuple.src_ipv6, &info->fs.h_u.tcp_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		memcpy(&n->tuple.dst_ipv6, &info->fs.h_u.tcp_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		n->tuple.src_port = info->fs.h_u.tcp_ip6_spec.psrc;
		n->tuple.dst_port = info->fs.h_u.tcp_ip6_spec.pdst;
		ports[0] = n->tuple.src_port;
		ports[1] = n->tuple.dst_port;
		memcpy(&ip6->saddr, &n->tuple.src_ipv6,
		       sizeof(struct in6_addr));
		memcpy(&ip6->daddr, &n->tuple.dst_ipv6,
		       sizeof(struct in6_addr));
		ip6->version = 0x6;

		if (info->fs.flow_type == TCP_V6_FLOW) {
			n->tuple.ip_proto = IPPROTO_TCP;
			ip6->nexthdr = NEXTHDR_TCP;
			ip6->payload_len = cpu_to_be16(sizeof(struct tcphdr));
		} else {
			n->tuple.ip_proto = IPPROTO_UDP;
			ip6->nexthdr = NEXTHDR_UDP;
			ip6->payload_len = cpu_to_be16(sizeof(struct udphdr));
		}
	}

	rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
	if (rc)
		goto unlock;

	qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
	rc = qede_poll_arfs_filter_config(edev, n);
unlock:
	__qede_unlock(edev);
	return rc;
}

int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
{
	struct ethtool_rx_flow_spec *fsp = &info->fs;
	struct qede_arfs_fltr_node *fltr = NULL;
	int rc = -EPERM;

	__qede_lock(edev);
	if (!edev->arfs)
		goto unlock;

	fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0),
					 fsp->location);
	if (!fltr)
		goto unlock;

	qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id, false);

	rc = qede_poll_arfs_filter_config(edev, fltr);
	if (!rc)
		qede_dequeue_fltr_and_config_searcher(edev, fltr);

unlock:
	__qede_unlock(edev);
	return rc;
}

int qede_get_arfs_filter_count(struct qede_dev *edev)
{
	int count = 0;

	__qede_lock(edev);

	if (!edev->arfs)
		goto unlock;

	count = edev->arfs->filter_count;

unlock:
	__qede_unlock(edev);
	return count;
}