// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/udp_tunnel.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>

#include <linux/qed/qed_if.h>
#include "qede.h"
#define QEDE_FILTER_PRINT_MAX_LEN	(64)
struct qede_arfs_tuple {
	union {
		__be32 src_ipv4;
		struct in6_addr src_ipv6;
	};
	union {
		__be32 dst_ipv4;
		struct in6_addr dst_ipv6;
	};
	__be16 src_port;
	__be16 dst_port;
	__be16 eth_proto;
	u8 ip_proto;

	/* Describe filtering mode needed for this kind of filter */
	enum qed_filter_config_mode mode;

	/* Used to compare new/old filters. Return true if IPs match */
	bool (*ip_comp)(struct qede_arfs_tuple *a, struct qede_arfs_tuple *b);

	/* Given an address into ethhdr build a header from tuple info */
	void (*build_hdr)(struct qede_arfs_tuple *t, void *header);

	/* Stringify the tuple for a print into the provided buffer */
	void (*stringify)(struct qede_arfs_tuple *t, void *buffer);
};
struct qede_arfs_fltr_node {
#define QEDE_FLTR_VALID	0
	unsigned long state;

	/* pointer to aRFS packet buffer */
	void *data;

	/* dma map address of aRFS packet buffer */
	dma_addr_t mapping;

	/* length of aRFS packet buffer */
	int buf_len;

	/* tuples to hold from aRFS packet buffer */
	struct qede_arfs_tuple tuple;

	u32 flow_id;
	u64 sw_id;
	u16 rxq_id, next_rxq_id;
	u8 vfid, fw_rc;
	bool filter_op, used, b_is_drop;
	struct hlist_node node;
};
struct qede_arfs {
#define QEDE_ARFS_BUCKET_HEAD(edev, idx) (&(edev)->arfs->arfs_hl_head[idx])
#define QEDE_ARFS_POLL_COUNT	100
#define QEDE_RFS_FLW_BITSHIFT	(4)
#define QEDE_RFS_FLW_MASK	((1 << QEDE_RFS_FLW_BITSHIFT) - 1)
	struct hlist_head arfs_hl_head[1 << QEDE_RFS_FLW_BITSHIFT];

	/* lock for filter list access */
	spinlock_t arfs_list_lock;
	unsigned long *arfs_fltr_bmap;
	int filter_count;

	/* Currently configured filtering mode */
	enum qed_filter_config_mode mode;
};
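
/* Filters are spread across 1 << QEDE_RFS_FLW_BITSHIFT = 16 hlist buckets.
 * The aRFS fast path selects a bucket from the skb hash
 * (skb_get_hash_raw(skb) & QEDE_RFS_FLW_MASK), while ethtool/tc rules
 * always live in bucket 0 - see QEDE_ARFS_BUCKET_HEAD(edev, 0) below.
 */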
static void qede_configure_arfs_fltr(struct qede_dev *edev,
				     struct qede_arfs_fltr_node *n,
				     u16 rxq_id, bool add_fltr)
{
	const struct qed_eth_ops *op = edev->ops;
	struct qed_ntuple_filter_params params;

	if (n->used)
		return;

	memset(&params, 0, sizeof(params));

	params.addr = n->mapping;
	params.length = n->buf_len;
	params.qid = rxq_id;
	params.b_is_add = add_fltr;
	params.b_is_drop = n->b_is_drop;

	if (n->vfid) {
		params.b_is_vf = true;
		params.vf_id = n->vfid - 1;
	}

	if (n->tuple.stringify) {
		char tuple_buffer[QEDE_FILTER_PRINT_MAX_LEN];

		n->tuple.stringify(&n->tuple, tuple_buffer);
		DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
			   "%s sw_id[0x%llx]: %s [vf %u queue %d]\n",
			   add_fltr ? "Adding" : "Deleting",
			   n->sw_id, tuple_buffer, n->vfid, rxq_id);
	}

	n->used = true;
	n->filter_op = add_fltr;
	op->ntuple_filter_config(edev->cdev, n, &params);
}
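
/* Note: ntuple_filter_config() completes asynchronously. The qed core
 * reports the firmware return code through qede_arfs_filter_op() below,
 * which clears n->used and updates the QEDE_FLTR_VALID bit; callers that
 * need synchronous behaviour poll via qede_poll_arfs_filter_config().
 */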
static void
qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr)
{
	kfree(fltr->data);

	if (fltr->sw_id < QEDE_RFS_MAX_FLTR)
		clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap);

	kfree(fltr);
}
static int
qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev,
				      struct qede_arfs_fltr_node *fltr,
				      u16 bucket_idx)
{
	fltr->mapping = dma_map_single(&edev->pdev->dev, fltr->data,
				       fltr->buf_len, DMA_TO_DEVICE);
	if (dma_mapping_error(&edev->pdev->dev, fltr->mapping)) {
		DP_NOTICE(edev, "Failed to map DMA memory for rule\n");
		qede_free_arfs_filter(edev, fltr);
		return -ENOMEM;
	}

	INIT_HLIST_NODE(&fltr->node);
	hlist_add_head(&fltr->node,
		       QEDE_ARFS_BUCKET_HEAD(edev, bucket_idx));

	edev->arfs->filter_count++;
	if (edev->arfs->filter_count == 1 &&
	    edev->arfs->mode == QED_FILTER_CONFIG_MODE_DISABLE) {
		edev->ops->configure_arfs_searcher(edev->cdev,
						   fltr->tuple.mode);
		edev->arfs->mode = fltr->tuple.mode;
	}

	return 0;
}
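
/* The RFS searcher is enabled lazily: it is configured (in the mode the
 * first filter requires) when filter_count goes 0 -> 1, and disabled
 * again in the dequeue path below once the last filter is removed.
 */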
static void
qede_dequeue_fltr_and_config_searcher(struct qede_dev *edev,
				      struct qede_arfs_fltr_node *fltr)
{
	hlist_del(&fltr->node);
	dma_unmap_single(&edev->pdev->dev, fltr->mapping,
			 fltr->buf_len, DMA_TO_DEVICE);

	qede_free_arfs_filter(edev, fltr);

	edev->arfs->filter_count--;
	if (!edev->arfs->filter_count &&
	    edev->arfs->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
		enum qed_filter_config_mode mode;

		mode = QED_FILTER_CONFIG_MODE_DISABLE;
		edev->ops->configure_arfs_searcher(edev->cdev, mode);
		edev->arfs->mode = QED_FILTER_CONFIG_MODE_DISABLE;
	}
}
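
/* Completion callback invoked by the qed core once firmware has handled
 * a filter request. On failure the filter is marked invalid; on success
 * the VALID bit tracks whether the last operation was an add or delete,
 * and a pending rxq migration (rxq_id != next_rxq_id) triggers the
 * follow-up delete/add pair.
 */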
void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc)
{
	struct qede_arfs_fltr_node *fltr = filter;
	struct qede_dev *edev = dev;

	fltr->fw_rc = fw_rc;

	if (fw_rc) {
		DP_NOTICE(edev,
			  "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=0x%llx, src_port=%d, dst_port=%d, rxq=%d\n",
			  fw_rc, fltr->flow_id, fltr->sw_id,
			  ntohs(fltr->tuple.src_port),
			  ntohs(fltr->tuple.dst_port), fltr->rxq_id);

		spin_lock_bh(&edev->arfs->arfs_list_lock);

		fltr->used = false;
		clear_bit(QEDE_FLTR_VALID, &fltr->state);

		spin_unlock_bh(&edev->arfs->arfs_list_lock);
		return;
	}

	spin_lock_bh(&edev->arfs->arfs_list_lock);

	fltr->used = false;

	if (fltr->filter_op) {
		set_bit(QEDE_FLTR_VALID, &fltr->state);
		if (fltr->rxq_id != fltr->next_rxq_id)
			qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id,
						 false);
	} else {
		clear_bit(QEDE_FLTR_VALID, &fltr->state);
		if (fltr->rxq_id != fltr->next_rxq_id) {
			fltr->rxq_id = fltr->next_rxq_id;
			qede_configure_arfs_fltr(edev, fltr,
						 fltr->rxq_id, true);
		}
	}

	spin_unlock_bh(&edev->arfs->arfs_list_lock);
}
/* Should be called while qede_lock is held */
void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
{
	int i;

	for (i = 0; i <= QEDE_RFS_FLW_MASK; i++) {
		struct hlist_node *temp;
		struct hlist_head *head;
		struct qede_arfs_fltr_node *fltr;

		head = &edev->arfs->arfs_hl_head[i];

		hlist_for_each_entry_safe(fltr, temp, head, node) {
			bool del = false;

			if (edev->state != QEDE_STATE_OPEN)
				del = true;

			spin_lock_bh(&edev->arfs->arfs_list_lock);

			if ((!test_bit(QEDE_FLTR_VALID, &fltr->state) &&
			     !fltr->used) || free_fltr) {
				qede_dequeue_fltr_and_config_searcher(edev,
								      fltr);
			} else {
				bool flow_exp = false;
#ifdef CONFIG_RFS_ACCEL
				flow_exp = rps_may_expire_flow(edev->ndev,
							       fltr->rxq_id,
							       fltr->flow_id,
							       fltr->sw_id);
#endif
				if ((flow_exp || del) && !free_fltr)
					qede_configure_arfs_fltr(edev, fltr,
								 fltr->rxq_id,
								 false);
			}

			spin_unlock_bh(&edev->arfs->arfs_list_lock);
		}
	}

#ifdef CONFIG_RFS_ACCEL
	spin_lock_bh(&edev->arfs->arfs_list_lock);

	if (edev->arfs->filter_count) {
		set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
		schedule_delayed_work(&edev->sp_task,
				      QEDE_SP_TASK_POLL_DELAY);
	}

	spin_unlock_bh(&edev->arfs->arfs_list_lock);
#endif
}
/* This function waits until all aRFS filters get deleted and freed.
 * On timeout it frees all filters forcefully.
 */
void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev)
{
	int count = QEDE_ARFS_POLL_COUNT;

	while (count) {
		qede_process_arfs_filters(edev, false);

		if (!edev->arfs->filter_count)
			break;

		msleep(100);
		count--;
	}

	if (!count) {
		DP_NOTICE(edev, "Timeout in polling for arfs filter free\n");

		/* Something is terribly wrong, free forcefully */
		qede_process_arfs_filters(edev, true);
	}
}
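
/* Expected to run on the unload/teardown path, so that once all filters
 * are freed (or force-freed on timeout) qede_free_arfs() below can
 * release the bookkeeping safely. The exact call sites live outside this
 * file (assumption: in qede_main.c).
 */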
int qede_alloc_arfs(struct qede_dev *edev)
{
	int i;

	if (!edev->dev_info.common.b_arfs_capable)
		return -EINVAL;

	edev->arfs = vzalloc(sizeof(*edev->arfs));
	if (!edev->arfs)
		return -ENOMEM;

	spin_lock_init(&edev->arfs->arfs_list_lock);

	for (i = 0; i <= QEDE_RFS_FLW_MASK; i++)
		INIT_HLIST_HEAD(QEDE_ARFS_BUCKET_HEAD(edev, i));

	edev->arfs->arfs_fltr_bmap =
		vzalloc(array_size(sizeof(long),
				   BITS_TO_LONGS(QEDE_RFS_MAX_FLTR)));
	if (!edev->arfs->arfs_fltr_bmap) {
		vfree(edev->arfs);
		edev->arfs = NULL;
		return -ENOMEM;
	}

#ifdef CONFIG_RFS_ACCEL
	edev->ndev->rx_cpu_rmap = alloc_irq_cpu_rmap(QEDE_RSS_COUNT(edev));
	if (!edev->ndev->rx_cpu_rmap) {
		vfree(edev->arfs->arfs_fltr_bmap);
		edev->arfs->arfs_fltr_bmap = NULL;
		vfree(edev->arfs);
		edev->arfs = NULL;
		return -ENOMEM;
	}
#endif
	return 0;
}
void qede_free_arfs(struct qede_dev *edev)
{
	if (!edev->arfs)
		return;

#ifdef CONFIG_RFS_ACCEL
	if (edev->ndev->rx_cpu_rmap)
		free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);

	edev->ndev->rx_cpu_rmap = NULL;
#endif
	vfree(edev->arfs->arfs_fltr_bmap);
	edev->arfs->arfs_fltr_bmap = NULL;
	vfree(edev->arfs);
	edev->arfs = NULL;
}
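
/* aRFS fast path. qede_rx_flow_steer() below is the classic
 * .ndo_rx_flow_steer implementation (assumption: the netdev_ops wiring
 * lives in qede_main.c, not in this file); the stack calls it per flow
 * to steer packets to the rx queue local to the consuming CPU.
 */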
#ifdef CONFIG_RFS_ACCEL
static bool qede_compare_ip_addr(struct qede_arfs_fltr_node *tpos,
				 const struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		if (tpos->tuple.src_ipv4 == ip_hdr(skb)->saddr &&
		    tpos->tuple.dst_ipv4 == ip_hdr(skb)->daddr)
			return true;
	} else {
		struct in6_addr *src = &tpos->tuple.src_ipv6;
		u8 size = sizeof(struct in6_addr);

		if (!memcmp(src, &ipv6_hdr(skb)->saddr, size) &&
		    !memcmp(&tpos->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr, size))
			return true;
	}

	return false;
}
static struct qede_arfs_fltr_node *
qede_arfs_htbl_key_search(struct hlist_head *h, const struct sk_buff *skb,
			  __be16 src_port, __be16 dst_port, u8 ip_proto)
{
	struct qede_arfs_fltr_node *tpos;

	hlist_for_each_entry(tpos, h, node)
		if (tpos->tuple.ip_proto == ip_proto &&
		    tpos->tuple.eth_proto == skb->protocol &&
		    qede_compare_ip_addr(tpos, skb) &&
		    tpos->tuple.src_port == src_port &&
		    tpos->tuple.dst_port == dst_port)
			return tpos;

	return NULL;
}
static struct qede_arfs_fltr_node *
qede_alloc_filter(struct qede_dev *edev, int min_hlen)
{
	struct qede_arfs_fltr_node *n;
	int bit_id;

	bit_id = find_first_zero_bit(edev->arfs->arfs_fltr_bmap,
				     QEDE_RFS_MAX_FLTR);
	if (bit_id >= QEDE_RFS_MAX_FLTR)
		return NULL;

	n = kzalloc(sizeof(*n), GFP_ATOMIC);
	if (!n)
		return NULL;

	n->data = kzalloc(min_hlen, GFP_ATOMIC);
	if (!n->data) {
		kfree(n);
		return NULL;
	}

	n->sw_id = (u16)bit_id;
	set_bit(bit_id, edev->arfs->arfs_fltr_bmap);
	return n;
}
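
/* The sw_id handed out here doubles as the filter's bit position in
 * arfs_fltr_bmap; ethtool n-tuple rules reserve their slot the same way
 * using fsp->location (see qede_add_cls_rule() below).
 */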
int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
		       u16 rxq_index, u32 flow_id)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_arfs_fltr_node *n;
	int min_hlen, rc, tp_offset;
	struct ethhdr *eth;
	__be16 *ports;
	u16 tbl_idx;
	u8 ip_proto;

	if (skb->encapsulation)
		return -EPROTONOSUPPORT;

	if (skb->protocol != htons(ETH_P_IP) &&
	    skb->protocol != htons(ETH_P_IPV6))
		return -EPROTONOSUPPORT;

	if (skb->protocol == htons(ETH_P_IP)) {
		ip_proto = ip_hdr(skb)->protocol;
		tp_offset = sizeof(struct iphdr);
	} else {
		ip_proto = ipv6_hdr(skb)->nexthdr;
		tp_offset = sizeof(struct ipv6hdr);
	}

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
		return -EPROTONOSUPPORT;

	ports = (__be16 *)(skb->data + tp_offset);
	tbl_idx = skb_get_hash_raw(skb) & QEDE_RFS_FLW_MASK;

	spin_lock_bh(&edev->arfs->arfs_list_lock);

	n = qede_arfs_htbl_key_search(QEDE_ARFS_BUCKET_HEAD(edev, tbl_idx),
				      skb, ports[0], ports[1], ip_proto);
	if (n) {
		/* Filter match */
		n->next_rxq_id = rxq_index;

		if (test_bit(QEDE_FLTR_VALID, &n->state)) {
			if (n->rxq_id != rxq_index)
				qede_configure_arfs_fltr(edev, n, n->rxq_id,
							 false);
		} else {
			if (!n->used) {
				n->rxq_id = rxq_index;
				qede_configure_arfs_fltr(edev, n, n->rxq_id,
							 true);
			}
		}

		rc = n->sw_id;
		goto ret_unlock;
	}

	min_hlen = ETH_HLEN + skb_headlen(skb);

	n = qede_alloc_filter(edev, min_hlen);
	if (!n) {
		rc = -ENOMEM;
		goto ret_unlock;
	}

	n->buf_len = min_hlen;
	n->rxq_id = rxq_index;
	n->next_rxq_id = rxq_index;
	n->tuple.src_port = ports[0];
	n->tuple.dst_port = ports[1];
	n->flow_id = flow_id;

	if (skb->protocol == htons(ETH_P_IP)) {
		n->tuple.src_ipv4 = ip_hdr(skb)->saddr;
		n->tuple.dst_ipv4 = ip_hdr(skb)->daddr;
	} else {
		memcpy(&n->tuple.src_ipv6, &ipv6_hdr(skb)->saddr,
		       sizeof(struct in6_addr));
		memcpy(&n->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr,
		       sizeof(struct in6_addr));
	}

	eth = (struct ethhdr *)n->data;
	eth->h_proto = skb->protocol;
	n->tuple.eth_proto = skb->protocol;
	n->tuple.ip_proto = ip_proto;
	n->tuple.mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
	memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb));

	rc = qede_enqueue_fltr_and_config_searcher(edev, n, tbl_idx);
	if (rc)
		goto ret_unlock;

	qede_configure_arfs_fltr(edev, n, n->rxq_id, true);

	spin_unlock_bh(&edev->arfs->arfs_list_lock);

	set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);

	return n->sw_id;

ret_unlock:
	spin_unlock_bh(&edev->arfs->arfs_list_lock);
	return rc;
}
#endif
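
/* On success the stack receives the filter's sw_id, which it later pairs
 * with flow_id when asking rps_may_expire_flow(); actual expiry and
 * cleanup happen in qede_process_arfs_filters() above, driven by the
 * periodic sp_task scheduled here.
 */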
void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port)
{
	struct qede_dev *edev = dev;

	if (edev->vxlan_dst_port != vxlan_port)
		edev->vxlan_dst_port = 0;

	if (edev->geneve_dst_port != geneve_port)
		edev->geneve_dst_port = 0;
}
void qede_force_mac(void *dev, u8 *mac, bool forced)
{
	struct qede_dev *edev = dev;

	__qede_lock(edev);

	if (!is_valid_ether_addr(mac)) {
		__qede_unlock(edev);
		return;
	}

	ether_addr_copy(edev->ndev->dev_addr, mac);
	__qede_unlock(edev);
}
void qede_fill_rss_params(struct qede_dev *edev,
			  struct qed_update_vport_rss_params *rss, u8 *update)
{
	bool need_reset = false;
	int i = 0;

	if (QEDE_RSS_COUNT(edev) <= 1) {
		memset(rss, 0, sizeof(*rss));
		*update = 0;
		return;
	}

	/* Need to validate current RSS config uses valid entries */
	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
		if (edev->rss_ind_table[i] >= QEDE_RSS_COUNT(edev)) {
			need_reset = true;
			break;
		}
	}

	if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) || need_reset) {
		for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
			u16 indir_val, val;

			val = QEDE_RSS_COUNT(edev);
			indir_val = ethtool_rxfh_indir_default(i, val);
			edev->rss_ind_table[i] = indir_val;
		}
		edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
	}

	/* Now that we have the queue-indirection, prepare the handles */
	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
		u16 idx = QEDE_RX_QUEUE_IDX(edev, edev->rss_ind_table[i]);

		rss->rss_ind_table[i] = edev->fp_array[idx].rxq->handle;
	}

	if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
		netdev_rss_key_fill(edev->rss_key, sizeof(edev->rss_key));
		edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
	}
	memcpy(rss->rss_key, edev->rss_key, sizeof(rss->rss_key));

	if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
		edev->rss_caps = QED_RSS_IPV4 | QED_RSS_IPV6 |
		    QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
		edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
	}
	rss->rss_caps = edev->rss_caps;

	rss->rss_table_size_log = 7;
	*update = 1;
}
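
/* For reference: ethtool_rxfh_indir_default(i, val) spreads the table
 * entries round-robin, i.e. it evaluates to i % val, so a freshly reset
 * indirection table walks the rx queues 0, 1, ..., val - 1, 0, 1, ...
 */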
static int qede_set_ucast_rx_mac(struct qede_dev *edev,
				 enum qed_filter_xcast_params_type opcode,
				 unsigned char mac[ETH_ALEN])
{
	struct qed_filter_params filter_cmd;

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	filter_cmd.type = QED_FILTER_TYPE_UCAST;
	filter_cmd.filter.ucast.type = opcode;
	filter_cmd.filter.ucast.mac_valid = 1;
	ether_addr_copy(filter_cmd.filter.ucast.mac, mac);

	return edev->ops->filter_config(edev->cdev, &filter_cmd);
}
static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
				  enum qed_filter_xcast_params_type opcode,
				  u16 vid)
{
	struct qed_filter_params filter_cmd;

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	filter_cmd.type = QED_FILTER_TYPE_UCAST;
	filter_cmd.filter.ucast.type = opcode;
	filter_cmd.filter.ucast.vlan_valid = 1;
	filter_cmd.filter.ucast.vlan = vid;

	return edev->ops->filter_config(edev->cdev, &filter_cmd);
}
static int qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
{
	struct qed_update_vport_params *params;
	int rc;

	/* Proceed only if action actually needs to be performed */
	if (edev->accept_any_vlan == action)
		return 0;

	params = vzalloc(sizeof(*params));
	if (!params)
		return -ENOMEM;

	params->vport_id = 0;
	params->accept_any_vlan = action;
	params->update_accept_any_vlan_flg = 1;

	rc = edev->ops->vport_update(edev->cdev, params);
	if (rc) {
		DP_ERR(edev, "Failed to %s accept-any-vlan\n",
		       action ? "enable" : "disable");
	} else {
		DP_INFO(edev, "%s accept-any-vlan\n",
			action ? "enabled" : "disabled");
		edev->accept_any_vlan = action;
	}

	vfree(params);
	return rc;
}
int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_vlan *vlan, *tmp;
	int rc = 0;

	DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan) {
		DP_INFO(edev, "Failed to allocate struct for vlan\n");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&vlan->list);
	vlan->vid = vid;
	vlan->configured = false;

	/* Verify vlan isn't already configured */
	list_for_each_entry(tmp, &edev->vlan_list, list) {
		if (tmp->vid == vlan->vid) {
			DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
				   "vlan already configured\n");
			kfree(vlan);
			return -EEXIST;
		}
	}

	/* If interface is down, cache this VLAN ID and return */
	__qede_lock(edev);
	if (edev->state != QEDE_STATE_OPEN) {
		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "Interface is down, VLAN %d will be configured when interface is up\n",
			   vid);
		if (vid != 0)
			edev->non_configured_vlans++;
		list_add(&vlan->list, &edev->vlan_list);
		goto out;
	}

	/* Check for the filter limit.
	 * Note - vlan0 has a reserved filter and can be added without
	 * worrying about quota
	 */
	if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
	    (vlan->vid == 0)) {
		rc = qede_set_ucast_rx_vlan(edev,
					    QED_FILTER_XCAST_TYPE_ADD,
					    vlan->vid);
		if (rc) {
			DP_ERR(edev, "Failed to configure VLAN %d\n",
			       vlan->vid);
			kfree(vlan);
			goto out;
		}
		vlan->configured = true;

		/* vlan0 filter isn't consuming out of our quota */
		if (vlan->vid != 0)
			edev->configured_vlans++;
	} else {
		/* Out of quota; Activate accept-any-VLAN mode */
		if (!edev->non_configured_vlans) {
			rc = qede_config_accept_any_vlan(edev, true);
			if (rc) {
				kfree(vlan);
				goto out;
			}
		}

		edev->non_configured_vlans++;
	}

	list_add(&vlan->list, &edev->vlan_list);

out:
	__qede_unlock(edev);
	return rc;
}
static void qede_del_vlan_from_list(struct qede_dev *edev,
				    struct qede_vlan *vlan)
{
	/* vlan0 filter isn't consuming out of our quota */
	if (vlan->vid != 0) {
		if (vlan->configured)
			edev->configured_vlans--;
		else
			edev->non_configured_vlans--;
	}

	list_del(&vlan->list);
	kfree(vlan);
}
int qede_configure_vlan_filters(struct qede_dev *edev)
{
	int rc = 0, real_rc = 0, accept_any_vlan = 0;
	struct qed_dev_eth_info *dev_info;
	struct qede_vlan *vlan = NULL;

	if (list_empty(&edev->vlan_list))
		return 0;

	dev_info = &edev->dev_info;

	/* Configure non-configured vlans */
	list_for_each_entry(vlan, &edev->vlan_list, list) {
		if (vlan->configured)
			continue;

		/* We have used all our credits, now enable accept_any_vlan */
		if ((vlan->vid != 0) &&
		    (edev->configured_vlans == dev_info->num_vlan_filters)) {
			accept_any_vlan = 1;
			continue;
		}

		DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);

		rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
					    vlan->vid);
		if (rc) {
			DP_ERR(edev, "Failed to configure VLAN %u\n",
			       vlan->vid);
			real_rc = rc;
			continue;
		}

		vlan->configured = true;
		/* vlan0 filter doesn't consume our VLAN filter's quota */
		if (vlan->vid != 0) {
			edev->non_configured_vlans--;
			edev->configured_vlans++;
		}
	}

	/* enable accept_any_vlan mode if we have more VLANs than credits,
	 * or remove accept_any_vlan mode if we've actually removed
	 * a non-configured vlan, and all remaining vlans are truly configured.
	 */
	if (accept_any_vlan)
		rc = qede_config_accept_any_vlan(edev, true);
	else if (!edev->non_configured_vlans)
		rc = qede_config_accept_any_vlan(edev, false);

	if (rc && !real_rc)
		real_rc = rc;

	return real_rc;
}
int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_vlan *vlan;
	int rc = 0;

	DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);

	/* Find whether entry exists */
	__qede_lock(edev);
	list_for_each_entry(vlan, &edev->vlan_list, list)
		if (vlan->vid == vid)
			break;

	if (list_entry_is_head(vlan, &edev->vlan_list, list)) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "Vlan isn't configured\n");
		goto out;
	}

	if (edev->state != QEDE_STATE_OPEN) {
		/* As interface is already down, we don't have a VPORT
		 * instance to remove vlan filter. So just update vlan list
		 */
		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "Interface is down, removing VLAN from list only\n");
		qede_del_vlan_from_list(edev, vlan);
		goto out;
	}

	/* Remove vlan */
	if (vlan->configured) {
		rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL,
					    vid);
		if (rc) {
			DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
			goto out;
		}
	}

	qede_del_vlan_from_list(edev, vlan);

	/* We have removed a VLAN - try to see if we can
	 * configure non-configured VLAN from the list.
	 */
	rc = qede_configure_vlan_filters(edev);

out:
	__qede_unlock(edev);
	return rc;
}
void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
{
	struct qede_vlan *vlan = NULL;

	if (list_empty(&edev->vlan_list))
		return;

	list_for_each_entry(vlan, &edev->vlan_list, list) {
		if (!vlan->configured)
			continue;

		vlan->configured = false;

		/* vlan0 filter isn't consuming out of our quota */
		if (vlan->vid != 0) {
			edev->non_configured_vlans++;
			edev->configured_vlans--;
		}

		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "marked vlan %d as non-configured\n", vlan->vid);
	}

	edev->accept_any_vlan = false;
}
static void qede_set_features_reload(struct qede_dev *edev,
				     struct qede_reload_args *args)
{
	edev->ndev->features = args->u.features;
}
netdev_features_t qede_fix_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (edev->xdp_prog || edev->ndev->mtu > PAGE_SIZE ||
	    !(features & NETIF_F_GRO))
		features &= ~NETIF_F_GRO_HW;

	return features;
}
int qede_set_features(struct net_device *dev, netdev_features_t features)
{
	struct qede_dev *edev = netdev_priv(dev);
	netdev_features_t changes = features ^ dev->features;
	bool need_reload = false;

	if (changes & NETIF_F_GRO_HW)
		need_reload = true;

	if (need_reload) {
		struct qede_reload_args args;

		args.u.features = features;
		args.func = &qede_set_features_reload;

		/* Make sure that we definitely need to reload.
		 * In case of an eBPF attached program, there will be no FW
		 * aggregations, so no need to actually reload.
		 */
		__qede_lock(edev);
		if (edev->xdp_prog)
			args.func(edev, &args);
		else
			qede_reload(edev, &args, true);
		__qede_unlock(edev);

		return 1;
	}

	return 0;
}
static int qede_udp_tunnel_sync(struct net_device *dev, unsigned int table)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_tunn_params tunn_params;
	struct udp_tunnel_info ti;
	u16 *save_port;
	int rc;

	memset(&tunn_params, 0, sizeof(tunn_params));

	udp_tunnel_nic_get_port(dev, table, 0, &ti);
	if (ti.type == UDP_TUNNEL_TYPE_VXLAN) {
		tunn_params.update_vxlan_port = 1;
		tunn_params.vxlan_port = ntohs(ti.port);
		save_port = &edev->vxlan_dst_port;
	} else {
		tunn_params.update_geneve_port = 1;
		tunn_params.geneve_port = ntohs(ti.port);
		save_port = &edev->geneve_dst_port;
	}

	__qede_lock(edev);
	rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
	__qede_unlock(edev);
	if (rc)
		return rc;

	*save_port = ntohs(ti.port);
	return 0;
}
static const struct udp_tunnel_nic_info qede_udp_tunnels_both = {
	.sync_table	= qede_udp_tunnel_sync,
	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
	},
}, qede_udp_tunnels_vxlan = {
	.sync_table	= qede_udp_tunnel_sync,
	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
	},
}, qede_udp_tunnels_geneve = {
	.sync_table	= qede_udp_tunnel_sync,
	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
	},
};
void qede_set_udp_tunnels(struct qede_dev *edev)
{
	if (edev->dev_info.common.vxlan_enable &&
	    edev->dev_info.common.geneve_enable)
		edev->ndev->udp_tunnel_nic_info = &qede_udp_tunnels_both;
	else if (edev->dev_info.common.vxlan_enable)
		edev->ndev->udp_tunnel_nic_info = &qede_udp_tunnels_vxlan;
	else if (edev->dev_info.common.geneve_enable)
		edev->ndev->udp_tunnel_nic_info = &qede_udp_tunnels_geneve;
}
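
/* Each table entry above advertises exactly one port slot per tunnel
 * type; the udp_tunnel core calls qede_udp_tunnel_sync() whenever a
 * table changes, and the driver mirrors the offloaded port into
 * vxlan_dst_port/geneve_dst_port, which qede_udp_ports_update() checks.
 */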
static void qede_xdp_reload_func(struct qede_dev *edev,
				 struct qede_reload_args *args)
{
	struct bpf_prog *old;

	old = xchg(&edev->xdp_prog, args->u.new_prog);
	if (old)
		bpf_prog_put(old);
}
static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
{
	struct qede_reload_args args;

	/* If we're called, there was already a bpf reference increment */
	args.func = &qede_xdp_reload_func;
	args.u.new_prog = prog;
	qede_reload(edev, &args, false);

	return 0;
}
int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct qede_dev *edev = netdev_priv(dev);

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return qede_xdp_set(edev, xdp->prog);
	default:
		return -EINVAL;
	}
}
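
/* Minimal XDP control path: only XDP_SETUP_PROG is handled, and the
 * program swap happens via a vport reload (qede_reload() invoking
 * qede_xdp_reload_func(), which does the xchg and drops the old prog).
 * Presumably wired up as the driver's .ndo_bpf callback in qede_main.c.
 */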
static int qede_set_mcast_rx_mac(struct qede_dev *edev,
				 enum qed_filter_xcast_params_type opcode,
				 unsigned char *mac, int num_macs)
{
	struct qed_filter_params filter_cmd;
	int i;

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	filter_cmd.type = QED_FILTER_TYPE_MCAST;
	filter_cmd.filter.mcast.type = opcode;
	filter_cmd.filter.mcast.num = num_macs;

	for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
		ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);

	return edev->ops->filter_config(edev->cdev, &filter_cmd);
}
int qede_set_mac_addr(struct net_device *ndev, void *p)
{
	struct qede_dev *edev = netdev_priv(ndev);
	struct sockaddr *addr = p;
	int rc = 0;

	/* Make sure the state doesn't transition while changing the MAC.
	 * Also, all flows accessing the dev_addr field are doing that under
	 * this lock.
	 */
	__qede_lock(edev);

	if (!is_valid_ether_addr(addr->sa_data)) {
		DP_NOTICE(edev, "The MAC address is not valid\n");
		rc = -EFAULT;
		goto out;
	}

	if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
		DP_NOTICE(edev, "qed prevents setting MAC %pM\n",
			  addr->sa_data);
		rc = -EINVAL;
		goto out;
	}

	if (edev->state == QEDE_STATE_OPEN) {
		/* Remove the previous primary mac */
		rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
					   ndev->dev_addr);
		if (rc)
			goto out;
	}

	ether_addr_copy(ndev->dev_addr, addr->sa_data);
	DP_INFO(edev, "Setting device MAC to %pM\n", addr->sa_data);

	if (edev->state != QEDE_STATE_OPEN) {
		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "The device is currently down\n");
		/* Ask PF to explicitly update a copy in bulletin board */
		if (IS_VF(edev) && edev->ops->req_bulletin_update_mac)
			edev->ops->req_bulletin_update_mac(edev->cdev,
							   ndev->dev_addr);
		goto out;
	}

	edev->ops->common->update_mac(edev->cdev, ndev->dev_addr);

	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
				   ndev->dev_addr);
out:
	__qede_unlock(edev);
	return rc;
}
static int
qede_configure_mcast_filtering(struct net_device *ndev,
			       enum qed_filter_rx_mode_type *accept_flags)
{
	struct qede_dev *edev = netdev_priv(ndev);
	unsigned char *mc_macs, *temp;
	struct netdev_hw_addr *ha;
	int rc = 0, mc_count;
	size_t size;

	size = 64 * ETH_ALEN;

	mc_macs = kzalloc(size, GFP_KERNEL);
	if (!mc_macs) {
		DP_NOTICE(edev,
			  "Failed to allocate memory for multicast MACs\n");
		rc = -ENOMEM;
		goto exit;
	}

	temp = mc_macs;

	/* Remove all previously configured MAC filters */
	rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
				   mc_macs, 1);
	if (rc)
		goto exit;

	netif_addr_lock_bh(ndev);

	mc_count = netdev_mc_count(ndev);
	if (mc_count <= 64) {
		netdev_for_each_mc_addr(ha, ndev) {
			ether_addr_copy(temp, ha->addr);
			temp += ETH_ALEN;
		}
	}

	netif_addr_unlock_bh(ndev);

	/* Check for all multicast @@@TBD resource allocation */
	if ((ndev->flags & IFF_ALLMULTI) || (mc_count > 64)) {
		if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
			*accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
	} else {
		/* Add all multicast MAC filters */
		rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
					   mc_macs, mc_count);
	}

exit:
	kfree(mc_macs);
	return rc;
}
void qede_set_rx_mode(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);
}
/* Must be called with qede_lock held */
void qede_config_rx_mode(struct net_device *ndev)
{
	enum qed_filter_rx_mode_type accept_flags;
	struct qede_dev *edev = netdev_priv(ndev);
	struct qed_filter_params rx_mode;
	unsigned char *uc_macs, *temp;
	struct netdev_hw_addr *ha;
	int rc, uc_count;
	size_t size;

	netif_addr_lock_bh(ndev);

	uc_count = netdev_uc_count(ndev);
	size = uc_count * ETH_ALEN;

	uc_macs = kzalloc(size, GFP_ATOMIC);
	if (!uc_macs) {
		DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
		netif_addr_unlock_bh(ndev);
		return;
	}

	temp = uc_macs;
	netdev_for_each_uc_addr(ha, ndev) {
		ether_addr_copy(temp, ha->addr);
		temp += ETH_ALEN;
	}

	netif_addr_unlock_bh(ndev);

	/* Configure the struct for the Rx mode */
	memset(&rx_mode, 0, sizeof(struct qed_filter_params));
	rx_mode.type = QED_FILTER_TYPE_RX_MODE;

	/* Remove all previous unicast secondary macs and multicast macs
	 * (configure / leave the primary mac)
	 */
	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
				   edev->ndev->dev_addr);
	if (rc)
		goto out;

	/* Check for promiscuous */
	if (ndev->flags & IFF_PROMISC)
		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
	else
		accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR;

	/* Configure all filters regardless, in case promisc is rejected */
	if (uc_count < edev->dev_info.num_mac_filters) {
		int i;

		temp = uc_macs;
		for (i = 0; i < uc_count; i++) {
			rc = qede_set_ucast_rx_mac(edev,
						   QED_FILTER_XCAST_TYPE_ADD,
						   temp);
			if (rc)
				goto out;

			temp += ETH_ALEN;
		}
	} else {
		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
	}

	rc = qede_configure_mcast_filtering(ndev, &accept_flags);
	if (rc)
		goto out;

	/* take care of VLAN mode */
	if (ndev->flags & IFF_PROMISC) {
		qede_config_accept_any_vlan(edev, true);
	} else if (!edev->non_configured_vlans) {
		/* It's possible that accept_any_vlan mode is set due to a
		 * previous setting of IFF_PROMISC. If vlan credits are
		 * sufficient, disable accept_any_vlan.
		 */
		qede_config_accept_any_vlan(edev, false);
	}

	rx_mode.filter.accept_flags = accept_flags;
	edev->ops->filter_config(edev->cdev, &rx_mode);
out:
	kfree(uc_macs);
}
static struct qede_arfs_fltr_node *
qede_get_arfs_fltr_by_loc(struct hlist_head *head, u64 location)
{
	struct qede_arfs_fltr_node *fltr;

	hlist_for_each_entry(fltr, head, node)
		if (location == fltr->sw_id)
			return fltr;

	return NULL;
}
int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info,
			  u32 *rule_locs)
{
	struct qede_arfs_fltr_node *fltr;
	struct hlist_head *head;
	int cnt = 0, rc = 0;

	info->data = QEDE_RFS_MAX_FLTR;

	__qede_lock(edev);

	if (!edev->arfs) {
		rc = -EPERM;
		goto unlock;
	}

	head = QEDE_ARFS_BUCKET_HEAD(edev, 0);

	hlist_for_each_entry(fltr, head, node) {
		if (cnt == info->rule_cnt) {
			rc = -EMSGSIZE;
			goto unlock;
		}

		rule_locs[cnt] = fltr->sw_id;
		cnt++;
	}

	info->rule_cnt = cnt;

unlock:
	__qede_unlock(edev);
	return rc;
}
int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp = &cmd->fs;
	struct qede_arfs_fltr_node *fltr = NULL;
	int rc = 0;

	cmd->data = QEDE_RFS_MAX_FLTR;

	__qede_lock(edev);

	if (!edev->arfs) {
		rc = -EPERM;
		goto unlock;
	}

	fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0),
					 fsp->location);
	if (!fltr) {
		DP_NOTICE(edev, "Rule not found - location=0x%x\n",
			  fsp->location);
		rc = -EINVAL;
		goto unlock;
	}

	if (fltr->tuple.eth_proto == htons(ETH_P_IP)) {
		if (fltr->tuple.ip_proto == IPPROTO_TCP)
			fsp->flow_type = TCP_V4_FLOW;
		else
			fsp->flow_type = UDP_V4_FLOW;

		fsp->h_u.tcp_ip4_spec.psrc = fltr->tuple.src_port;
		fsp->h_u.tcp_ip4_spec.pdst = fltr->tuple.dst_port;
		fsp->h_u.tcp_ip4_spec.ip4src = fltr->tuple.src_ipv4;
		fsp->h_u.tcp_ip4_spec.ip4dst = fltr->tuple.dst_ipv4;
	} else {
		if (fltr->tuple.ip_proto == IPPROTO_TCP)
			fsp->flow_type = TCP_V6_FLOW;
		else
			fsp->flow_type = UDP_V6_FLOW;

		fsp->h_u.tcp_ip6_spec.psrc = fltr->tuple.src_port;
		fsp->h_u.tcp_ip6_spec.pdst = fltr->tuple.dst_port;
		memcpy(&fsp->h_u.tcp_ip6_spec.ip6src,
		       &fltr->tuple.src_ipv6, sizeof(struct in6_addr));
		memcpy(&fsp->h_u.tcp_ip6_spec.ip6dst,
		       &fltr->tuple.dst_ipv6, sizeof(struct in6_addr));
	}

	fsp->ring_cookie = fltr->rxq_id;

	if (fltr->vfid) {
		fsp->ring_cookie |= ((u64)fltr->vfid) <<
					ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
	}

	if (fltr->b_is_drop)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
unlock:
	__qede_unlock(edev);
	return rc;
}
static int
qede_poll_arfs_filter_config(struct qede_dev *edev,
			     struct qede_arfs_fltr_node *fltr)
{
	int count = QEDE_ARFS_POLL_COUNT;

	while (fltr->used && count) {
		msleep(20);
		count--;
	}

	if (count == 0 || fltr->fw_rc) {
		DP_NOTICE(edev, "Timeout in polling filter config\n");
		qede_dequeue_fltr_and_config_searcher(edev, fltr);
		return -EIO;
	}

	return fltr->sw_id;
}
static int qede_flow_get_min_header_size(struct qede_arfs_tuple *t)
{
	int size = ETH_HLEN;

	if (t->eth_proto == htons(ETH_P_IP))
		size += sizeof(struct iphdr);
	else
		size += sizeof(struct ipv6hdr);

	if (t->ip_proto == IPPROTO_TCP)
		size += sizeof(struct tcphdr);
	else
		size += sizeof(struct udphdr);

	return size;
}
static bool qede_flow_spec_ipv4_cmp(struct qede_arfs_tuple *a,
				    struct qede_arfs_tuple *b)
{
	if (a->eth_proto != htons(ETH_P_IP) ||
	    b->eth_proto != htons(ETH_P_IP))
		return false;

	return (a->src_ipv4 == b->src_ipv4) &&
	       (a->dst_ipv4 == b->dst_ipv4);
}
static void qede_flow_build_ipv4_hdr(struct qede_arfs_tuple *t,
				     void *header)
{
	__be16 *ports = (__be16 *)(header + ETH_HLEN + sizeof(struct iphdr));
	struct iphdr *ip = (struct iphdr *)(header + ETH_HLEN);
	struct ethhdr *eth = (struct ethhdr *)header;

	eth->h_proto = t->eth_proto;
	ip->saddr = t->src_ipv4;
	ip->daddr = t->dst_ipv4;
	ip->version = 0x4;
	ip->ihl = 0x5;
	ip->protocol = t->ip_proto;
	ip->tot_len = cpu_to_be16(qede_flow_get_min_header_size(t) - ETH_HLEN);

	/* ports is weakly typed to suit both TCP and UDP ports */
	ports[0] = t->src_port;
	ports[1] = t->dst_port;
}
static void qede_flow_stringify_ipv4_hdr(struct qede_arfs_tuple *t,
					 void *buffer)
{
	const char *prefix = t->ip_proto == IPPROTO_TCP ? "TCP" : "UDP";

	snprintf(buffer, QEDE_FILTER_PRINT_MAX_LEN,
		 "%s %pI4 (%04x) -> %pI4 (%04x)",
		 prefix, &t->src_ipv4, t->src_port,
		 &t->dst_ipv4, t->dst_port);
}
static bool qede_flow_spec_ipv6_cmp(struct qede_arfs_tuple *a,
				    struct qede_arfs_tuple *b)
{
	if (a->eth_proto != htons(ETH_P_IPV6) ||
	    b->eth_proto != htons(ETH_P_IPV6))
		return false;

	if (memcmp(&a->src_ipv6, &b->src_ipv6, sizeof(struct in6_addr)))
		return false;

	if (memcmp(&a->dst_ipv6, &b->dst_ipv6, sizeof(struct in6_addr)))
		return false;

	return true;
}
static void qede_flow_build_ipv6_hdr(struct qede_arfs_tuple *t,
				     void *header)
{
	__be16 *ports = (__be16 *)(header + ETH_HLEN + sizeof(struct ipv6hdr));
	struct ipv6hdr *ip6 = (struct ipv6hdr *)(header + ETH_HLEN);
	struct ethhdr *eth = (struct ethhdr *)header;

	eth->h_proto = t->eth_proto;
	memcpy(&ip6->saddr, &t->src_ipv6, sizeof(struct in6_addr));
	memcpy(&ip6->daddr, &t->dst_ipv6, sizeof(struct in6_addr));
	ip6->version = 0x6;

	if (t->ip_proto == IPPROTO_TCP) {
		ip6->nexthdr = NEXTHDR_TCP;
		ip6->payload_len = cpu_to_be16(sizeof(struct tcphdr));
	} else {
		ip6->nexthdr = NEXTHDR_UDP;
		ip6->payload_len = cpu_to_be16(sizeof(struct udphdr));
	}

	/* ports is weakly typed to suit both TCP and UDP ports */
	ports[0] = t->src_port;
	ports[1] = t->dst_port;
}
/* Validate fields which are set and not accepted by the driver */
static int qede_flow_spec_validate_unused(struct qede_dev *edev,
					  struct ethtool_rx_flow_spec *fs)
{
	if (fs->flow_type & FLOW_MAC_EXT) {
		DP_INFO(edev, "Don't support MAC extensions\n");
		return -EOPNOTSUPP;
	}

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->h_ext.vlan_etype || fs->h_ext.vlan_tci)) {
		DP_INFO(edev, "Don't support vlan-based classification\n");
		return -EOPNOTSUPP;
	}

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->h_ext.data[0] || fs->h_ext.data[1])) {
		DP_INFO(edev, "Don't support user defined data\n");
		return -EOPNOTSUPP;
	}

	return 0;
}
static int qede_set_v4_tuple_to_profile(struct qede_dev *edev,
					struct qede_arfs_tuple *t)
{
	/* We must have only a full 4-tuple, an L4 port, a src IP or
	 * a dst IP as input.
	 */
	if (t->src_port && t->dst_port && t->src_ipv4 && t->dst_ipv4) {
		t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
	} else if (!t->src_port && t->dst_port &&
		   !t->src_ipv4 && !t->dst_ipv4) {
		t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
	} else if (!t->src_port && !t->dst_port &&
		   !t->dst_ipv4 && t->src_ipv4) {
		t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
	} else if (!t->src_port && !t->dst_port &&
		   t->dst_ipv4 && !t->src_ipv4) {
		t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
	} else {
		DP_INFO(edev, "Invalid N-tuple\n");
		return -EOPNOTSUPP;
	}

	t->ip_comp = qede_flow_spec_ipv4_cmp;
	t->build_hdr = qede_flow_build_ipv4_hdr;
	t->stringify = qede_flow_stringify_ipv4_hdr;

	return 0;
}
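
/* Mode selection summary for the v4 helper above and the v6 helper
 * below (x = field set, - = field zero):
 *
 *	src_port  dst_port  src_ip  dst_ip	mode
 *	   x	     x	       x       x	5_TUPLE
 *	   -	     x	       -       -	L4_PORT
 *	   -	     -	       x       -	IP_SRC
 *	   -	     -	       -       x	IP_DEST
 *
 * Every other combination is rejected with -EOPNOTSUPP.
 */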
static int qede_set_v6_tuple_to_profile(struct qede_dev *edev,
					struct qede_arfs_tuple *t,
					struct in6_addr *zaddr)
{
	/* We must have only a full 4-tuple, an L4 port, a src IP or
	 * a dst IP as input.
	 */
	if (t->src_port && t->dst_port &&
	    memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr)) &&
	    memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr))) {
		t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
	} else if (!t->src_port && t->dst_port &&
		   !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr)) &&
		   !memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr))) {
		t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
	} else if (!t->src_port && !t->dst_port &&
		   !memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr)) &&
		   memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) {
		t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
	} else if (!t->src_port && !t->dst_port &&
		   memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr)) &&
		   !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) {
		t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
	} else {
		DP_INFO(edev, "Invalid N-tuple\n");
		return -EOPNOTSUPP;
	}

	t->ip_comp = qede_flow_spec_ipv6_cmp;
	t->build_hdr = qede_flow_build_ipv6_hdr;

	return 0;
}
/* Must be called while qede lock is held */
static struct qede_arfs_fltr_node *
qede_flow_find_fltr(struct qede_dev *edev, struct qede_arfs_tuple *t)
{
	struct qede_arfs_fltr_node *fltr;
	struct hlist_node *temp;
	struct hlist_head *head;

	head = QEDE_ARFS_BUCKET_HEAD(edev, 0);

	hlist_for_each_entry_safe(fltr, temp, head, node) {
		if (fltr->tuple.ip_proto == t->ip_proto &&
		    fltr->tuple.src_port == t->src_port &&
		    fltr->tuple.dst_port == t->dst_port &&
		    t->ip_comp(&fltr->tuple, t))
			return fltr;
	}

	return NULL;
}
static void qede_flow_set_destination(struct qede_dev *edev,
				      struct qede_arfs_fltr_node *n,
				      struct ethtool_rx_flow_spec *fs)
{
	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
		n->b_is_drop = true;
		return;
	}

	n->vfid = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
	n->rxq_id = ethtool_get_flow_spec_ring(fs->ring_cookie);
	n->next_rxq_id = n->rxq_id;

	if (n->vfid)
		DP_VERBOSE(edev, QED_MSG_SP,
			   "Configuring N-tuple for VF 0x%02x\n", n->vfid - 1);
}
int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie)
{
	struct qede_arfs_fltr_node *fltr = NULL;
	int rc = -EPERM;

	__qede_lock(edev);
	if (!edev->arfs)
		goto unlock;

	fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0),
					 cookie);
	if (!fltr)
		goto unlock;

	qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id, false);

	rc = qede_poll_arfs_filter_config(edev, fltr);
	if (rc == 0)
		qede_dequeue_fltr_and_config_searcher(edev, fltr);

unlock:
	__qede_unlock(edev);
	return rc;
}
int qede_get_arfs_filter_count(struct qede_dev *edev)
{
	int count = 0;

	__qede_lock(edev);

	if (edev->arfs)
		count = edev->arfs->filter_count;

	__qede_unlock(edev);
	return count;
}
static int qede_parse_actions(struct qede_dev *edev,
			      struct flow_action *flow_action,
			      struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
	int i;

	if (!flow_action_has_entries(flow_action)) {
		DP_NOTICE(edev, "No actions received\n");
		return -EINVAL;
	}

	if (!flow_action_basic_hw_stats_check(flow_action, extack))
		return -EOPNOTSUPP;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			break;
		case FLOW_ACTION_QUEUE:
			if (act->queue.vf)
				break;

			if (act->queue.index >= QEDE_RSS_COUNT(edev)) {
				DP_INFO(edev, "Queue out-of-bounds\n");
				return -EINVAL;
			}
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}
static int
qede_flow_parse_ports(struct qede_dev *edev, struct flow_rule *rule,
		      struct qede_arfs_tuple *t)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		if ((match.key->src && match.mask->src != htons(U16_MAX)) ||
		    (match.key->dst && match.mask->dst != htons(U16_MAX))) {
			DP_NOTICE(edev, "Do not support ports masks\n");
			return -EINVAL;
		}

		t->src_port = match.key->src;
		t->dst_port = match.key->dst;
	}

	return 0;
}
static int
qede_flow_parse_v6_common(struct qede_dev *edev, struct flow_rule *rule,
			  struct qede_arfs_tuple *t)
{
	struct in6_addr zero_addr, addr;

	memset(&zero_addr, 0, sizeof(addr));
	memset(&addr, 0xff, sizeof(addr));

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		if ((memcmp(&match.key->src, &zero_addr, sizeof(addr)) &&
		     memcmp(&match.mask->src, &addr, sizeof(addr))) ||
		    (memcmp(&match.key->dst, &zero_addr, sizeof(addr)) &&
		     memcmp(&match.mask->dst, &addr, sizeof(addr)))) {
			DP_NOTICE(edev,
				  "Do not support IPv6 address prefix/mask\n");
			return -EINVAL;
		}

		memcpy(&t->src_ipv6, &match.key->src, sizeof(addr));
		memcpy(&t->dst_ipv6, &match.key->dst, sizeof(addr));
	}

	if (qede_flow_parse_ports(edev, rule, t))
		return -EINVAL;

	return qede_set_v6_tuple_to_profile(edev, t, &zero_addr);
}
static int
qede_flow_parse_v4_common(struct qede_dev *edev, struct flow_rule *rule,
			  struct qede_arfs_tuple *t)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		if ((match.key->src && match.mask->src != htonl(U32_MAX)) ||
		    (match.key->dst && match.mask->dst != htonl(U32_MAX))) {
			DP_NOTICE(edev, "Do not support ipv4 prefix/masks\n");
			return -EINVAL;
		}

		t->src_ipv4 = match.key->src;
		t->dst_ipv4 = match.key->dst;
	}

	if (qede_flow_parse_ports(edev, rule, t))
		return -EINVAL;

	return qede_set_v4_tuple_to_profile(edev, t);
}
static int
qede_flow_parse_tcp_v6(struct qede_dev *edev, struct flow_rule *rule,
		       struct qede_arfs_tuple *tuple)
{
	tuple->ip_proto = IPPROTO_TCP;
	tuple->eth_proto = htons(ETH_P_IPV6);

	return qede_flow_parse_v6_common(edev, rule, tuple);
}

static int
qede_flow_parse_tcp_v4(struct qede_dev *edev, struct flow_rule *rule,
		       struct qede_arfs_tuple *tuple)
{
	tuple->ip_proto = IPPROTO_TCP;
	tuple->eth_proto = htons(ETH_P_IP);

	return qede_flow_parse_v4_common(edev, rule, tuple);
}

static int
qede_flow_parse_udp_v6(struct qede_dev *edev, struct flow_rule *rule,
		       struct qede_arfs_tuple *tuple)
{
	tuple->ip_proto = IPPROTO_UDP;
	tuple->eth_proto = htons(ETH_P_IPV6);

	return qede_flow_parse_v6_common(edev, rule, tuple);
}

static int
qede_flow_parse_udp_v4(struct qede_dev *edev, struct flow_rule *rule,
		       struct qede_arfs_tuple *tuple)
{
	tuple->ip_proto = IPPROTO_UDP;
	tuple->eth_proto = htons(ETH_P_IP);

	return qede_flow_parse_v4_common(edev, rule, tuple);
}
static int
qede_parse_flow_attr(struct qede_dev *edev, __be16 proto,
		     struct flow_rule *rule, struct qede_arfs_tuple *tuple)
{
	struct flow_dissector *dissector = rule->match.dissector;
	int rc = -EINVAL;
	u8 ip_proto = 0;

	memset(tuple, 0, sizeof(*tuple));

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
		DP_NOTICE(edev, "Unsupported key set:0x%x\n",
			  dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (proto != htons(ETH_P_IP) &&
	    proto != htons(ETH_P_IPV6)) {
		DP_NOTICE(edev, "Unsupported proto=0x%x\n", proto);
		return -EPROTONOSUPPORT;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ip_proto = match.key->ip_proto;
	}

	if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IP))
		rc = qede_flow_parse_tcp_v4(edev, rule, tuple);
	else if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IPV6))
		rc = qede_flow_parse_tcp_v6(edev, rule, tuple);
	else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IP))
		rc = qede_flow_parse_udp_v4(edev, rule, tuple);
	else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IPV6))
		rc = qede_flow_parse_udp_v6(edev, rule, tuple);
	else
		DP_NOTICE(edev, "Invalid protocol request\n");

	return rc;
}
int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
			    struct flow_cls_offload *f)
{
	struct qede_arfs_fltr_node *n;
	int min_hlen, rc = -EINVAL;
	struct qede_arfs_tuple t;

	__qede_lock(edev);

	if (!edev->arfs)
		goto unlock;

	/* parse flower attribute and prepare filter */
	if (qede_parse_flow_attr(edev, proto, f->rule, &t))
		goto unlock;

	/* Validate profile mode and number of filters */
	if ((edev->arfs->filter_count && edev->arfs->mode != t.mode) ||
	    edev->arfs->filter_count == QEDE_RFS_MAX_FLTR) {
		DP_NOTICE(edev,
			  "Filter configuration invalidated, filter mode=0x%x, configured mode=0x%x, filter count=0x%x\n",
			  t.mode, edev->arfs->mode, edev->arfs->filter_count);
		goto unlock;
	}

	/* parse tc actions and get the vf_id */
	if (qede_parse_actions(edev, &f->rule->action, f->common.extack))
		goto unlock;

	if (qede_flow_find_fltr(edev, &t)) {
		rc = -EEXIST;
		goto unlock;
	}

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n) {
		rc = -ENOMEM;
		goto unlock;
	}

	min_hlen = qede_flow_get_min_header_size(&t);

	n->data = kzalloc(min_hlen, GFP_KERNEL);
	if (!n->data) {
		kfree(n);
		rc = -ENOMEM;
		goto unlock;
	}

	memcpy(&n->tuple, &t, sizeof(n->tuple));

	n->buf_len = min_hlen;
	n->b_is_drop = true;
	n->sw_id = f->cookie;

	n->tuple.build_hdr(&n->tuple, n->data);

	rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
	if (rc)
		goto unlock;

	qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
	rc = qede_poll_arfs_filter_config(edev, n);

unlock:
	__qede_unlock(edev);
	return rc;
}
static int qede_flow_spec_validate(struct qede_dev *edev,
				   struct flow_action *flow_action,
				   struct qede_arfs_tuple *t,
				   __u32 location)
{
	if (location >= QEDE_RFS_MAX_FLTR) {
		DP_INFO(edev, "Location out-of-bounds\n");
		return -EINVAL;
	}

	/* Check location isn't already in use */
	if (test_bit(location, edev->arfs->arfs_fltr_bmap)) {
		DP_INFO(edev, "Location already in use\n");
		return -EINVAL;
	}

	/* Check if the filtering-mode could support the filter */
	if (edev->arfs->filter_count &&
	    edev->arfs->mode != t->mode) {
		DP_INFO(edev,
			"flow_spec would require filtering mode %08x, but %08x is configured\n",
			t->mode, edev->arfs->mode);
		return -EINVAL;
	}

	if (qede_parse_actions(edev, flow_action, NULL))
		return -EINVAL;

	return 0;
}
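
/* The ethtool path below reuses the flow_rule parsing code: the legacy
 * ethtool_rx_flow_spec is first translated into a struct flow_rule via
 * ethtool_rx_flow_rule_create(), parsed with qede_parse_flow_attr()
 * exactly like a tc-flower rule, then validated and destroyed again.
 */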
static int qede_flow_spec_to_rule(struct qede_dev *edev,
				  struct qede_arfs_tuple *t,
				  struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_rx_flow_spec_input input = {};
	struct ethtool_rx_flow_rule *flow;
	__be16 proto;
	int err = 0;

	if (qede_flow_spec_validate_unused(edev, fs))
		return -EOPNOTSUPP;

	switch ((fs->flow_type & ~FLOW_EXT)) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		proto = htons(ETH_P_IP);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		proto = htons(ETH_P_IPV6);
		break;
	default:
		DP_VERBOSE(edev, NETIF_MSG_IFUP,
			   "Can't support flow of type %08x\n", fs->flow_type);
		return -EOPNOTSUPP;
	}

	input.fs = fs;
	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (qede_parse_flow_attr(edev, proto, flow->rule, t)) {
		err = -EINVAL;
		goto err_out;
	}

	/* Make sure location is valid and filter isn't already set */
	err = qede_flow_spec_validate(edev, &flow->rule->action, t,
				      fs->location);
err_out:
	ethtool_rx_flow_rule_destroy(flow);
	return err;
}
int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
{
	struct ethtool_rx_flow_spec *fsp = &info->fs;
	struct qede_arfs_fltr_node *n;
	struct qede_arfs_tuple t;
	int min_hlen, rc;

	__qede_lock(edev);

	if (!edev->arfs) {
		rc = -EPERM;
		goto unlock;
	}

	/* Translate the flow specification into something fitting our DB */
	rc = qede_flow_spec_to_rule(edev, &t, fsp);
	if (rc)
		goto unlock;

	if (qede_flow_find_fltr(edev, &t)) {
		rc = -EINVAL;
		goto unlock;
	}

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n) {
		rc = -ENOMEM;
		goto unlock;
	}

	min_hlen = qede_flow_get_min_header_size(&t);
	n->data = kzalloc(min_hlen, GFP_KERNEL);
	if (!n->data) {
		kfree(n);
		rc = -ENOMEM;
		goto unlock;
	}

	n->sw_id = fsp->location;
	set_bit(n->sw_id, edev->arfs->arfs_fltr_bmap);
	n->buf_len = min_hlen;

	memcpy(&n->tuple, &t, sizeof(n->tuple));

	qede_flow_set_destination(edev, n, fsp);

	/* Build a minimal header according to the flow */
	n->tuple.build_hdr(&n->tuple, n->data);

	rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
	if (rc)
		goto unlock;

	qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
	rc = qede_poll_arfs_filter_config(edev, n);
unlock:
	__qede_unlock(edev);
	return rc;
}