/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include <linux/qed/qed_eth_if.h>
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_l2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41

struct qed_l2_info {
	u32 queues;
	unsigned long **pp_qid_usage;

	/* The lock is meant to synchronize access to the qid usage */
	struct mutex lock;
};
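
/* Each queue-zone may host several queue CIDs. qed_l2_alloc() reserves a
 * bitmap of MAX_QUEUES_PER_QZONE bits per zone; the qid_usage helpers
 * further below hand out and reclaim indices within that bitmap under
 * p_l2_info->lock.
 */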
int qed_l2_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_l2_info *p_l2_info;
	unsigned long **pp_qids;
	u32 i;

	if (!QED_IS_L2_PERSONALITY(p_hwfn))
		return 0;

	p_l2_info = kzalloc(sizeof(*p_l2_info), GFP_KERNEL);
	if (!p_l2_info)
		return -ENOMEM;
	p_hwfn->p_l2_info = p_l2_info;

	if (IS_PF(p_hwfn->cdev)) {
		p_l2_info->queues = RESC_NUM(p_hwfn, QED_L2_QUEUE);
	} else {
		u8 rx = 0, tx = 0;

		qed_vf_get_num_rxqs(p_hwfn, &rx);
		qed_vf_get_num_txqs(p_hwfn, &tx);

		p_l2_info->queues = max_t(u8, rx, tx);
	}

	pp_qids = kzalloc(sizeof(unsigned long *) * p_l2_info->queues,
			  GFP_KERNEL);
	if (!pp_qids)
		return -ENOMEM;
	p_l2_info->pp_qid_usage = pp_qids;

	for (i = 0; i < p_l2_info->queues; i++) {
		pp_qids[i] = kzalloc(MAX_QUEUES_PER_QZONE / 8, GFP_KERNEL);
		if (!pp_qids[i])
			return -ENOMEM;
	}

	return 0;
}
void qed_l2_setup(struct qed_hwfn *p_hwfn)
{
	if (!QED_IS_L2_PERSONALITY(p_hwfn))
		return;

	mutex_init(&p_hwfn->p_l2_info->lock);
}
void qed_l2_free(struct qed_hwfn *p_hwfn)
{
	u32 i;

	if (!QED_IS_L2_PERSONALITY(p_hwfn))
		return;

	if (!p_hwfn->p_l2_info)
		return;

	if (!p_hwfn->p_l2_info->pp_qid_usage)
		goto out_l2_info;

	/* Free until hit first uninitialized entry */
	for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
		if (!p_hwfn->p_l2_info->pp_qid_usage[i])
			break;
		kfree(p_hwfn->p_l2_info->pp_qid_usage[i]);
	}

	kfree(p_hwfn->p_l2_info->pp_qid_usage);

out_l2_info:
	kfree(p_hwfn->p_l2_info);
	p_hwfn->p_l2_info = NULL;
}
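
/* Reserve the lowest free qid index within the queue-zone's usage bitmap.
 * Returns false if the requested zone is out of range or fully occupied.
 */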
static bool qed_eth_queue_qid_usage_add(struct qed_hwfn *p_hwfn,
					struct qed_queue_cid *p_cid)
{
	struct qed_l2_info *p_l2_info = p_hwfn->p_l2_info;
	u16 queue_id = p_cid->rel.queue_id;
	bool b_rc = true;
	u8 first;

	mutex_lock(&p_l2_info->lock);

	if (queue_id >= p_l2_info->queues) {
		DP_NOTICE(p_hwfn,
			  "Requested to increase usage for qzone %04x out of %08x\n",
			  queue_id, p_l2_info->queues);
		b_rc = false;
		goto out;
	}

	first = (u8)find_first_zero_bit(p_l2_info->pp_qid_usage[queue_id],
					MAX_QUEUES_PER_QZONE);
	if (first >= MAX_QUEUES_PER_QZONE) {
		b_rc = false;
		goto out;
	}

	__set_bit(first, p_l2_info->pp_qid_usage[queue_id]);
	p_cid->qid_usage_idx = first;

out:
	mutex_unlock(&p_l2_info->lock);
	return b_rc;
}
static void qed_eth_queue_qid_usage_del(struct qed_hwfn *p_hwfn,
					struct qed_queue_cid *p_cid)
{
	mutex_lock(&p_hwfn->p_l2_info->lock);

	clear_bit(p_cid->qid_usage_idx,
		  p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]);

	mutex_unlock(&p_hwfn->p_l2_info->lock);
}
void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
			       struct qed_queue_cid *p_cid)
{
	bool b_legacy_vf = !!(p_cid->vf_legacy & QED_QCID_LEGACY_VF_CID);

	if (IS_PF(p_hwfn->cdev) && !b_legacy_vf)
		_qed_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);

	/* For PF's VFs we maintain the index inside queue-zone in IOV */
	if (p_cid->vfid == QED_QUEUE_CID_SELF)
		qed_eth_queue_qid_usage_del(p_hwfn, p_cid);

	vfree(p_cid);
}
/* The internal is only meant to be directly called by PFs initializing CIDs
 * for their VFs.
 */
static struct qed_queue_cid *
_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
		      u16 opaque_fid,
		      u32 cid,
		      struct qed_queue_start_common_params *p_params,
		      bool b_is_rx,
		      struct qed_queue_cid_vf_params *p_vf_params)
{
	struct qed_queue_cid *p_cid;
	int rc;

	p_cid = vmalloc(sizeof(*p_cid));
	if (!p_cid)
		return NULL;
	memset(p_cid, 0, sizeof(*p_cid));

	p_cid->opaque_fid = opaque_fid;
	p_cid->cid = cid;
	p_cid->p_owner = p_hwfn;

	/* Fill in parameters */
	p_cid->rel.vport_id = p_params->vport_id;
	p_cid->rel.queue_id = p_params->queue_id;
	p_cid->rel.stats_id = p_params->stats_id;
	p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
	p_cid->b_is_rx = b_is_rx;
	p_cid->sb_idx = p_params->sb_idx;

	/* Fill-in bits related to VFs' queues if information was provided */
	if (p_vf_params) {
		p_cid->vfid = p_vf_params->vfid;
		p_cid->vf_qid = p_vf_params->vf_qid;
		p_cid->vf_legacy = p_vf_params->vf_legacy;
	} else {
		p_cid->vfid = QED_QUEUE_CID_SELF;
	}

	/* Don't try calculating the absolute indices for VFs */
	if (IS_VF(p_hwfn->cdev)) {
		p_cid->abs = p_cid->rel;
		goto out;
	}

	/* Calculate the engine-absolute indices of the resources.
	 * This would guarantee they're valid later on.
	 * In some cases [SBs] we already have the right values.
	 */
	rc = qed_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
	if (rc)
		goto fail;

	rc = qed_fw_l2_queue(p_hwfn, p_cid->rel.queue_id, &p_cid->abs.queue_id);
	if (rc)
		goto fail;

	/* In case of a PF configuring its VF's queues, the stats-id is already
	 * absolute [since there's a single index that's suitable per-VF].
	 */
	if (p_cid->vfid == QED_QUEUE_CID_SELF) {
		rc = qed_fw_vport(p_hwfn, p_cid->rel.stats_id,
				  &p_cid->abs.stats_id);
		if (rc)
			goto fail;
	} else {
		p_cid->abs.stats_id = p_cid->rel.stats_id;
	}

out:
	/* VF-images have provided the qid_usage_idx on their own.
	 * Otherwise, we need to allocate a unique one.
	 */
	if (!p_vf_params) {
		if (!qed_eth_queue_qid_usage_add(p_hwfn, p_cid))
			goto fail;
	} else {
		p_cid->qid_usage_idx = p_vf_params->qid_usage_idx;
	}

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
		   p_cid->opaque_fid,
		   p_cid->cid,
		   p_cid->rel.vport_id,
		   p_cid->abs.vport_id,
		   p_cid->rel.queue_id,
		   p_cid->qid_usage_idx,
		   p_cid->abs.queue_id,
		   p_cid->rel.stats_id,
		   p_cid->abs.stats_id, p_cid->sb_igu_id, p_cid->sb_idx);

	return p_cid;

fail:
	vfree(p_cid);
	return NULL;
}
struct qed_queue_cid *
qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
		     u16 opaque_fid,
		     struct qed_queue_start_common_params *p_params,
		     bool b_is_rx,
		     struct qed_queue_cid_vf_params *p_vf_params)
{
	struct qed_queue_cid *p_cid;
	u8 vfid = QED_CXT_PF_CID;
	bool b_legacy_vf = false;
	u32 cid = 0;

	/* In case of legacy VFs, The CID can be derived from the additional
	 * VF parameters - the VF assumes queue X uses CID X, so we can simply
	 * use the vf_qid for this purpose as well.
	 */
	if (p_vf_params) {
		vfid = p_vf_params->vfid;

		if (p_vf_params->vf_legacy & QED_QCID_LEGACY_VF_CID) {
			b_legacy_vf = true;
			cid = p_vf_params->vf_qid;
		}
	}

	/* Get a unique firmware CID for this queue, in case it's a PF.
	 * VF's don't need a CID as the queue configuration will be done
	 * by PF.
	 */
	if (IS_PF(p_hwfn->cdev) && !b_legacy_vf) {
		if (_qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
					 &cid, vfid)) {
			DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
			return NULL;
		}
	}

	p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
				      p_params, b_is_rx, p_vf_params);
	if (!p_cid && IS_PF(p_hwfn->cdev) && !b_legacy_vf)
		_qed_cxt_release_cid(p_hwfn, cid, vfid);

	return p_cid;
}
static struct qed_queue_cid *
qed_eth_queue_to_cid_pf(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			bool b_is_rx,
			struct qed_queue_start_common_params *p_params)
{
	return qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx,
				    NULL);
}
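
/* The vport/queue configuration below is carried out via "ramrods" -
 * slowpath messages posted to firmware through the slowpath queue (SPQ).
 * The recurring pattern is: translate relative indices into engine-absolute
 * ones, acquire an SPQ entry via qed_sp_init_request(), fill the ramrod
 * data and post it with qed_spq_post(). An illustrative PF bring-up
 * sequence (a sketch, not a verbatim call chain from this driver) is:
 *	qed_sp_eth_vport_start(p_hwfn, &start_params);
 *	qed_eth_rx_queue_start(...) / qed_eth_tx_queue_start(...);
 *	qed_sp_vport_update(p_hwfn, &update_params, ...);
 */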
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_start_params *p_params)
{
	struct vport_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u8 abs_vport_id = 0;
	int rc = -EINVAL;
	u16 rx_mode = 0;

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_start;
	p_ramrod->vport_id = abs_vport_id;

	p_ramrod->mtu = cpu_to_le16(p_params->mtu);
	p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts;
	p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
	p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
	p_ramrod->untagged = p_params->only_untagged;

	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);

	p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);

	/* TPA related fields */
	memset(&p_ramrod->tpa_param, 0, sizeof(struct eth_vport_tpa_param));

	p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;

	switch (p_params->tpa_mode) {
	case QED_TPA_MODE_GRO:
		p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
		p_ramrod->tpa_param.tpa_max_size = (u16)-1;
		p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
		p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
		p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
		p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
		break;
	default:
		break;
	}

	p_ramrod->tx_switching_en = p_params->tx_switching;

	p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
	p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;

	/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
	p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
						  p_params->concrete_fid);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
			      struct qed_sp_vport_start_params *p_params)
{
	if (IS_VF(p_hwfn->cdev)) {
		return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
					     p_params->mtu,
					     p_params->remove_inner_vlan,
					     p_params->tpa_mode,
					     p_params->max_buffers_per_cqe,
					     p_params->only_untagged);
	}

	return qed_sp_eth_vport_start(p_hwfn, p_params);
}
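
/* Translate driver-level RSS parameters into the firmware's
 * eth_vport_rss_config: engine id, capability bits, an indirection table
 * of absolute rx-queue ids, and the hash key.
 */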
static int
qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_rss_params *p_rss)
{
	struct eth_vport_rss_config *p_config;
	u16 capabilities = 0;
	int i, table_size;
	int rc = 0;

	if (!p_rss) {
		p_ramrod->common.update_rss_flg = 0;
		return rc;
	}
	p_config = &p_ramrod->rss_config;

	BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE != ETH_RSS_IND_TABLE_ENTRIES_NUM);

	rc = qed_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
	if (rc)
		return rc;

	p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
	p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
	p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
	p_config->update_rss_key = p_rss->update_rss_key;

	p_config->rss_mode = p_rss->rss_enable ?
			     ETH_VPORT_RSS_MODE_REGULAR :
			     ETH_VPORT_RSS_MODE_DISABLED;

	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV4));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV6));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV4_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV6_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV4_UDP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV6_UDP));
	p_config->tbl_size = p_rss->rss_table_size_log;

	p_config->capabilities = cpu_to_le16(capabilities);

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
		   p_ramrod->common.update_rss_flg,
		   p_config->rss_mode,
		   p_config->update_rss_capabilities,
		   p_config->capabilities,
		   p_config->update_rss_ind_table, p_config->update_rss_key);

	table_size = min_t(int, QED_RSS_IND_TABLE_SIZE,
			   1 << p_config->tbl_size);
	for (i = 0; i < table_size; i++) {
		struct qed_queue_cid *p_queue = p_rss->rss_ind_table[i];

		if (!p_queue)
			return -EINVAL;

		p_config->indirection_table[i] =
		    cpu_to_le16(p_queue->abs.queue_id);
	}

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "Configured RSS indirection table [%d entries]:\n",
		   table_size);
	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i += 0x10) {
		DP_VERBOSE(p_hwfn,
			   NETIF_MSG_IFUP,
			   "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
			   le16_to_cpu(p_config->indirection_table[i]),
			   le16_to_cpu(p_config->indirection_table[i + 1]),
			   le16_to_cpu(p_config->indirection_table[i + 2]),
			   le16_to_cpu(p_config->indirection_table[i + 3]),
			   le16_to_cpu(p_config->indirection_table[i + 4]),
			   le16_to_cpu(p_config->indirection_table[i + 5]),
			   le16_to_cpu(p_config->indirection_table[i + 6]),
			   le16_to_cpu(p_config->indirection_table[i + 7]),
			   le16_to_cpu(p_config->indirection_table[i + 8]),
			   le16_to_cpu(p_config->indirection_table[i + 9]),
			   le16_to_cpu(p_config->indirection_table[i + 10]),
			   le16_to_cpu(p_config->indirection_table[i + 11]),
			   le16_to_cpu(p_config->indirection_table[i + 12]),
			   le16_to_cpu(p_config->indirection_table[i + 13]),
			   le16_to_cpu(p_config->indirection_table[i + 14]),
			   le16_to_cpu(p_config->indirection_table[i + 15]));
	}

	for (i = 0; i < 10; i++)
		p_config->rss_key[i] = cpu_to_le32(p_rss->rss_key[i]);

	return rc;
}
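
/* Map the QED_ACCEPT_* flags onto the firmware's per-vport Rx/Tx mode
 * bit-fields: DROP_ALL is the inverse of accepting matched or unmatched
 * traffic, while ACCEPT_ALL requires both the matched and unmatched flags.
 */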
static void
qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
			  struct vport_update_ramrod_data *p_ramrod,
			  struct qed_filter_accept_flags accept_flags)
{
	p_ramrod->common.update_rx_mode_flg =
		accept_flags.update_rx_mode_config;
	p_ramrod->common.update_tx_mode_flg =
		accept_flags.update_tx_mode_config;

	/* Set Rx mode accept flags */
	if (p_ramrod->common.update_rx_mode_flg) {
		u8 accept_filter = accept_flags.rx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
			  !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->rx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->rx_mode.state = 0x%x\n", state);
	}

	/* Set Tx mode accept flags */
	if (p_ramrod->common.update_tx_mode_flg) {
		u8 accept_filter = accept_flags.tx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->tx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->tx_mode.state = 0x%x\n", state);
	}
}
static void
qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
			    struct vport_update_ramrod_data *p_ramrod,
			    struct qed_sge_tpa_params *p_params)
{
	struct eth_vport_tpa_param *p_tpa;

	if (!p_params) {
		p_ramrod->common.update_tpa_param_flg = 0;
		p_ramrod->common.update_tpa_en_flg = 0;
		return;
	}

	p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
	p_tpa = &p_ramrod->tpa_param;
	p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
	p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
	p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
	p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;

	p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
	p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
	p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
	p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
	p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
	p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
	p_tpa->tpa_max_size = p_params->tpa_max_size;
	p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
	p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
}
static void
qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_sp_vport_update_params *p_params)
{
	int i;

	memset(&p_ramrod->approx_mcast.bins, 0,
	       sizeof(p_ramrod->approx_mcast.bins));

	if (!p_params->update_approx_mcast_flg)
		return;

	p_ramrod->common.update_approx_mcast_flg = 1;
	for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
		u32 *p_bins = p_params->bins;

		p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
	}
}
int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
			struct qed_sp_vport_update_params *p_params,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_rss_params *p_rss_params = p_params->rss_params;
	struct vport_update_ramrod_data_cmn *p_cmn;
	struct qed_sp_init_data init_data;
	struct vport_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	u8 abs_vport_id = 0, val;
	int rc = -EINVAL;

	if (IS_VF(p_hwfn->cdev)) {
		rc = qed_vf_pf_vport_update(p_hwfn, p_params);
		return rc;
	}

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	/* Copy input params to ramrod according to FW struct */
	p_ramrod = &p_ent->ramrod.vport_update;
	p_cmn = &p_ramrod->common;

	p_cmn->vport_id = abs_vport_id;
	p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
	p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
	p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
	p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
	p_cmn->accept_any_vlan = p_params->accept_any_vlan;
	val = p_params->update_accept_any_vlan_flg;
	p_cmn->update_accept_any_vlan_flg = val;

	p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
	val = p_params->update_inner_vlan_removal_flg;
	p_cmn->update_inner_vlan_removal_en_flg = val;

	p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
	val = p_params->update_default_vlan_enable_flg;
	p_cmn->update_default_vlan_en_flg = val;

	p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan);
	p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;

	p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;

	p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
	p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;

	p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
	val = p_params->update_anti_spoofing_en_flg;
	p_ramrod->common.update_anti_spoofing_en_flg = val;

	rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
	if (rc) {
		/* Return spq entry which is taken in qed_sp_init_request()*/
		qed_spq_return_entry(p_hwfn, p_ent);
		return rc;
	}

	if (p_params->update_ctl_frame_check) {
		p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en;
		p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en;
	}

	/* Update mcast bins for VFs, PF doesn't use this functionality */
	qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);

	qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
	qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params);
	return qed_spq_post(p_hwfn, p_ent, NULL);
}
int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
{
	struct vport_stop_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u8 abs_vport_id = 0;
	int rc;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_pf_vport_stop(p_hwfn);

	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_stop;
	p_ramrod->vport_id = abs_vport_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int
qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn,
		       struct qed_filter_accept_flags *p_accept_flags)
{
	struct qed_sp_vport_update_params s_params;

	memset(&s_params, 0, sizeof(s_params));
	memcpy(&s_params.accept_flags, p_accept_flags,
	       sizeof(struct qed_filter_accept_flags));

	return qed_vf_pf_vport_update(p_hwfn, &s_params);
}
static int qed_filter_accept_cmd(struct qed_dev *cdev,
				 u8 vport,
				 struct qed_filter_accept_flags accept_flags,
				 u8 update_accept_any_vlan,
				 u8 accept_any_vlan,
				 enum spq_mode comp_mode,
				 struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_sp_vport_update_params vport_update_params;
	int i, rc;

	/* Prepare and send the vport rx_mode change */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = vport;
	vport_update_params.accept_flags = accept_flags;
	vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
	vport_update_params.accept_any_vlan = accept_any_vlan;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

		if (IS_VF(cdev)) {
			rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags);
			if (rc)
				return rc;
			continue;
		}

		rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
					 comp_mode, p_comp_data);
		if (rc) {
			DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
			return rc;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
			   accept_flags.rx_accept_filter,
			   accept_flags.tx_accept_filter);
		if (update_accept_any_vlan)
			DP_VERBOSE(p_hwfn, QED_MSG_SP,
				   "accept_any_vlan=%d configured\n",
				   accept_any_vlan);
	}

	return 0;
}
int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
			     struct qed_queue_cid *p_cid,
			     u16 bd_max_bytes,
			     dma_addr_t bd_chain_phys_addr,
			     dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
{
	struct rx_queue_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
		   p_cid->opaque_fid, p_cid->cid,
		   p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->sb_igu_id);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
	p_ramrod->sb_index = p_cid->sb_idx;
	p_ramrod->vport_id = p_cid->abs.vport_id;
	p_ramrod->stats_counter_id = p_cid->abs.stats_id;
	p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
	p_ramrod->complete_cqe_flg = 0;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
	DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);

	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);

	if (p_cid->vfid != QED_QUEUE_CID_SELF) {
		bool b_legacy_vf = !!(p_cid->vf_legacy &
				      QED_QCID_LEGACY_VF_RX_PROD);

		p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Queue%s is meant for VF rxq[%02x]\n",
			   b_legacy_vf ? " [legacy]" : "", p_cid->vf_qid);
		p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf;
	}

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int
qed_eth_pf_rx_queue_start(struct qed_hwfn *p_hwfn,
			  struct qed_queue_cid *p_cid,
			  u16 bd_max_bytes,
			  dma_addr_t bd_chain_phys_addr,
			  dma_addr_t cqe_pbl_addr,
			  u16 cqe_pbl_size, void __iomem **pp_prod)
{
	u32 init_prod_val = 0;

	*pp_prod = p_hwfn->regview +
		   GTT_BAR0_MAP_REG_MSDM_RAM +
		   MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);

	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
			  (u32 *)(&init_prod_val));

	return qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
					bd_max_bytes,
					bd_chain_phys_addr,
					cqe_pbl_addr, cqe_pbl_size);
}
static int
qed_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
		       u16 opaque_fid,
		       struct qed_queue_start_common_params *p_params,
		       u16 bd_max_bytes,
		       dma_addr_t bd_chain_phys_addr,
		       dma_addr_t cqe_pbl_addr,
		       u16 cqe_pbl_size,
		       struct qed_rxq_start_ret_params *p_ret_params)
{
	struct qed_queue_cid *p_cid;
	int rc;

	/* Allocate a CID for the queue */
	p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params);
	if (!p_cid)
		return -ENOMEM;

	if (IS_PF(p_hwfn->cdev)) {
		rc = qed_eth_pf_rx_queue_start(p_hwfn, p_cid,
					       bd_max_bytes,
					       bd_chain_phys_addr,
					       cqe_pbl_addr, cqe_pbl_size,
					       &p_ret_params->p_prod);
	} else {
		rc = qed_vf_pf_rxq_start(p_hwfn, p_cid,
					 bd_max_bytes,
					 bd_chain_phys_addr,
					 cqe_pbl_addr,
					 cqe_pbl_size, &p_ret_params->p_prod);
	}

	/* On success, provide the caller with a handle to the queue */
	if (rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	else
		p_ret_params->p_handle = (void *)p_cid;

	return rc;
}
int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
				void **pp_rxq_handles,
				u8 num_rxqs,
				u8 complete_cqe_flg,
				u8 complete_event_flg,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	struct rx_queue_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	struct qed_queue_cid *p_cid;
	int rc = -EINVAL;
	u8 i;

	memset(&init_data, 0, sizeof(init_data));
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	for (i = 0; i < num_rxqs; i++) {
		p_cid = ((struct qed_queue_cid **)pp_rxq_handles)[i];

		/* Get SPQ entry */
		init_data.cid = p_cid->cid;
		init_data.opaque_fid = p_cid->opaque_fid;

		rc = qed_sp_init_request(p_hwfn, &p_ent,
					 ETH_RAMROD_RX_QUEUE_UPDATE,
					 PROTOCOLID_ETH, &init_data);
		if (rc)
			return rc;

		p_ramrod = &p_ent->ramrod.rx_queue_update;
		p_ramrod->vport_id = p_cid->abs.vport_id;

		p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
		p_ramrod->complete_cqe_flg = complete_cqe_flg;
		p_ramrod->complete_event_flg = complete_event_flg;

		rc = qed_spq_post(p_hwfn, p_ent, NULL);
		if (rc)
			return rc;
	}

	return rc;
}
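
/* When stopping an Rx queue, the completion flags below decide where
 * firmware reports the result: a PF's own queue normally completes on the
 * CQE ring (unless an EQ completion was explicitly requested), while a
 * VF-owned queue always completes via the event queue so the PF can relay
 * the answer.
 */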
static int
qed_eth_pf_rx_queue_stop(struct qed_hwfn *p_hwfn,
			 struct qed_queue_cid *p_cid,
			 bool b_eq_completion_only, bool b_cqe_completion)
{
	struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_stop;
	p_ramrod->vport_id = p_cid->abs.vport_id;
	p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);

	/* Cleaning the queue requires the completion to arrive there.
	 * In addition, VFs require the answer to come as eqe to PF.
	 */
	p_ramrod->complete_cqe_flg = ((p_cid->vfid == QED_QUEUE_CID_SELF) &&
				      !b_eq_completion_only) ||
				     b_cqe_completion;
	p_ramrod->complete_event_flg = (p_cid->vfid != QED_QUEUE_CID_SELF) ||
				       b_eq_completion_only;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
int qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
			  void *p_rxq,
			  bool eq_completion_only, bool cqe_completion)
{
	struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_rxq;
	int rc = -EINVAL;

	if (IS_PF(p_hwfn->cdev))
		rc = qed_eth_pf_rx_queue_stop(p_hwfn, p_cid,
					      eq_completion_only,
					      cqe_completion);
	else
		rc = qed_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);

	if (!rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	return rc;
}
static int
qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
			 struct qed_queue_cid *p_cid,
			 dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id)
{
	struct tx_queue_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.tx_queue_start;
	p_ramrod->vport_id = p_cid->abs.vport_id;

	p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
	p_ramrod->sb_index = p_cid->sb_idx;
	p_ramrod->stats_counter_id = p_cid->abs.stats_id;

	p_ramrod->queue_zone_id = cpu_to_le16(p_cid->abs.queue_id);
	p_ramrod->same_as_last_id = cpu_to_le16(p_cid->abs.queue_id);

	p_ramrod->pbl_size = cpu_to_le16(pbl_size);
	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);

	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int
qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn,
			  struct qed_queue_cid *p_cid,
			  u8 tc,
			  dma_addr_t pbl_addr,
			  u16 pbl_size, void __iomem **pp_doorbell)
{
	int rc;

	rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
				      pbl_addr, pbl_size,
				      qed_get_cm_pq_idx_mcos(p_hwfn, tc));
	if (rc)
		return rc;

	/* Provide the caller with the necessary return values */
	*pp_doorbell = p_hwfn->doorbells +
		       qed_db_addr(p_cid->cid, DQ_DEMS_LEGACY);

	return 0;
}
static int
qed_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
		       u16 opaque_fid,
		       struct qed_queue_start_common_params *p_params,
		       u8 tc,
		       dma_addr_t pbl_addr,
		       u16 pbl_size,
		       struct qed_txq_start_ret_params *p_ret_params)
{
	struct qed_queue_cid *p_cid;
	int rc;

	p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params);
	if (!p_cid)
		return -EINVAL;

	if (IS_PF(p_hwfn->cdev))
		rc = qed_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
					       pbl_addr, pbl_size,
					       &p_ret_params->p_doorbell);
	else
		rc = qed_vf_pf_txq_start(p_hwfn, p_cid,
					 pbl_addr, pbl_size,
					 &p_ret_params->p_doorbell);

	if (rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	else
		p_ret_params->p_handle = (void *)p_cid;

	return rc;
}
static int
qed_eth_pf_tx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_handle)
{
	struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_handle;
	int rc;

	if (IS_PF(p_hwfn->cdev))
		rc = qed_eth_pf_tx_queue_stop(p_hwfn, p_cid);
	else
		rc = qed_vf_pf_txq_stop(p_hwfn, p_cid);

	if (!rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	return rc;
}
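
/* Unicast filter configuration. The driver opcode is translated into
 * firmware filter actions; MOVE and REPLACE are expressed as two chained
 * filter commands (a remove followed by an add) within a single ramrod.
 */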
static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode)
{
	enum eth_filter_action action = MAX_ETH_FILTER_ACTION;

	switch (opcode) {
	case QED_FILTER_ADD:
		action = ETH_FILTER_ACTION_ADD;
		break;
	case QED_FILTER_REMOVE:
		action = ETH_FILTER_ACTION_REMOVE;
		break;
	case QED_FILTER_FLUSH:
		action = ETH_FILTER_ACTION_REMOVE_ALL;
		break;
	default:
		action = MAX_ETH_FILTER_ACTION;
	}

	return action;
}
static int
qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			struct qed_filter_ucast *p_filter_cmd,
			struct vport_filter_update_ramrod_data **pp_ramrod,
			struct qed_spq_entry **pp_ent,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	u8 vport_to_add_to = 0, vport_to_remove_from = 0;
	struct vport_filter_update_ramrod_data *p_ramrod;
	struct eth_filter_cmd *p_first_filter;
	struct eth_filter_cmd *p_second_filter;
	struct qed_sp_init_data init_data;
	enum eth_filter_action action;
	int rc;

	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
			  &vport_to_remove_from);
	if (rc)
		return rc;

	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
			  &vport_to_add_to);
	if (rc)
		return rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, pp_ent,
				 ETH_RAMROD_FILTERS_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	*pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
	p_ramrod = *pp_ramrod;
	p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
	p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;

	switch (p_filter_cmd->opcode) {
	case QED_FILTER_REPLACE:
	case QED_FILTER_MOVE:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
	default:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
	}

	p_first_filter = &p_ramrod->filter_cmds[0];
	p_second_filter = &p_ramrod->filter_cmds[1];

	switch (p_filter_cmd->type) {
	case QED_FILTER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
	case QED_FILTER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
	case QED_FILTER_MAC_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
	case QED_FILTER_INNER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
	case QED_FILTER_INNER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
	case QED_FILTER_INNER_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
	case QED_FILTER_INNER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
		break;
	case QED_FILTER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
	case QED_FILTER_VNI:
		p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
		qed_set_fw_mac_addr(&p_first_filter->mac_msb,
				    &p_first_filter->mac_mid,
				    &p_first_filter->mac_lsb,
				    (u8 *)p_filter_cmd->mac);
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
		p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);

	if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_VNI))
		p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);

	if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
		p_second_filter->type = p_first_filter->type;
		p_second_filter->mac_msb = p_first_filter->mac_msb;
		p_second_filter->mac_mid = p_first_filter->mac_mid;
		p_second_filter->mac_lsb = p_first_filter->mac_lsb;
		p_second_filter->vlan_id = p_first_filter->vlan_id;
		p_second_filter->vni = p_first_filter->vni;

		p_first_filter->action = ETH_FILTER_ACTION_REMOVE;

		p_first_filter->vport_id = vport_to_remove_from;

		p_second_filter->action = ETH_FILTER_ACTION_ADD;
		p_second_filter->vport_id = vport_to_add_to;
	} else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
		p_first_filter->vport_id = vport_to_add_to;
		memcpy(p_second_filter, p_first_filter,
		       sizeof(*p_second_filter));
		p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
		p_second_filter->action = ETH_FILTER_ACTION_ADD;
	} else {
		action = qed_filter_action(p_filter_cmd->opcode);

		if (action == MAX_ETH_FILTER_ACTION) {
			DP_NOTICE(p_hwfn,
				  "%d is not supported yet\n",
				  p_filter_cmd->opcode);
			return -EINVAL;
		}

		p_first_filter->action = action;
		p_first_filter->vport_id = (p_filter_cmd->opcode ==
					    QED_FILTER_REMOVE) ?
					   vport_to_remove_from :
					   vport_to_add_to;
	}

	return 0;
}
int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
			    u16 opaque_fid,
			    struct qed_filter_ucast *p_filter_cmd,
			    enum spq_mode comp_mode,
			    struct qed_spq_comp_cb *p_comp_data)
{
	struct vport_filter_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct eth_filter_cmd_header *p_header;
	int rc;

	rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
				     &p_ramrod, &p_ent,
				     comp_mode, p_comp_data);
	if (rc) {
		DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
		return rc;
	}
	p_header = &p_ramrod->filter_cmd_hdr;
	p_header->assert_on_error = p_filter_cmd->assert_on_error;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc) {
		DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
		   (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
		   ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
		    "REMOVE" :
		    ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
		     "MOVE" : "REPLACE")),
		   (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
		   ((p_filter_cmd->type == QED_FILTER_VLAN) ?
		    "VLAN" : "MAC & VLAN"),
		   p_ramrod->filter_cmd_hdr.cmd_cnt,
		   p_filter_cmd->is_rx_filter,
		   p_filter_cmd->is_tx_filter);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
		   p_filter_cmd->vport_to_add_to,
		   p_filter_cmd->vport_to_remove_from,
		   p_filter_cmd->mac[0],
		   p_filter_cmd->mac[1],
		   p_filter_cmd->mac[2],
		   p_filter_cmd->mac[3],
		   p_filter_cmd->mac[4],
		   p_filter_cmd->mac[5],
		   p_filter_cmd->vlan);

	return 0;
}
/*******************************************************************************
 * Description:
 *	Calculates crc 32 on a buffer
 *	Note: crc32_length MUST be aligned to 8
 * Return:
 ******************************************************************************/
static u32 qed_calc_crc32c(u8 *crc32_packet,
			   u32 crc32_length, u32 crc32_seed, u8 complement)
{
	u32 byte = 0, bit = 0, crc32_result = crc32_seed;
	u8 msb = 0, current_byte = 0;

	if ((!crc32_packet) ||
	    (crc32_length == 0) ||
	    ((crc32_length % 8) != 0))
		return crc32_result;
	for (byte = 0; byte < crc32_length; byte++) {
		current_byte = crc32_packet[byte];
		for (bit = 0; bit < 8; bit++) {
			msb = (u8)(crc32_result >> 31);
			crc32_result = crc32_result << 1;
			if (msb != (0x1 & (current_byte >> bit))) {
				crc32_result = crc32_result ^ CRC32_POLY;
				crc32_result |= 1; /*crc32_result[0] = 1;*/
			}
		}
	}
	return crc32_result;
}
static u32 qed_crc32c_le(u32 seed, u8 *mac, u32 len)
{
	u32 packet_buf[2] = { 0 };

	memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
	return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
}
u8 qed_mcast_bin_from_mac(u8 *mac)
{
	u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
				mac, ETH_ALEN);

	return crc & 0xff;
}
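
/* Approximate multicast filtering: each MAC is hashed via CRC32c into one
 * of 256 bins. An ADD operation rebuilds the entire bin vector, so it
 * implicitly removes any multicast filters previously configured for the
 * vport.
 */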
static int
qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			struct qed_filter_mcast *p_filter_cmd,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	struct vport_update_ramrod_data *p_ramrod = NULL;
	u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u8 abs_vport_id = 0;
	int rc, i;

	if (p_filter_cmd->opcode == QED_FILTER_ADD)
		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
				  &abs_vport_id);
	else
		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
				  &abs_vport_id);
	if (rc)
		return rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc) {
		DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.vport_update;
	p_ramrod->common.update_approx_mcast_flg = 1;

	/* explicitly clear out the entire vector */
	memset(&p_ramrod->approx_mcast.bins, 0,
	       sizeof(p_ramrod->approx_mcast.bins));
	memset(bins, 0, sizeof(bins));
	/* filter ADD op is explicit set op and it removes
	 * any existing filters for the vport
	 */
	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit, nbits;

			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			nbits = sizeof(u32) * BITS_PER_BYTE;
			bins[bit / nbits] |= 1 << (bit % nbits);
		}

		/* Convert to correct endianness */
		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
			struct vport_update_ramrod_mcast *p_ramrod_bins;

			p_ramrod_bins = &p_ramrod->approx_mcast;
			p_ramrod_bins->bins[i] = cpu_to_le32(bins[i]);
		}
	}

	p_ramrod->common.vport_id = abs_vport_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int qed_filter_mcast_cmd(struct qed_dev *cdev,
				struct qed_filter_mcast *p_filter_cmd,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	int rc = 0;
	int i;

	/* only ADD and REMOVE operations are supported for multi-cast */
	if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
	     (p_filter_cmd->opcode != QED_FILTER_REMOVE)) ||
	    (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
		return -EINVAL;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u16 opaque_fid;

		if (IS_VF(cdev)) {
			qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
			continue;
		}

		opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_eth_filter_mcast(p_hwfn,
					     opaque_fid,
					     p_filter_cmd,
					     comp_mode, p_comp_data);
	}

	return rc;
}
static int qed_filter_ucast_cmd(struct qed_dev *cdev,
				struct qed_filter_ucast *p_filter_cmd,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	int rc = 0;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u16 opaque_fid;

		if (IS_VF(cdev)) {
			rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
			continue;
		}

		opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_eth_filter_ucast(p_hwfn,
					     opaque_fid,
					     p_filter_cmd,
					     comp_mode, p_comp_data);
		if (rc)
			break;
	}

	return rc;
}
/* Statistics related code */
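/* Per-queue/port counters are read from four firmware "storm" RAM areas
 * (pstorm/tstorm/ustorm/mstorm) plus the MAC port statistics. Each helper
 * first resolves the address and length of its block: PFs compute the BAR0
 * offset directly, while VFs use the addresses advertised by the PF in the
 * acquire response.
 */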
static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_PSDM_RAM +
		    PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_pstorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.pstats.address;
		*p_len = p_resp->pfdev_info.stats_info.pstats.len;
	}
}
static noinline_for_stack void
__qed_get_vport_pstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
		       struct qed_eth_stats *p_stats, u16 statistics_bin)
{
	struct eth_pstorm_per_queue_stat pstats;
	u32 pstats_addr = 0, pstats_len = 0;

	__qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
				       statistics_bin);

	memset(&pstats, 0, sizeof(pstats));
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);

	p_stats->common.tx_ucast_bytes +=
	    HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->common.tx_mcast_bytes +=
	    HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->common.tx_bcast_bytes +=
	    HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->common.tx_ucast_pkts +=
	    HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->common.tx_mcast_pkts +=
	    HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->common.tx_bcast_pkts +=
	    HILO_64_REGPAIR(pstats.sent_bcast_pkts);
	p_stats->common.tx_err_drop_pkts +=
	    HILO_64_REGPAIR(pstats.error_drop_pkts);
}
static noinline_for_stack void
__qed_get_vport_tstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
		       struct qed_eth_stats *p_stats, u16 statistics_bin)
{
	struct tstorm_per_port_stat tstats;
	u32 tstats_addr, tstats_len;

	if (IS_PF(p_hwfn->cdev)) {
		tstats_addr = BAR0_MAP_REG_TSDM_RAM +
		    TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
		tstats_len = sizeof(struct tstorm_per_port_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
		tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
	}

	memset(&tstats, 0, sizeof(tstats));
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);

	p_stats->common.mftag_filter_discards +=
	    HILO_64_REGPAIR(tstats.mftag_filter_discard);
	p_stats->common.mac_filter_discards +=
	    HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
}
static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_USDM_RAM +
		    USTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_ustorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.ustats.address;
		*p_len = p_resp->pfdev_info.stats_info.ustats.len;
	}
}
static noinline_for_stack
void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			    struct qed_eth_stats *p_stats, u16 statistics_bin)
{
	struct eth_ustorm_per_queue_stat ustats;
	u32 ustats_addr = 0, ustats_len = 0;

	__qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
				       statistics_bin);

	memset(&ustats, 0, sizeof(ustats));
	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);

	p_stats->common.rx_ucast_bytes +=
	    HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->common.rx_mcast_bytes +=
	    HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->common.rx_bcast_bytes +=
	    HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->common.rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->common.rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->common.rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}
static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_MSDM_RAM +
		    MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_mstorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.mstats.address;
		*p_len = p_resp->pfdev_info.stats_info.mstats.len;
	}
}
static noinline_for_stack void
__qed_get_vport_mstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
		       struct qed_eth_stats *p_stats, u16 statistics_bin)
{
	struct eth_mstorm_per_queue_stat mstats;
	u32 mstats_addr = 0, mstats_len = 0;

	__qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
				       statistics_bin);

	memset(&mstats, 0, sizeof(mstats));
	qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);

	p_stats->common.no_buff_discards +=
	    HILO_64_REGPAIR(mstats.no_buff_discard);
	p_stats->common.packet_too_big_discard +=
	    HILO_64_REGPAIR(mstats.packet_too_big_discard);
	p_stats->common.ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
	p_stats->common.tpa_coalesced_pkts +=
	    HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
	p_stats->common.tpa_coalesced_events +=
	    HILO_64_REGPAIR(mstats.tpa_coalesced_events);
	p_stats->common.tpa_aborts_num +=
	    HILO_64_REGPAIR(mstats.tpa_aborts_num);
	p_stats->common.tpa_coalesced_bytes +=
	    HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}
static noinline_for_stack void
__qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			   struct qed_eth_stats *p_stats)
{
	struct qed_eth_stats_common *p_common = &p_stats->common;
	struct port_stats port_stats;
	int j;

	memset(&port_stats, 0, sizeof(port_stats));

	qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
			p_hwfn->mcp_info->port_addr +
			offsetof(struct public_port, stats),
			sizeof(port_stats));

	p_common->rx_64_byte_packets += port_stats.eth.r64;
	p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
	p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
	p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
	p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
	p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
	p_common->rx_crc_errors += port_stats.eth.rfcs;
	p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
	p_common->rx_pause_frames += port_stats.eth.rxpf;
	p_common->rx_pfc_frames += port_stats.eth.rxpp;
	p_common->rx_align_errors += port_stats.eth.raln;
	p_common->rx_carrier_errors += port_stats.eth.rfcr;
	p_common->rx_oversize_packets += port_stats.eth.rovr;
	p_common->rx_jabbers += port_stats.eth.rjbr;
	p_common->rx_undersize_packets += port_stats.eth.rund;
	p_common->rx_fragments += port_stats.eth.rfrg;
	p_common->tx_64_byte_packets += port_stats.eth.t64;
	p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
	p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
	p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
	p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
	p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
	p_common->tx_pause_frames += port_stats.eth.txpf;
	p_common->tx_pfc_frames += port_stats.eth.txpp;
	p_common->rx_mac_bytes += port_stats.eth.rbyte;
	p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
	p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
	p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
	p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
	p_common->tx_mac_bytes += port_stats.eth.tbyte;
	p_common->tx_mac_uc_packets += port_stats.eth.txuca;
	p_common->tx_mac_mc_packets += port_stats.eth.txmca;
	p_common->tx_mac_bc_packets += port_stats.eth.txbca;
	p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
	for (j = 0; j < 8; j++) {
		p_common->brb_truncates += port_stats.brb.brb_truncate[j];
		p_common->brb_discards += port_stats.brb.brb_discard[j];
	}

	if (QED_IS_BB(p_hwfn->cdev)) {
		struct qed_eth_stats_bb *p_bb = &p_stats->bb;

		p_bb->rx_1519_to_1522_byte_packets +=
		    port_stats.eth.u0.bb0.r1522;
		p_bb->rx_1519_to_2047_byte_packets +=
		    port_stats.eth.u0.bb0.r2047;
		p_bb->rx_2048_to_4095_byte_packets +=
		    port_stats.eth.u0.bb0.r4095;
		p_bb->rx_4096_to_9216_byte_packets +=
		    port_stats.eth.u0.bb0.r9216;
		p_bb->rx_9217_to_16383_byte_packets +=
		    port_stats.eth.u0.bb0.r16383;
		p_bb->tx_1519_to_2047_byte_packets +=
		    port_stats.eth.u1.bb1.t2047;
		p_bb->tx_2048_to_4095_byte_packets +=
		    port_stats.eth.u1.bb1.t4095;
		p_bb->tx_4096_to_9216_byte_packets +=
		    port_stats.eth.u1.bb1.t9216;
		p_bb->tx_9217_to_16383_byte_packets +=
		    port_stats.eth.u1.bb1.t16383;
		p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
		p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
	} else {
		struct qed_eth_stats_ah *p_ah = &p_stats->ah;

		p_ah->rx_1519_to_max_byte_packets +=
		    port_stats.eth.u0.ah0.r1519_to_max;
		p_ah->tx_1519_to_max_byte_packets +=
		    port_stats.eth.u1.ah1.t1519_to_max;
	}
}
static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_eth_stats *stats,
				  u16 statistics_bin, bool b_get_port_stats)
{
	__qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);

	if (b_get_port_stats && p_hwfn->mcp_info)
		__qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
}
static void _qed_get_vport_stats(struct qed_dev *cdev,
				 struct qed_eth_stats *stats)
{
	u8 fw_vport = 0;
	int i;

	memset(stats, 0, sizeof(*stats));

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
						    : NULL;

		if (IS_PF(cdev)) {
			/* The main vport index is relative first */
			if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
				DP_ERR(p_hwfn, "No vport available!\n");
				goto out;
			}
		}

		if (IS_PF(cdev) && !p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		__qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
				      IS_PF(cdev) ? true : false);

out:
		if (IS_PF(cdev) && p_ptt)
			qed_ptt_release(p_hwfn, p_ptt);
	}
}
void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
{
	u32 i;

	if (!cdev) {
		memset(stats, 0, sizeof(*stats));
		return;
	}

	_qed_get_vport_stats(cdev, stats);

	if (!cdev->reset_stats)
		return;

	/* Reduce the statistics baseline */
	for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
		((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
}
/* zeroes V-PORT specific portion of stats (Port stats remains untouched) */
void qed_reset_vport_stats(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct eth_mstorm_per_queue_stat mstats;
		struct eth_ustorm_per_queue_stat ustats;
		struct eth_pstorm_per_queue_stat pstats;
		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
						    : NULL;
		u32 addr = 0, len = 0;

		if (IS_PF(cdev) && !p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		memset(&mstats, 0, sizeof(mstats));
		__qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);

		memset(&ustats, 0, sizeof(ustats));
		__qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);

		memset(&pstats, 0, sizeof(pstats));
		__qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);

		if (IS_PF(cdev))
			qed_ptt_release(p_hwfn, p_ptt);
	}

	/* PORT statistics are not necessarily reset, so we need to
	 * read and create a baseline for future statistics.
	 */
	if (!cdev->reset_stats)
		DP_INFO(cdev, "Reset stats not allocated\n");
	else
		_qed_get_vport_stats(cdev, cdev->reset_stats);
}
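
/* aRFS (accelerated RFS): enable/disable the RFS searcher mode for this PF
 * and, in qed_configure_rfs_ntuple_filter() below, install or remove
 * individual n-tuple filters by handing firmware a DMA-able buffer holding
 * the packet header to match against.
 */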
void
qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			struct qed_arfs_config_params *p_cfg_params)
{
	if (p_cfg_params->arfs_enable) {
		qed_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
					p_cfg_params->tcp, p_cfg_params->udp,
					p_cfg_params->ipv4, p_cfg_params->ipv6);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "tcp = %s, udp = %s, ipv4 = %s, ipv6 = %s\n",
			   p_cfg_params->tcp ? "Enable" : "Disable",
			   p_cfg_params->udp ? "Enable" : "Disable",
			   p_cfg_params->ipv4 ? "Enable" : "Disable",
			   p_cfg_params->ipv6 ? "Enable" : "Disable");
	} else {
		qed_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "Configured ARFS mode : %s\n",
		   p_cfg_params->arfs_enable ? "Enable" : "Disable");
}
int
qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
				struct qed_spq_comp_cb *p_cb,
				dma_addr_t p_addr, u16 length, u16 qid,
				u8 vport_id, bool b_is_add)
{
	struct rx_update_gft_filter_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 abs_rx_q_id = 0;
	u8 abs_vport_id = 0;
	int rc = -EINVAL;

	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
	if (rc)
		return rc;

	rc = qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
	if (rc)
		return rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);

	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;

	if (p_cb) {
		init_data.comp_mode = QED_SPQ_MODE_CB;
		init_data.p_comp_data = p_cb;
	} else {
		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
	}

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_GFT_UPDATE_FILTER,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_update_gft;
	DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
	p_ramrod->pkt_hdr_length = cpu_to_le16(length);
	p_ramrod->rx_qid_or_action_icid = cpu_to_le16(abs_rx_q_id);
	p_ramrod->vport_id = abs_vport_id;
	p_ramrod->filter_type = RFS_FILTER_TYPE;
	p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER : GFT_DELETE_FILTER;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n",
		   abs_vport_id, abs_rx_q_id,
		   b_is_add ? "Adding" : "Removing", (u64)p_addr, length);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct qed_queue_cid *p_cid, u16 *p_rx_coal)
{
	u32 coalesce, address, is_valid;
	struct cau_sb_entry sb_entry;
	u8 timer_res;
	int rc;

	rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
			       p_cid->sb_igu_id * sizeof(u64),
			       (u64)(uintptr_t)&sb_entry, 2, 0);
	if (rc) {
		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
		return rc;
	}

	timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0);

	address = BAR0_MAP_REG_USDM_RAM +
		  USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
	coalesce = qed_rd(p_hwfn, p_ptt, address);

	is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
	if (!is_valid)
		return -EINVAL;

	coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
	*p_rx_coal = (u16)(coalesce << timer_res);

	return 0;
}

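/* The TIMESET field is stored right-shifted by the CAU timer resolution;
 * shifting it back left recovers the coalescing value in the usec units
 * this API uses. Worked example (illustrative values only): a TIMESET of
 * 0x19 with timer_res 1 yields 0x19 << 1 = 50us. The TX getter below uses
 * the same math against the Xstorm queue zone.
 */
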
int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct qed_queue_cid *p_cid, u16 *p_tx_coal)
{
	u32 coalesce, address, is_valid;
	struct cau_sb_entry sb_entry;
	u8 timer_res;
	int rc;

	rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
			       p_cid->sb_igu_id * sizeof(u64),
			       (u64)(uintptr_t)&sb_entry, 2, 0);
	if (rc) {
		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
		return rc;
	}

	timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1);

	address = BAR0_MAP_REG_XSDM_RAM +
		  XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
	coalesce = qed_rd(p_hwfn, p_ptt, address);

	is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
	if (!is_valid)
		return -EINVAL;

	coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
	*p_tx_coal = (u16)(coalesce << timer_res);

	return 0;
}

int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *p_coal, void *handle)
{
	struct qed_queue_cid *p_cid = handle;
	struct qed_ptt *p_ptt;
	int rc = 0;

	if (IS_VF(p_hwfn->cdev)) {
		rc = qed_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid);
		if (rc)
			DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n");

		return rc;
	}

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	if (p_cid->b_is_rx) {
		rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
		if (rc)
			goto out;
	} else {
		rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
		if (rc)
			goto out;
	}

out:
	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}

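/* For the PF/MSI-X case below, each L2 queue consumes one rx context, one
 * xdp (tx) context and one tx context per TC - hence the
 * 'cids / (2 + num_tc)' bound. Illustrative example (made-up numbers):
 * 192 configured connections with num_tc == 1 support at most
 * 192 / 3 == 64 queues.
 */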
static int qed_fill_eth_dev_info(struct qed_dev *cdev,
				 struct qed_dev_eth_info *info)
{
	int i;

	memset(info, 0, sizeof(*info));

	info->num_tc = 1;

	if (IS_PF(cdev)) {
		int max_vf_vlan_filters = 0;
		int max_vf_mac_filters = 0;

		if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
			u16 num_queues = 0;

			/* Since the feature controls only queue-zones,
			 * make sure we have the contexts [rx, xdp, tcs] to
			 * match.
			 */
			for_each_hwfn(cdev, i) {
				struct qed_hwfn *hwfn = &cdev->hwfns[i];
				u16 l2_queues = (u16)FEAT_NUM(hwfn,
							      QED_PF_L2_QUE);
				u16 cids;

				cids = hwfn->pf_params.eth_pf_params.num_cons;
				cids /= (2 + info->num_tc);
				num_queues += min_t(u16, l2_queues, cids);
			}

			/* queues might theoretically be >256, but the
			 * interrupt upper-limit guarantees it would fit in
			 * a u8.
			 */
			if (cdev->int_params.fp_msix_cnt) {
				u8 irqs = cdev->int_params.fp_msix_cnt;

				info->num_queues = (u8)min_t(u16,
							     num_queues, irqs);
			}
		} else {
			info->num_queues = cdev->num_hwfns;
		}

		if (IS_QED_SRIOV(cdev)) {
			max_vf_vlan_filters = cdev->p_iov_info->total_vfs *
					      QED_ETH_VF_NUM_VLAN_FILTERS;
			max_vf_mac_filters = cdev->p_iov_info->total_vfs *
					     QED_ETH_VF_NUM_MAC_FILTERS;
		}
		info->num_vlan_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
						  QED_VLAN) -
					 max_vf_vlan_filters;
		info->num_mac_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
						 QED_MAC) -
					max_vf_mac_filters;

		ether_addr_copy(info->port_mac,
				cdev->hwfns[0].hw_info.hw_mac_addr);

		info->xdp_supported = true;
	} else {
		u16 total_cids = 0;

		/* Determine queues & XDP support */
		for_each_hwfn(cdev, i) {
			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
			u8 queues, cids;

			qed_vf_get_num_cids(p_hwfn, &cids);
			qed_vf_get_num_rxqs(p_hwfn, &queues);
			info->num_queues += queues;
			total_cids += cids;
		}

		/* Enable VF XDP in case the PF guarantees sufficient
		 * connections.
		 */
		if (total_cids >= info->num_queues * 3)
			info->xdp_supported = true;

		qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
					    (u8 *)&info->num_vlan_filters);
		qed_vf_get_num_mac_filters(&cdev->hwfns[0],
					   (u8 *)&info->num_mac_filters);
		qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);

		info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi;
	}

	qed_fill_dev_info(cdev, &info->common);

	if (IS_VF(cdev))
		eth_zero_addr(info->common.hw_mac);

	return 0;
}

static void qed_register_eth_ops(struct qed_dev *cdev,
				 struct qed_eth_cb_ops *ops, void *cookie)
{
	cdev->protocol_ops.eth = ops;
	cdev->ops_cookie = cookie;

	/* For VF, we start bulletin reading */
	if (IS_VF(cdev))
		qed_vf_start_iov_wq(cdev);
}

static bool qed_check_mac(struct qed_dev *cdev, u8 *mac)
{
	if (IS_PF(cdev))
		return true;

	return qed_vf_check_mac(&cdev->hwfns[0], mac);
}

static int qed_start_vport(struct qed_dev *cdev,
			   struct qed_start_vport_params *params)
{
	int rc, i;

	for_each_hwfn(cdev, i) {
		struct qed_sp_vport_start_params start = { 0 };
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
						      QED_TPA_MODE_NONE;
		start.remove_inner_vlan = params->remove_inner_vlan;
		start.only_untagged = true;	/* untagged only */
		start.drop_ttl0 = params->drop_ttl0;
		start.opaque_fid = p_hwfn->hw_info.opaque_fid;
		start.concrete_fid = p_hwfn->hw_info.concrete_fid;
		start.handle_ptp_pkts = params->handle_ptp_pkts;
		start.vport_id = params->vport_id;
		start.max_buffers_per_cqe = 16;
		start.mtu = params->mtu;

		rc = qed_sp_vport_start(p_hwfn, &start);
		if (rc) {
			DP_ERR(cdev, "Failed to start VPORT\n");
			return rc;
		}

		rc = qed_hw_start_fastpath(p_hwfn);
		if (rc) {
			DP_ERR(cdev, "Failed to start VPORT fastpath\n");
			return rc;
		}

		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
			   "Started V-PORT %d with MTU %d\n",
			   start.vport_id, start.mtu);
	}

	if (params->clear_stats)
		qed_reset_vport_stats(cdev);

	return 0;
}

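/* A minimal caller-side sketch of the vport_start op (hypothetical values;
 * qede is the usual consumer, through qed_eth_ops):
 *
 *	struct qed_start_vport_params params = {
 *		.vport_id = 0,
 *		.mtu = 1500,
 *		.drop_ttl0 = true,
 *		.clear_stats = true,
 *	};
 *
 *	if (ops->vport_start(cdev, &params))
 *		goto err;
 */
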
static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id)
{
	int rc, i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		rc = qed_sp_vport_stop(p_hwfn,
				       p_hwfn->hw_info.opaque_fid, vport_id);

		if (rc) {
			DP_ERR(cdev, "Failed to stop VPORT\n");
			return rc;
		}
	}
	return 0;
}

static int qed_update_vport_rss(struct qed_dev *cdev,
				struct qed_update_vport_rss_params *input,
				struct qed_rss_params *rss)
{
	int i, fn;

	/* Update configuration with what's correct regardless of CMT */
	rss->update_rss_config = 1;
	rss->rss_enable = 1;
	rss->update_rss_capabilities = 1;
	rss->update_rss_ind_table = 1;
	rss->update_rss_key = 1;
	rss->rss_caps = input->rss_caps;
	memcpy(rss->rss_key, input->rss_key, QED_RSS_KEY_SIZE * sizeof(u32));

	/* In regular scenario, we'd simply need to take input handlers.
	 * But in CMT, we'd have to split the handlers according to the
	 * engine they were configured on. We'd then have to understand
	 * whether RSS is really required, since a 2-queue configuration
	 * on CMT doesn't require RSS.
	 */
	if (cdev->num_hwfns == 1) {
		memcpy(rss->rss_ind_table,
		       input->rss_ind_table,
		       QED_RSS_IND_TABLE_SIZE * sizeof(void *));
		rss->rss_table_size_log = 7;
		return 0;
	}

	/* Start by copying the non-specific information to the 2nd copy */
	memcpy(&rss[1], &rss[0], sizeof(struct qed_rss_params));

	/* CMT should be round-robin */
	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
		struct qed_queue_cid *cid = input->rss_ind_table[i];
		struct qed_rss_params *t_rss;

		if (cid->p_owner == QED_LEADING_HWFN(cdev))
			t_rss = &rss[0];
		else
			t_rss = &rss[1];

		t_rss->rss_ind_table[i / cdev->num_hwfns] = cid;
	}

	/* Make sure RSS is actually required */
	for_each_hwfn(cdev, fn) {
		for (i = 1; i < QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns; i++) {
			if (rss[fn].rss_ind_table[i] !=
			    rss[fn].rss_ind_table[0])
				break;
		}
		if (i == QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns) {
			DP_VERBOSE(cdev, NETIF_MSG_IFUP,
				   "CMT - 1 queue per-hwfn; Disabling RSS\n");
			return -EINVAL;
		}
		rss[fn].rss_table_size_log = 6;
	}

	return 0;
}

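/* CMT split, illustrated (assuming two hwfns and the round-robin layout the
 * comment above describes): indirection entries owned by engine 0 land in
 * rss[0] and entries owned by engine 1 in rss[1], each compacted to slot
 * i / num_hwfns - e.g. global entries 0, 2, 4, ... become engine-0 slots
 * 0, 1, 2, ...
 */
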
static int qed_update_vport(struct qed_dev *cdev,
			    struct qed_update_vport_params *params)
{
	struct qed_sp_vport_update_params sp_params;
	struct qed_rss_params *rss;
	int rc = 0, i;

	if (!cdev)
		return -ENODEV;

	rss = vzalloc(sizeof(*rss) * cdev->num_hwfns);
	if (!rss)
		return -ENOMEM;

	memset(&sp_params, 0, sizeof(sp_params));

	/* Translate protocol params into sp params */
	sp_params.vport_id = params->vport_id;
	sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
	sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
	sp_params.vport_active_rx_flg = params->vport_active_flg;
	sp_params.vport_active_tx_flg = params->vport_active_flg;
	sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
	sp_params.tx_switching_flg = params->tx_switching_flg;
	sp_params.accept_any_vlan = params->accept_any_vlan;
	sp_params.update_accept_any_vlan_flg =
		params->update_accept_any_vlan_flg;

	/* Prepare the RSS configuration */
	if (params->update_rss_flg)
		if (qed_update_vport_rss(cdev, &params->rss_params, rss))
			params->update_rss_flg = 0;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (params->update_rss_flg)
			sp_params.rss_params = &rss[i];

		sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = qed_sp_vport_update(p_hwfn, &sp_params,
					 QED_SPQ_MODE_EBLOCK,
					 NULL);
		if (rc) {
			DP_ERR(cdev, "Failed to update VPORT\n");
			goto out;
		}

		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
			   "Updated V-PORT %d: active_flag %d [update %d]\n",
			   params->vport_id, params->vport_active_flg,
			   params->update_vport_active_flg);
	}

out:
	vfree(rss);
	return rc;
}

static int qed_start_rxq(struct qed_dev *cdev,
			 u8 rss_num,
			 struct qed_queue_start_common_params *p_params,
			 u16 bd_max_bytes,
			 dma_addr_t bd_chain_phys_addr,
			 dma_addr_t cqe_pbl_addr,
			 u16 cqe_pbl_size,
			 struct qed_rxq_start_ret_params *ret_params)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = rss_num % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
	p_params->stats_id = p_params->vport_id;

	rc = qed_eth_rx_queue_start(p_hwfn,
				    p_hwfn->hw_info.opaque_fid,
				    p_params,
				    bd_max_bytes,
				    bd_chain_phys_addr,
				    cqe_pbl_addr, cqe_pbl_size, ret_params);
	if (rc) {
		DP_ERR(cdev, "Failed to start RXQ#%d\n", p_params->queue_id);
		return rc;
	}

	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
		   "Started RX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
		   p_params->queue_id, rss_num, p_params->vport_id,
		   p_params->p_sb->igu_sb_id);

	return 0;
}

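/* Queue-to-engine mapping used by the queue start/stop ops, illustrated
 * (assuming two hwfns): a global rss_num/queue_id of 5 is served by
 * hwfn 5 % 2 == 1 as local queue 5 / 2 == 2.
 */
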
static int qed_stop_rxq(struct qed_dev *cdev, u8 rss_id, void *handle)
{
	int rc, hwfn_index;
	struct qed_hwfn *p_hwfn;

	hwfn_index = rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	rc = qed_eth_rx_queue_stop(p_hwfn, handle, false, false);
	if (rc) {
		DP_ERR(cdev, "Failed to stop RXQ#%02x\n", rss_id);
		return rc;
	}

	return 0;
}

static int qed_start_txq(struct qed_dev *cdev,
			 u8 rss_num,
			 struct qed_queue_start_common_params *p_params,
			 dma_addr_t pbl_addr,
			 u16 pbl_size,
			 struct qed_txq_start_ret_params *ret_params)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = rss_num % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
	p_params->stats_id = p_params->vport_id;

	rc = qed_eth_tx_queue_start(p_hwfn,
				    p_hwfn->hw_info.opaque_fid,
				    p_params, 0 /* tc */,
				    pbl_addr, pbl_size, ret_params);

	if (rc) {
		DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
		return rc;
	}

	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
		   "Started TX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
		   p_params->queue_id, rss_num, p_params->vport_id,
		   p_params->p_sb->igu_sb_id);

	return 0;
}

#define QED_HW_STOP_RETRY_LIMIT (10)
static int qed_fastpath_stop(struct qed_dev *cdev)
{
	int rc;

	rc = qed_hw_stop_fastpath(cdev);
	if (rc) {
		DP_ERR(cdev, "Failed to stop Fastpath\n");
		return rc;
	}

	return 0;
}

static int qed_stop_txq(struct qed_dev *cdev, u8 rss_id, void *handle)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	rc = qed_eth_tx_queue_stop(p_hwfn, handle);
	if (rc) {
		DP_ERR(cdev, "Failed to stop TXQ#%02x\n", rss_id);
		return rc;
	}

	return 0;
}

static int qed_tunn_configure(struct qed_dev *cdev,
			      struct qed_tunn_params *tunn_params)
{
	struct qed_tunnel_info tunn_info;
	int i, rc;

	memset(&tunn_info, 0, sizeof(tunn_info));
	if (tunn_params->update_vxlan_port) {
		tunn_info.vxlan_port.b_update_port = true;
		tunn_info.vxlan_port.port = tunn_params->vxlan_port;
	}

	if (tunn_params->update_geneve_port) {
		tunn_info.geneve_port.b_update_port = true;
		tunn_info.geneve_port.port = tunn_params->geneve_port;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_ptt *p_ptt;
		struct qed_tunnel_info *tun;

		tun = &hwfn->cdev->tunnel;
		if (IS_PF(cdev)) {
			p_ptt = qed_ptt_acquire(hwfn);
			if (!p_ptt)
				return -EAGAIN;
		} else {
			p_ptt = NULL;
		}

		rc = qed_sp_pf_update_tunn_cfg(hwfn, p_ptt, &tunn_info,
					       QED_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			if (IS_PF(cdev))
				qed_ptt_release(hwfn, p_ptt);
			return rc;
		}

		if (IS_PF_SRIOV(hwfn)) {
			u16 vxlan_port, geneve_port;
			int j;

			vxlan_port = tun->vxlan_port.port;
			geneve_port = tun->geneve_port.port;

			qed_for_each_vf(hwfn, j) {
				qed_iov_bulletin_set_udp_ports(hwfn, j,
							       vxlan_port,
							       geneve_port);
			}

			qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
		}
		if (IS_PF(cdev))
			qed_ptt_release(hwfn, p_ptt);
	}

	return 0;
}

static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
					enum qed_filter_rx_mode_type type)
{
	struct qed_filter_accept_flags accept_flags;

	memset(&accept_flags, 0, sizeof(accept_flags));

	accept_flags.update_rx_mode_config = 1;
	accept_flags.update_tx_mode_config = 1;
	accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
					QED_ACCEPT_MCAST_MATCHED |
					QED_ACCEPT_BCAST;
	accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
					QED_ACCEPT_MCAST_MATCHED |
					QED_ACCEPT_BCAST;

	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
		accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
						 QED_ACCEPT_MCAST_UNMATCHED;
		accept_flags.tx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
						 QED_ACCEPT_MCAST_UNMATCHED;
	} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
		accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
		accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
	}

	return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
				     QED_SPQ_MODE_CB, NULL);
}

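/* In short: regular mode accepts only matched unicast/multicast (plus
 * broadcast); multi-promisc additionally accepts unmatched multicast;
 * full promisc accepts unmatched unicast and multicast as well.
 */
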
static int qed_configure_filter_ucast(struct qed_dev *cdev,
				      struct qed_filter_ucast_params *params)
{
	struct qed_filter_ucast ucast;

	if (!params->vlan_valid && !params->mac_valid) {
		DP_NOTICE(cdev,
			  "Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
		return -EINVAL;
	}

	memset(&ucast, 0, sizeof(ucast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		ucast.opcode = QED_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		ucast.opcode = QED_FILTER_REMOVE;
		break;
	case QED_FILTER_XCAST_TYPE_REPLACE:
		ucast.opcode = QED_FILTER_REPLACE;
		break;
	default:
		DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
			  params->type);
	}

	if (params->vlan_valid && params->mac_valid) {
		ucast.type = QED_FILTER_MAC_VLAN;
		ether_addr_copy(ucast.mac, params->mac);
		ucast.vlan = params->vlan;
	} else if (params->mac_valid) {
		ucast.type = QED_FILTER_MAC;
		ether_addr_copy(ucast.mac, params->mac);
	} else {
		ucast.type = QED_FILTER_VLAN;
		ucast.vlan = params->vlan;
	}

	ucast.is_rx_filter = true;
	ucast.is_tx_filter = true;

	return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter_mcast(struct qed_dev *cdev,
				      struct qed_filter_mcast_params *params)
{
	struct qed_filter_mcast mcast;
	int i;

	memset(&mcast, 0, sizeof(mcast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		mcast.opcode = QED_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		mcast.opcode = QED_FILTER_REMOVE;
		break;
	default:
		DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
			  params->type);
	}

	mcast.num_mc_addrs = params->num;
	for (i = 0; i < mcast.num_mc_addrs; i++)
		ether_addr_copy(mcast.mac[i], params->mac[i]);

	return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter(struct qed_dev *cdev,
				struct qed_filter_params *params)
{
	enum qed_filter_rx_mode_type accept_flags;

	switch (params->type) {
	case QED_FILTER_TYPE_UCAST:
		return qed_configure_filter_ucast(cdev, &params->filter.ucast);
	case QED_FILTER_TYPE_MCAST:
		return qed_configure_filter_mcast(cdev, &params->filter.mcast);
	case QED_FILTER_TYPE_RX_MODE:
		accept_flags = params->filter.accept_flags;
		return qed_configure_filter_rx_mode(cdev, accept_flags);
	default:
		DP_NOTICE(cdev, "Unknown filter type %d\n", (int)params->type);
		return -EINVAL;
	}
}

static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_arfs_config_params arfs_config_params;

	memset(&arfs_config_params, 0, sizeof(arfs_config_params));
	arfs_config_params.tcp = true;
	arfs_config_params.udp = true;
	arfs_config_params.ipv4 = true;
	arfs_config_params.ipv6 = true;
	arfs_config_params.arfs_enable = en_searcher;

	qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
				&arfs_config_params);
	return 0;
}

static void
qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
			     void *cookie, union event_ring_data *data,
			     u8 fw_return_code)
{
	struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common;
	void *dev = p_hwfn->cdev->ops_cookie;

	op->arfs_filter_op(dev, cookie, fw_return_code);
}

static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie,
					 dma_addr_t mapping, u16 length,
					 u16 vport_id, u16 rx_queue_id,
					 bool add_filter)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_spq_comp_cb cb;
	int rc = -EINVAL;

	cb.function = qed_arfs_sp_response_handler;
	cb.cookie = cookie;

	rc = qed_configure_rfs_ntuple_filter(p_hwfn, p_hwfn->p_arfs_ptt,
					     &cb, mapping, length, rx_queue_id,
					     vport_id, add_filter);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to issue a-RFS filter configuration\n");
	else
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV,
			   "Successfully issued a-RFS filter configuration\n");

	return rc;
}

static int qed_get_coalesce(struct qed_dev *cdev, u16 *coal, void *handle)
{
	struct qed_queue_cid *p_cid = handle;
	struct qed_hwfn *p_hwfn;
	int rc;

	p_hwfn = p_cid->p_owner;
	rc = qed_get_queue_coalesce(p_hwfn, coal, handle);
	if (rc)
		DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n");

	return rc;
}

static int qed_fp_cqe_completion(struct qed_dev *dev,
				 u8 rss_id, struct eth_slow_path_rx_cqe *cqe)
{
	return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
				      cqe);
}

#ifdef CONFIG_QED_SRIOV
extern const struct qed_iov_hv_ops qed_iov_ops_pass;
#endif

#ifdef CONFIG_DCB
extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass;
#endif

extern const struct qed_eth_ptp_ops qed_ptp_ops_pass;

static const struct qed_eth_ops qed_eth_ops_pass = {
	.common = &qed_common_ops_pass,
#ifdef CONFIG_QED_SRIOV
	.iov = &qed_iov_ops_pass,
#endif
#ifdef CONFIG_DCB
	.dcb = &qed_dcbnl_ops_pass,
#endif
	.ptp = &qed_ptp_ops_pass,
	.fill_dev_info = &qed_fill_eth_dev_info,
	.register_ops = &qed_register_eth_ops,
	.check_mac = &qed_check_mac,
	.vport_start = &qed_start_vport,
	.vport_stop = &qed_stop_vport,
	.vport_update = &qed_update_vport,
	.q_rx_start = &qed_start_rxq,
	.q_rx_stop = &qed_stop_rxq,
	.q_tx_start = &qed_start_txq,
	.q_tx_stop = &qed_stop_txq,
	.filter_config = &qed_configure_filter,
	.fastpath_stop = &qed_fastpath_stop,
	.eth_cqe_completion = &qed_fp_cqe_completion,
	.get_vport_stats = &qed_get_vport_stats,
	.tunn_config = &qed_tunn_configure,
	.ntuple_filter_config = &qed_ntuple_arfs_filter_config,
	.configure_arfs_searcher = &qed_configure_arfs_searcher,
	.get_coalesce = &qed_get_coalesce,
};

const struct qed_eth_ops *qed_get_eth_ops(void)
{
	return &qed_eth_ops_pass;
}
EXPORT_SYMBOL(qed_get_eth_ops);

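/* A minimal consumer sketch (hypothetical; mirrors how the qede driver
 * binds to this API):
 *
 *	const struct qed_eth_ops *ops = qed_get_eth_ops();
 *	struct qed_dev_eth_info info;
 *
 *	if (!ops || ops->fill_dev_info(cdev, &info))
 *		return -ENODEV;
 */
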
void qed_put_eth_ops(void)
{
	/* TODO - reference count for module? */
}
EXPORT_SYMBOL(qed_put_eth_ops);