GNU Linux-libre 4.19.211-gnu1
[releases.git] drivers/net/ethernet/qlogic/qed/qed_l2.c
1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015-2017  QLogic Corporation
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #include <linux/types.h>
34 #include <asm/byteorder.h>
35 #include <asm/param.h>
36 #include <linux/delay.h>
37 #include <linux/dma-mapping.h>
38 #include <linux/etherdevice.h>
39 #include <linux/interrupt.h>
40 #include <linux/kernel.h>
41 #include <linux/module.h>
42 #include <linux/pci.h>
43 #include <linux/slab.h>
44 #include <linux/stddef.h>
45 #include <linux/string.h>
46 #include <linux/workqueue.h>
47 #include <linux/bitops.h>
48 #include <linux/bug.h>
49 #include <linux/vmalloc.h>
50 #include "qed.h"
51 #include <linux/qed/qed_chain.h>
52 #include "qed_cxt.h"
53 #include "qed_dev_api.h"
54 #include <linux/qed/qed_eth_if.h>
55 #include "qed_hsi.h"
56 #include "qed_hw.h"
57 #include "qed_int.h"
58 #include "qed_l2.h"
59 #include "qed_mcp.h"
60 #include "qed_reg_addr.h"
61 #include "qed_sp.h"
62 #include "qed_sriov.h"
63
65 #define QED_MAX_SGES_NUM 16
66 #define CRC32_POLY 0x1edc6f41
67
68 struct qed_l2_info {
69         u32 queues;
70         unsigned long **pp_qid_usage;
71
72         /* The lock is meant to synchronize access to the qid usage */
73         struct mutex lock;
74 };
75
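/* Allocate the per-hwfn L2 bookkeeping structure and one qid-usage bitmap
 * per queue-zone. On a partial allocation failure the bitmaps already
 * allocated are left in place; qed_l2_free() below releases them.
 */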
76 int qed_l2_alloc(struct qed_hwfn *p_hwfn)
77 {
78         struct qed_l2_info *p_l2_info;
79         unsigned long **pp_qids;
80         u32 i;
81
82         if (!QED_IS_L2_PERSONALITY(p_hwfn))
83                 return 0;
84
85         p_l2_info = kzalloc(sizeof(*p_l2_info), GFP_KERNEL);
86         if (!p_l2_info)
87                 return -ENOMEM;
88         p_hwfn->p_l2_info = p_l2_info;
89
90         if (IS_PF(p_hwfn->cdev)) {
91                 p_l2_info->queues = RESC_NUM(p_hwfn, QED_L2_QUEUE);
92         } else {
93                 u8 rx = 0, tx = 0;
94
95                 qed_vf_get_num_rxqs(p_hwfn, &rx);
96                 qed_vf_get_num_txqs(p_hwfn, &tx);
97
98                 p_l2_info->queues = max_t(u8, rx, tx);
99         }
100
101         pp_qids = kcalloc(p_l2_info->queues, sizeof(unsigned long *),
102                           GFP_KERNEL);
103         if (!pp_qids)
104                 return -ENOMEM;
105         p_l2_info->pp_qid_usage = pp_qids;
106
107         for (i = 0; i < p_l2_info->queues; i++) {
108                 pp_qids[i] = kzalloc(MAX_QUEUES_PER_QZONE / 8, GFP_KERNEL);
109                 if (!pp_qids[i])
110                         return -ENOMEM;
111         }
112
113         return 0;
114 }
115
116 void qed_l2_setup(struct qed_hwfn *p_hwfn)
117 {
118         if (!QED_IS_L2_PERSONALITY(p_hwfn))
119                 return;
120
121         mutex_init(&p_hwfn->p_l2_info->lock);
122 }
123
124 void qed_l2_free(struct qed_hwfn *p_hwfn)
125 {
126         u32 i;
127
128         if (!QED_IS_L2_PERSONALITY(p_hwfn))
129                 return;
130
131         if (!p_hwfn->p_l2_info)
132                 return;
133
134         if (!p_hwfn->p_l2_info->pp_qid_usage)
135                 goto out_l2_info;
136
137         /* Free until we hit the first uninitialized entry */
138         for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
139                 if (!p_hwfn->p_l2_info->pp_qid_usage[i])
140                         break;
141                 kfree(p_hwfn->p_l2_info->pp_qid_usage[i]);
142         }
143
144         kfree(p_hwfn->p_l2_info->pp_qid_usage);
145
146 out_l2_info:
147         kfree(p_hwfn->p_l2_info);
148         p_hwfn->p_l2_info = NULL;
149 }
150
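/* Claim the first free index in the queue-zone's usage bitmap and record it
 * in the CID. Returns false if the zone is out of range or fully used.
 */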
151 static bool qed_eth_queue_qid_usage_add(struct qed_hwfn *p_hwfn,
152                                         struct qed_queue_cid *p_cid)
153 {
154         struct qed_l2_info *p_l2_info = p_hwfn->p_l2_info;
155         u16 queue_id = p_cid->rel.queue_id;
156         bool b_rc = true;
157         u8 first;
158
159         mutex_lock(&p_l2_info->lock);
160
161         if (queue_id >= p_l2_info->queues) {
162                 DP_NOTICE(p_hwfn,
163                           "Requested to increase usage for qzone %04x out of %08x\n",
164                           queue_id, p_l2_info->queues);
165                 b_rc = false;
166                 goto out;
167         }
168
169         first = (u8)find_first_zero_bit(p_l2_info->pp_qid_usage[queue_id],
170                                         MAX_QUEUES_PER_QZONE);
171         if (first >= MAX_QUEUES_PER_QZONE) {
172                 b_rc = false;
173                 goto out;
174         }
175
176         __set_bit(first, p_l2_info->pp_qid_usage[queue_id]);
177         p_cid->qid_usage_idx = first;
178
179 out:
180         mutex_unlock(&p_l2_info->lock);
181         return b_rc;
182 }
183
184 static void qed_eth_queue_qid_usage_del(struct qed_hwfn *p_hwfn,
185                                         struct qed_queue_cid *p_cid)
186 {
187         mutex_lock(&p_hwfn->p_l2_info->lock);
188
189         clear_bit(p_cid->qid_usage_idx,
190                   p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]);
191
192         mutex_unlock(&p_hwfn->p_l2_info->lock);
193 }
194
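/* Release a queue CID: return the firmware CID (PF, non-legacy VF), drop
 * the qid-usage bit for the PF's own queues, then free the structure.
 */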
195 void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
196                                struct qed_queue_cid *p_cid)
197 {
198         bool b_legacy_vf = !!(p_cid->vf_legacy & QED_QCID_LEGACY_VF_CID);
199
200         if (IS_PF(p_hwfn->cdev) && !b_legacy_vf)
201                 _qed_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);
202
203         /* For a PF's VFs, the index inside the queue-zone is maintained in IOV */
204         if (p_cid->vfid == QED_QUEUE_CID_SELF)
205                 qed_eth_queue_qid_usage_del(p_hwfn, p_cid);
206
207         vfree(p_cid);
208 }
209
210 /* This internal variant is only meant to be called directly by PFs
211  * initializing CIDs for their VFs.
212  */
213 static struct qed_queue_cid *
214 _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
215                       u16 opaque_fid,
216                       u32 cid,
217                       struct qed_queue_start_common_params *p_params,
218                       bool b_is_rx,
219                       struct qed_queue_cid_vf_params *p_vf_params)
220 {
221         struct qed_queue_cid *p_cid;
222         int rc;
223
224         p_cid = vzalloc(sizeof(*p_cid));
225         if (!p_cid)
226                 return NULL;
227
228         p_cid->opaque_fid = opaque_fid;
229         p_cid->cid = cid;
230         p_cid->p_owner = p_hwfn;
231
232         /* Fill in parameters */
233         p_cid->rel.vport_id = p_params->vport_id;
234         p_cid->rel.queue_id = p_params->queue_id;
235         p_cid->rel.stats_id = p_params->stats_id;
236         p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
237         p_cid->b_is_rx = b_is_rx;
238         p_cid->sb_idx = p_params->sb_idx;
239
240         /* Fill-in bits related to VFs' queues if information was provided */
241         if (p_vf_params) {
242                 p_cid->vfid = p_vf_params->vfid;
243                 p_cid->vf_qid = p_vf_params->vf_qid;
244                 p_cid->vf_legacy = p_vf_params->vf_legacy;
245         } else {
246                 p_cid->vfid = QED_QUEUE_CID_SELF;
247         }
248
249         /* Don't try calculating the absolute indices for VFs */
250         if (IS_VF(p_hwfn->cdev)) {
251                 p_cid->abs = p_cid->rel;
252                 goto out;
253         }
254
255         /* Calculate the engine-absolute indices of the resources.
256          * This would guarantee they're valid later on.
257          * In some cases [SBs] we already have the right values.
258          */
259         rc = qed_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
260         if (rc)
261                 goto fail;
262
263         rc = qed_fw_l2_queue(p_hwfn, p_cid->rel.queue_id, &p_cid->abs.queue_id);
264         if (rc)
265                 goto fail;
266
267         /* In case of a PF configuring its VF's queues, the stats-id is already
268          * absolute [since there's a single index that's suitable per-VF].
269          */
270         if (p_cid->vfid == QED_QUEUE_CID_SELF) {
271                 rc = qed_fw_vport(p_hwfn, p_cid->rel.stats_id,
272                                   &p_cid->abs.stats_id);
273                 if (rc)
274                         goto fail;
275         } else {
276                 p_cid->abs.stats_id = p_cid->rel.stats_id;
277         }
278
279 out:
280         /* VF-images have provided the qid_usage_idx on their own.
281          * Otherwise, we need to allocate a unique one.
282          */
283         if (!p_vf_params) {
284                 if (!qed_eth_queue_qid_usage_add(p_hwfn, p_cid))
285                         goto fail;
286         } else {
287                 p_cid->qid_usage_idx = p_vf_params->qid_usage_idx;
288         }
289
290         DP_VERBOSE(p_hwfn,
291                    QED_MSG_SP,
292                    "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
293                    p_cid->opaque_fid,
294                    p_cid->cid,
295                    p_cid->rel.vport_id,
296                    p_cid->abs.vport_id,
297                    p_cid->rel.queue_id,
298                    p_cid->qid_usage_idx,
299                    p_cid->abs.queue_id,
300                    p_cid->rel.stats_id,
301                    p_cid->abs.stats_id, p_cid->sb_igu_id, p_cid->sb_idx);
302
303         return p_cid;
304
305 fail:
306         vfree(p_cid);
307         return NULL;
308 }
309
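/* Public wrapper: acquires a firmware CID when one is needed (PF, non-legacy
 * VF) before building the queue CID. A minimal, illustrative PF-side sketch
 * (names hypothetical, error handling elided):
 *
 *	struct qed_queue_start_common_params params = {
 *		.vport_id = 0, .queue_id = 0, .stats_id = 0,
 *		.p_sb = p_sb, .sb_idx = 0,
 *	};
 *	struct qed_queue_cid *p_cid;
 *
 *	p_cid = qed_eth_queue_to_cid(p_hwfn, opaque_fid, &params,
 *				     true, NULL);	// Rx queue, no VF params
 *	...
 *	qed_eth_queue_cid_release(p_hwfn, p_cid);
 */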
310 struct qed_queue_cid *
311 qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
312                      u16 opaque_fid,
313                      struct qed_queue_start_common_params *p_params,
314                      bool b_is_rx,
315                      struct qed_queue_cid_vf_params *p_vf_params)
316 {
317         struct qed_queue_cid *p_cid;
318         u8 vfid = QED_CXT_PF_CID;
319         bool b_legacy_vf = false;
320         u32 cid = 0;
321
322         /* In case of legacy VFs, the CID can be derived from the additional
323          * VF parameters - the VF assumes queue X uses CID X, so we can simply
324          * use the vf_qid for this purpose as well.
325          */
326         if (p_vf_params) {
327                 vfid = p_vf_params->vfid;
328
329                 if (p_vf_params->vf_legacy & QED_QCID_LEGACY_VF_CID) {
330                         b_legacy_vf = true;
331                         cid = p_vf_params->vf_qid;
332                 }
333         }
334
335         /* Get a unique firmware CID for this queue, in case it's a PF.
336          * VFs don't need a CID, as the queue configuration will be done
337          * by the PF.
338          */
339         if (IS_PF(p_hwfn->cdev) && !b_legacy_vf) {
340                 if (_qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
341                                          &cid, vfid)) {
342                         DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
343                         return NULL;
344                 }
345         }
346
347         p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
348                                       p_params, b_is_rx, p_vf_params);
349         if (!p_cid && IS_PF(p_hwfn->cdev) && !b_legacy_vf)
350                 _qed_cxt_release_cid(p_hwfn, cid, vfid);
351
352         return p_cid;
353 }
354
355 static struct qed_queue_cid *
356 qed_eth_queue_to_cid_pf(struct qed_hwfn *p_hwfn,
357                         u16 opaque_fid,
358                         bool b_is_rx,
359                         struct qed_queue_start_common_params *p_params)
360 {
361         return qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx,
362                                     NULL);
363 }
364
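/* Post the VPORT_START ramrod: translate the relative vport id to its
 * absolute index, program the MTU/VLAN/TPA parameters, and start with Rx in
 * drop-all mode; traffic is opened later via an accept-mode vport update.
 */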
365 int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
366                            struct qed_sp_vport_start_params *p_params)
367 {
368         struct vport_start_ramrod_data *p_ramrod = NULL;
369         struct qed_spq_entry *p_ent = NULL;
370         struct qed_sp_init_data init_data;
371         u8 abs_vport_id = 0;
372         int rc = -EINVAL;
373         u16 rx_mode = 0;
374
375         rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
376         if (rc)
377                 return rc;
378
379         memset(&init_data, 0, sizeof(init_data));
380         init_data.cid = qed_spq_get_cid(p_hwfn);
381         init_data.opaque_fid = p_params->opaque_fid;
382         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
383
384         rc = qed_sp_init_request(p_hwfn, &p_ent,
385                                  ETH_RAMROD_VPORT_START,
386                                  PROTOCOLID_ETH, &init_data);
387         if (rc)
388                 return rc;
389
390         p_ramrod                = &p_ent->ramrod.vport_start;
391         p_ramrod->vport_id      = abs_vport_id;
392
393         p_ramrod->mtu                   = cpu_to_le16(p_params->mtu);
394         p_ramrod->handle_ptp_pkts       = p_params->handle_ptp_pkts;
395         p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
396         p_ramrod->drop_ttl0_en          = p_params->drop_ttl0;
397         p_ramrod->untagged              = p_params->only_untagged;
398
399         SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
400         SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
401
402         p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);
403
404         /* TPA related fields */
405         memset(&p_ramrod->tpa_param, 0, sizeof(struct eth_vport_tpa_param));
406
407         p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;
408
409         switch (p_params->tpa_mode) {
410         case QED_TPA_MODE_GRO:
411                 p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
412                 p_ramrod->tpa_param.tpa_max_size = (u16)-1;
413                 p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
414                 p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
415                 p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
416                 p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
417                 p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
418                 p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
419                 break;
420         default:
421                 break;
422         }
423
424         p_ramrod->tx_switching_en = p_params->tx_switching;
425
426         p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
427         p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;
428
429         /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
430         p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
431                                                   p_params->concrete_fid);
432
433         return qed_spq_post(p_hwfn, p_ent, NULL);
434 }
435
436 static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
437                               struct qed_sp_vport_start_params *p_params)
438 {
439         if (IS_VF(p_hwfn->cdev)) {
440                 return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
441                                              p_params->mtu,
442                                              p_params->remove_inner_vlan,
443                                              p_params->tpa_mode,
444                                              p_params->max_buffers_per_cqe,
445                                              p_params->only_untagged);
446         }
447
448         return qed_sp_eth_vport_start(p_hwfn, p_params);
449 }
450
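/* Fill the RSS section of a vport-update ramrod. The indirection table is
 * written with engine-absolute queue ids, and its effective size is the
 * smaller of QED_RSS_IND_TABLE_SIZE and 1 << tbl_size.
 */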
451 static int
452 qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
453                         struct vport_update_ramrod_data *p_ramrod,
454                         struct qed_rss_params *p_rss)
455 {
456         struct eth_vport_rss_config *p_config;
457         u16 capabilities = 0;
458         int i, table_size;
459         int rc = 0;
460
461         if (!p_rss) {
462                 p_ramrod->common.update_rss_flg = 0;
463                 return rc;
464         }
465         p_config = &p_ramrod->rss_config;
466
467         BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE != ETH_RSS_IND_TABLE_ENTRIES_NUM);
468
469         rc = qed_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
470         if (rc)
471                 return rc;
472
473         p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
474         p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
475         p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
476         p_config->update_rss_key = p_rss->update_rss_key;
477
478         p_config->rss_mode = p_rss->rss_enable ?
479                              ETH_VPORT_RSS_MODE_REGULAR :
480                              ETH_VPORT_RSS_MODE_DISABLED;
481
482         SET_FIELD(capabilities,
483                   ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
484                   !!(p_rss->rss_caps & QED_RSS_IPV4));
485         SET_FIELD(capabilities,
486                   ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
487                   !!(p_rss->rss_caps & QED_RSS_IPV6));
488         SET_FIELD(capabilities,
489                   ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
490                   !!(p_rss->rss_caps & QED_RSS_IPV4_TCP));
491         SET_FIELD(capabilities,
492                   ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
493                   !!(p_rss->rss_caps & QED_RSS_IPV6_TCP));
494         SET_FIELD(capabilities,
495                   ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
496                   !!(p_rss->rss_caps & QED_RSS_IPV4_UDP));
497         SET_FIELD(capabilities,
498                   ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
499                   !!(p_rss->rss_caps & QED_RSS_IPV6_UDP));
500         p_config->tbl_size = p_rss->rss_table_size_log;
501
502         p_config->capabilities = cpu_to_le16(capabilities);
503
504         DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
505                    "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
506                    p_ramrod->common.update_rss_flg,
507                    p_config->rss_mode,
508                    p_config->update_rss_capabilities,
509                    p_config->capabilities,
510                    p_config->update_rss_ind_table, p_config->update_rss_key);
511
512         table_size = min_t(int, QED_RSS_IND_TABLE_SIZE,
513                            1 << p_config->tbl_size);
514         for (i = 0; i < table_size; i++) {
515                 struct qed_queue_cid *p_queue = p_rss->rss_ind_table[i];
516
517                 if (!p_queue)
518                         return -EINVAL;
519
520                 p_config->indirection_table[i] =
521                     cpu_to_le16(p_queue->abs.queue_id);
522         }
523
524         DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
525                    "Configured RSS indirection table [%d entries]:\n",
526                    table_size);
527         for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i += 0x10) {
528                 DP_VERBOSE(p_hwfn,
529                            NETIF_MSG_IFUP,
530                            "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
531                            le16_to_cpu(p_config->indirection_table[i]),
532                            le16_to_cpu(p_config->indirection_table[i + 1]),
533                            le16_to_cpu(p_config->indirection_table[i + 2]),
534                            le16_to_cpu(p_config->indirection_table[i + 3]),
535                            le16_to_cpu(p_config->indirection_table[i + 4]),
536                            le16_to_cpu(p_config->indirection_table[i + 5]),
537                            le16_to_cpu(p_config->indirection_table[i + 6]),
538                            le16_to_cpu(p_config->indirection_table[i + 7]),
539                            le16_to_cpu(p_config->indirection_table[i + 8]),
540                            le16_to_cpu(p_config->indirection_table[i + 9]),
541                            le16_to_cpu(p_config->indirection_table[i + 10]),
542                            le16_to_cpu(p_config->indirection_table[i + 11]),
543                            le16_to_cpu(p_config->indirection_table[i + 12]),
544                            le16_to_cpu(p_config->indirection_table[i + 13]),
545                            le16_to_cpu(p_config->indirection_table[i + 14]),
546                            le16_to_cpu(p_config->indirection_table[i + 15]));
547         }
548
549         for (i = 0; i < 10; i++)
550                 p_config->rss_key[i] = cpu_to_le32(p_rss->rss_key[i]);
551
552         return rc;
553 }
554
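/* Translate the driver's accept-flags into the firmware's per-vport Rx/Tx
 * mode bit-fields. Note the asymmetry: accepting either matched or unmatched
 * traffic clears DROP_ALL, while ACCEPT_ALL requires both to be accepted.
 */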
555 static void
556 qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
557                           struct vport_update_ramrod_data *p_ramrod,
558                           struct qed_filter_accept_flags accept_flags)
559 {
560         p_ramrod->common.update_rx_mode_flg =
561                 accept_flags.update_rx_mode_config;
562
563         p_ramrod->common.update_tx_mode_flg =
564                 accept_flags.update_tx_mode_config;
565
566         /* Set Rx mode accept flags */
567         if (p_ramrod->common.update_rx_mode_flg) {
568                 u8 accept_filter = accept_flags.rx_accept_filter;
569                 u16 state = 0;
570
571                 SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
572                           !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
573                             !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
574
575                 SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
576                           !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));
577
578                 SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
579                           !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
580                             !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
581
582                 SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
583                           (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
584                            !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
585
586                 SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
587                           !!(accept_filter & QED_ACCEPT_BCAST));
588
589                 SET_FIELD(state, ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI,
590                           !!(accept_filter & QED_ACCEPT_ANY_VNI));
591
592                 p_ramrod->rx_mode.state = cpu_to_le16(state);
593                 DP_VERBOSE(p_hwfn, QED_MSG_SP,
594                            "p_ramrod->rx_mode.state = 0x%x\n", state);
595         }
596
597         /* Set Tx mode accept flags */
598         if (p_ramrod->common.update_tx_mode_flg) {
599                 u8 accept_filter = accept_flags.tx_accept_filter;
600                 u16 state = 0;
601
602                 SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
603                           !!(accept_filter & QED_ACCEPT_NONE));
604
605                 SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
606                           !!(accept_filter & QED_ACCEPT_NONE));
607
608                 SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
609                           (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
610                            !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
611
612                 SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
613                           (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
614                            !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
615
616                 SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
617                           !!(accept_filter & QED_ACCEPT_BCAST));
618
619                 p_ramrod->tx_mode.state = cpu_to_le16(state);
620                 DP_VERBOSE(p_hwfn, QED_MSG_SP,
621                            "p_ramrod->tx_mode.state = 0x%x\n", state);
622         }
623 }
624
625 static void
626 qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
627                             struct vport_update_ramrod_data *p_ramrod,
628                             struct qed_sge_tpa_params *p_params)
629 {
630         struct eth_vport_tpa_param *p_tpa;
631
632         if (!p_params) {
633                 p_ramrod->common.update_tpa_param_flg = 0;
634                 p_ramrod->common.update_tpa_en_flg = 0;
636                 return;
637         }
638
639         p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
640         p_tpa = &p_ramrod->tpa_param;
641         p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
642         p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
643         p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
644         p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;
645
646         p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
647         p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
648         p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
649         p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
650         p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
651         p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
652         p_tpa->tpa_max_size = p_params->tpa_max_size;
653         p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
654         p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
655 }
656
657 static void
658 qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
659                         struct vport_update_ramrod_data *p_ramrod,
660                         struct qed_sp_vport_update_params *p_params)
661 {
662         int i;
663
664         memset(&p_ramrod->approx_mcast.bins, 0,
665                sizeof(p_ramrod->approx_mcast.bins));
666
667         if (!p_params->update_approx_mcast_flg)
668                 return;
669
670         p_ramrod->common.update_approx_mcast_flg = 1;
671         for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
672                 u32 *p_bins = p_params->bins;
673
674                 p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
675         }
676 }
677
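/* Build and post the VPORT_UPDATE ramrod. VFs forward the request to their
 * PF over the VF->PF channel; PFs copy the update flags, RSS, mcast-bin,
 * accept-mode and SGE-TPA configuration into a single ramrod.
 */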
678 int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
679                         struct qed_sp_vport_update_params *p_params,
680                         enum spq_mode comp_mode,
681                         struct qed_spq_comp_cb *p_comp_data)
682 {
683         struct qed_rss_params *p_rss_params = p_params->rss_params;
684         struct vport_update_ramrod_data_cmn *p_cmn;
685         struct qed_sp_init_data init_data;
686         struct vport_update_ramrod_data *p_ramrod = NULL;
687         struct qed_spq_entry *p_ent = NULL;
688         u8 abs_vport_id = 0, val;
689         int rc = -EINVAL;
690
691         if (IS_VF(p_hwfn->cdev)) {
692                 rc = qed_vf_pf_vport_update(p_hwfn, p_params);
693                 return rc;
694         }
695
696         rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
697         if (rc)
698                 return rc;
699
700         memset(&init_data, 0, sizeof(init_data));
701         init_data.cid = qed_spq_get_cid(p_hwfn);
702         init_data.opaque_fid = p_params->opaque_fid;
703         init_data.comp_mode = comp_mode;
704         init_data.p_comp_data = p_comp_data;
705
706         rc = qed_sp_init_request(p_hwfn, &p_ent,
707                                  ETH_RAMROD_VPORT_UPDATE,
708                                  PROTOCOLID_ETH, &init_data);
709         if (rc)
710                 return rc;
711
712         /* Copy input params to ramrod according to FW struct */
713         p_ramrod = &p_ent->ramrod.vport_update;
714         p_cmn = &p_ramrod->common;
715
716         p_cmn->vport_id = abs_vport_id;
717         p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
718         p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
719         p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
720         p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
721         p_cmn->accept_any_vlan = p_params->accept_any_vlan;
722         val = p_params->update_accept_any_vlan_flg;
723         p_cmn->update_accept_any_vlan_flg = val;
724
725         p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
726         val = p_params->update_inner_vlan_removal_flg;
727         p_cmn->update_inner_vlan_removal_en_flg = val;
728
729         p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
730         val = p_params->update_default_vlan_enable_flg;
731         p_cmn->update_default_vlan_en_flg = val;
732
733         p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan);
734         p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;
735
736         p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;
737
738         p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
739         p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;
740
741         p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
742         val = p_params->update_anti_spoofing_en_flg;
743         p_ramrod->common.update_anti_spoofing_en_flg = val;
744
745         rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
746         if (rc) {
747                 qed_sp_destroy_request(p_hwfn, p_ent);
748                 return rc;
749         }
750
751         if (p_params->update_ctl_frame_check) {
752                 p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en;
753                 p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en;
754         }
755
756         /* Update mcast bins for VFs; the PF doesn't use this functionality */
757         qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
758
759         qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
760         qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params);
761         return qed_spq_post(p_hwfn, p_ent, NULL);
762 }
763
764 int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
765 {
766         struct vport_stop_ramrod_data *p_ramrod;
767         struct qed_sp_init_data init_data;
768         struct qed_spq_entry *p_ent;
769         u8 abs_vport_id = 0;
770         int rc;
771
772         if (IS_VF(p_hwfn->cdev))
773                 return qed_vf_pf_vport_stop(p_hwfn);
774
775         rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
776         if (rc)
777                 return rc;
778
779         memset(&init_data, 0, sizeof(init_data));
780         init_data.cid = qed_spq_get_cid(p_hwfn);
781         init_data.opaque_fid = opaque_fid;
782         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
783
784         rc = qed_sp_init_request(p_hwfn, &p_ent,
785                                  ETH_RAMROD_VPORT_STOP,
786                                  PROTOCOLID_ETH, &init_data);
787         if (rc)
788                 return rc;
789
790         p_ramrod = &p_ent->ramrod.vport_stop;
791         p_ramrod->vport_id = abs_vport_id;
792
793         return qed_spq_post(p_hwfn, p_ent, NULL);
794 }
795
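/* VF helper: wrap the accept-flags in an otherwise-empty vport-update
 * request and send it to the PF over the VF->PF channel.
 */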
796 static int
797 qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn,
798                        struct qed_filter_accept_flags *p_accept_flags)
799 {
800         struct qed_sp_vport_update_params s_params;
801
802         memset(&s_params, 0, sizeof(s_params));
803         memcpy(&s_params.accept_flags, p_accept_flags,
804                sizeof(struct qed_filter_accept_flags));
805
806         return qed_vf_pf_vport_update(p_hwfn, &s_params);
807 }
808
809 static int qed_filter_accept_cmd(struct qed_dev *cdev,
810                                  u8 vport,
811                                  struct qed_filter_accept_flags accept_flags,
812                                  u8 update_accept_any_vlan,
813                                  u8 accept_any_vlan,
814                                  enum spq_mode comp_mode,
815                                  struct qed_spq_comp_cb *p_comp_data)
816 {
817         struct qed_sp_vport_update_params vport_update_params;
818         int i, rc;
819
820         /* Prepare and send the vport rx_mode change */
821         memset(&vport_update_params, 0, sizeof(vport_update_params));
822         vport_update_params.vport_id = vport;
823         vport_update_params.accept_flags = accept_flags;
824         vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
825         vport_update_params.accept_any_vlan = accept_any_vlan;
826
827         for_each_hwfn(cdev, i) {
828                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
829
830                 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
831
832                 if (IS_VF(cdev)) {
833                         rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags);
834                         if (rc)
835                                 return rc;
836                         continue;
837                 }
838
839                 rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
840                                          comp_mode, p_comp_data);
841                 if (rc) {
842                         DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
843                         return rc;
844                 }
845
846                 DP_VERBOSE(p_hwfn, QED_MSG_SP,
847                            "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
848                            accept_flags.rx_accept_filter,
849                            accept_flags.tx_accept_filter);
850                 if (update_accept_any_vlan)
851                         DP_VERBOSE(p_hwfn, QED_MSG_SP,
852                                    "accept_any_vlan=%d configured\n",
853                                    accept_any_vlan);
854         }
855
856         return 0;
857 }
858
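/* Post the RX_QUEUE_START ramrod for an already-built CID; the BD chain and
 * CQE PBL addresses are handed to firmware as little-endian regpairs.
 */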
859 int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
860                              struct qed_queue_cid *p_cid,
861                              u16 bd_max_bytes,
862                              dma_addr_t bd_chain_phys_addr,
863                              dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
864 {
865         struct rx_queue_start_ramrod_data *p_ramrod = NULL;
866         struct qed_spq_entry *p_ent = NULL;
867         struct qed_sp_init_data init_data;
868         int rc = -EINVAL;
869
870         DP_VERBOSE(p_hwfn, QED_MSG_SP,
871                    "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
872                    p_cid->opaque_fid, p_cid->cid,
873                    p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->sb_igu_id);
874
875         /* Get SPQ entry */
876         memset(&init_data, 0, sizeof(init_data));
877         init_data.cid = p_cid->cid;
878         init_data.opaque_fid = p_cid->opaque_fid;
879         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
880
881         rc = qed_sp_init_request(p_hwfn, &p_ent,
882                                  ETH_RAMROD_RX_QUEUE_START,
883                                  PROTOCOLID_ETH, &init_data);
884         if (rc)
885                 return rc;
886
887         p_ramrod = &p_ent->ramrod.rx_queue_start;
888
889         p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
890         p_ramrod->sb_index = p_cid->sb_idx;
891         p_ramrod->vport_id = p_cid->abs.vport_id;
892         p_ramrod->stats_counter_id = p_cid->abs.stats_id;
893         p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
894         p_ramrod->complete_cqe_flg = 0;
895         p_ramrod->complete_event_flg = 1;
896
897         p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
898         DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);
899
900         p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
901         DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
902
903         if (p_cid->vfid != QED_QUEUE_CID_SELF) {
904                 bool b_legacy_vf = !!(p_cid->vf_legacy &
905                                       QED_QCID_LEGACY_VF_RX_PROD);
906
907                 p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
908                 DP_VERBOSE(p_hwfn, QED_MSG_SP,
909                            "Queue%s is meant for VF rxq[%02x]\n",
910                            b_legacy_vf ? " [legacy]" : "", p_cid->vf_qid);
911                 p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf;
912         }
913
914         return qed_spq_post(p_hwfn, p_ent, NULL);
915 }
916
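/* PF-only Rx start: point the caller's producer pointer at the MSTORM
 * internal-RAM slot for this queue-zone, zero the producers, then post the
 * start ramrod.
 */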
917 static int
918 qed_eth_pf_rx_queue_start(struct qed_hwfn *p_hwfn,
919                           struct qed_queue_cid *p_cid,
920                           u16 bd_max_bytes,
921                           dma_addr_t bd_chain_phys_addr,
922                           dma_addr_t cqe_pbl_addr,
923                           u16 cqe_pbl_size, void __iomem **pp_prod)
924 {
925         u32 init_prod_val = 0;
926
927         *pp_prod = p_hwfn->regview +
928                    GTT_BAR0_MAP_REG_MSDM_RAM +
929                     MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);
930
931         /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
932         __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
933                           (u32 *)(&init_prod_val));
934
935         return qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
936                                         bd_max_bytes,
937                                         bd_chain_phys_addr,
938                                         cqe_pbl_addr, cqe_pbl_size);
939 }
940
941 static int
942 qed_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
943                        u16 opaque_fid,
944                        struct qed_queue_start_common_params *p_params,
945                        u16 bd_max_bytes,
946                        dma_addr_t bd_chain_phys_addr,
947                        dma_addr_t cqe_pbl_addr,
948                        u16 cqe_pbl_size,
949                        struct qed_rxq_start_ret_params *p_ret_params)
950 {
951         struct qed_queue_cid *p_cid;
952         int rc;
953
954         /* Allocate a CID for the queue */
955         p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params);
956         if (!p_cid)
957                 return -ENOMEM;
958
959         if (IS_PF(p_hwfn->cdev)) {
960                 rc = qed_eth_pf_rx_queue_start(p_hwfn, p_cid,
961                                                bd_max_bytes,
962                                                bd_chain_phys_addr,
963                                                cqe_pbl_addr, cqe_pbl_size,
964                                                &p_ret_params->p_prod);
965         } else {
966                 rc = qed_vf_pf_rxq_start(p_hwfn, p_cid,
967                                          bd_max_bytes,
968                                          bd_chain_phys_addr,
969                                          cqe_pbl_addr,
970                                          cqe_pbl_size, &p_ret_params->p_prod);
971         }
972
973         /* Provide the caller with a reference to use as a handle */
974         if (rc)
975                 qed_eth_queue_cid_release(p_hwfn, p_cid);
976         else
977                 p_ret_params->p_handle = (void *)p_cid;
978
979         return rc;
980 }
981
982 int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
983                                 void **pp_rxq_handles,
984                                 u8 num_rxqs,
985                                 u8 complete_cqe_flg,
986                                 u8 complete_event_flg,
987                                 enum spq_mode comp_mode,
988                                 struct qed_spq_comp_cb *p_comp_data)
989 {
990         struct rx_queue_update_ramrod_data *p_ramrod = NULL;
991         struct qed_spq_entry *p_ent = NULL;
992         struct qed_sp_init_data init_data;
993         struct qed_queue_cid *p_cid;
994         int rc = -EINVAL;
995         u8 i;
996
997         memset(&init_data, 0, sizeof(init_data));
998         init_data.comp_mode = comp_mode;
999         init_data.p_comp_data = p_comp_data;
1000
1001         for (i = 0; i < num_rxqs; i++) {
1002                 p_cid = ((struct qed_queue_cid **)pp_rxq_handles)[i];
1003
1004                 /* Get SPQ entry */
1005                 init_data.cid = p_cid->cid;
1006                 init_data.opaque_fid = p_cid->opaque_fid;
1007
1008                 rc = qed_sp_init_request(p_hwfn, &p_ent,
1009                                          ETH_RAMROD_RX_QUEUE_UPDATE,
1010                                          PROTOCOLID_ETH, &init_data);
1011                 if (rc)
1012                         return rc;
1013
1014                 p_ramrod = &p_ent->ramrod.rx_queue_update;
1015                 p_ramrod->vport_id = p_cid->abs.vport_id;
1016
1017                 p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
1018                 p_ramrod->complete_cqe_flg = complete_cqe_flg;
1019                 p_ramrod->complete_event_flg = complete_event_flg;
1020
1021                 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1022                 if (rc)
1023                         return rc;
1024         }
1025
1026         return rc;
1027 }
1028
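/* PF-only Rx stop. The completion flags below decide whether firmware
 * answers on the CQE ring, the event queue, or both.
 */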
1029 static int
1030 qed_eth_pf_rx_queue_stop(struct qed_hwfn *p_hwfn,
1031                          struct qed_queue_cid *p_cid,
1032                          bool b_eq_completion_only, bool b_cqe_completion)
1033 {
1034         struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
1035         struct qed_spq_entry *p_ent = NULL;
1036         struct qed_sp_init_data init_data;
1037         int rc;
1038
1039         memset(&init_data, 0, sizeof(init_data));
1040         init_data.cid = p_cid->cid;
1041         init_data.opaque_fid = p_cid->opaque_fid;
1042         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1043
1044         rc = qed_sp_init_request(p_hwfn, &p_ent,
1045                                  ETH_RAMROD_RX_QUEUE_STOP,
1046                                  PROTOCOLID_ETH, &init_data);
1047         if (rc)
1048                 return rc;
1049
1050         p_ramrod = &p_ent->ramrod.rx_queue_stop;
1051         p_ramrod->vport_id = p_cid->abs.vport_id;
1052         p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
1053
1054         /* Cleaning the queue requires the completion to arrive there.
1055          * In addition, VFs require the answer to arrive as an EQE to the PF.
1056          */
1057         p_ramrod->complete_cqe_flg = ((p_cid->vfid == QED_QUEUE_CID_SELF) &&
1058                                       !b_eq_completion_only) ||
1059                                      b_cqe_completion;
1060         p_ramrod->complete_event_flg = (p_cid->vfid != QED_QUEUE_CID_SELF) ||
1061                                        b_eq_completion_only;
1062
1063         return qed_spq_post(p_hwfn, p_ent, NULL);
1064 }
1065
1066 int qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
1067                           void *p_rxq,
1068                           bool eq_completion_only, bool cqe_completion)
1069 {
1070         struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_rxq;
1071         int rc = -EINVAL;
1072
1073         if (IS_PF(p_hwfn->cdev))
1074                 rc = qed_eth_pf_rx_queue_stop(p_hwfn, p_cid,
1075                                               eq_completion_only,
1076                                               cqe_completion);
1077         else
1078                 rc = qed_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);
1079
1080         if (!rc)
1081                 qed_eth_queue_cid_release(p_hwfn, p_cid);
1082         return rc;
1083 }
1084
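/* Tx counterpart of the Rx start ramrod: besides the PBL, firmware needs the
 * QM physical-queue id, which the PF start path derives from the traffic
 * class (see qed_eth_pf_tx_queue_start() below).
 */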
1085 int
1086 qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
1087                          struct qed_queue_cid *p_cid,
1088                          dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id)
1089 {
1090         struct tx_queue_start_ramrod_data *p_ramrod = NULL;
1091         struct qed_spq_entry *p_ent = NULL;
1092         struct qed_sp_init_data init_data;
1093         int rc = -EINVAL;
1094
1095         /* Get SPQ entry */
1096         memset(&init_data, 0, sizeof(init_data));
1097         init_data.cid = p_cid->cid;
1098         init_data.opaque_fid = p_cid->opaque_fid;
1099         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1100
1101         rc = qed_sp_init_request(p_hwfn, &p_ent,
1102                                  ETH_RAMROD_TX_QUEUE_START,
1103                                  PROTOCOLID_ETH, &init_data);
1104         if (rc)
1105                 return rc;
1106
1107         p_ramrod = &p_ent->ramrod.tx_queue_start;
1108         p_ramrod->vport_id = p_cid->abs.vport_id;
1109
1110         p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
1111         p_ramrod->sb_index = p_cid->sb_idx;
1112         p_ramrod->stats_counter_id = p_cid->abs.stats_id;
1113
1114         p_ramrod->queue_zone_id = cpu_to_le16(p_cid->abs.queue_id);
1115         p_ramrod->same_as_last_id = cpu_to_le16(p_cid->abs.queue_id);
1116
1117         p_ramrod->pbl_size = cpu_to_le16(pbl_size);
1118         DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
1119
1120         p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
1121
1122         return qed_spq_post(p_hwfn, p_ent, NULL);
1123 }
1124
1125 static int
1126 qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn,
1127                           struct qed_queue_cid *p_cid,
1128                           u8 tc,
1129                           dma_addr_t pbl_addr,
1130                           u16 pbl_size, void __iomem **pp_doorbell)
1131 {
1132         int rc;
1133
1135         rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
1136                                       pbl_addr, pbl_size,
1137                                       qed_get_cm_pq_idx_mcos(p_hwfn, tc));
1138         if (rc)
1139                 return rc;
1140
1141         /* Provide the caller with the necessary return values */
1142         *pp_doorbell = p_hwfn->doorbells +
1143                        qed_db_addr(p_cid->cid, DQ_DEMS_LEGACY);
1144
1145         return 0;
1146 }
1147
1148 static int
1149 qed_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
1150                        u16 opaque_fid,
1151                        struct qed_queue_start_common_params *p_params,
1152                        u8 tc,
1153                        dma_addr_t pbl_addr,
1154                        u16 pbl_size,
1155                        struct qed_txq_start_ret_params *p_ret_params)
1156 {
1157         struct qed_queue_cid *p_cid;
1158         int rc;
1159
1160         p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params);
1161         if (!p_cid)
1162                 return -EINVAL;
1163
1164         if (IS_PF(p_hwfn->cdev))
1165                 rc = qed_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
1166                                                pbl_addr, pbl_size,
1167                                                &p_ret_params->p_doorbell);
1168         else
1169                 rc = qed_vf_pf_txq_start(p_hwfn, p_cid,
1170                                          pbl_addr, pbl_size,
1171                                          &p_ret_params->p_doorbell);
1172
1173         if (rc)
1174                 qed_eth_queue_cid_release(p_hwfn, p_cid);
1175         else
1176                 p_ret_params->p_handle = (void *)p_cid;
1177
1178         return rc;
1179 }
1180
1181 static int
1182 qed_eth_pf_tx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
1183 {
1184         struct qed_spq_entry *p_ent = NULL;
1185         struct qed_sp_init_data init_data;
1186         int rc;
1187
1188         memset(&init_data, 0, sizeof(init_data));
1189         init_data.cid = p_cid->cid;
1190         init_data.opaque_fid = p_cid->opaque_fid;
1191         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1192
1193         rc = qed_sp_init_request(p_hwfn, &p_ent,
1194                                  ETH_RAMROD_TX_QUEUE_STOP,
1195                                  PROTOCOLID_ETH, &init_data);
1196         if (rc)
1197                 return rc;
1198
1199         return qed_spq_post(p_hwfn, p_ent, NULL);
1200 }
1201
1202 int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_handle)
1203 {
1204         struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_handle;
1205         int rc;
1206
1207         if (IS_PF(p_hwfn->cdev))
1208                 rc = qed_eth_pf_tx_queue_stop(p_hwfn, p_cid);
1209         else
1210                 rc = qed_vf_pf_txq_stop(p_hwfn, p_cid);
1211
1212         if (!rc)
1213                 qed_eth_queue_cid_release(p_hwfn, p_cid);
1214         return rc;
1215 }
1216
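/* Map the driver-level filter opcode onto the firmware action. Opcodes that
 * need two filter commands (MOVE/REPLACE) are handled separately by the
 * caller; anything else unsupported maps to MAX_ETH_FILTER_ACTION.
 */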
1217 static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode)
1218 {
1219         enum eth_filter_action action = MAX_ETH_FILTER_ACTION;
1220
1221         switch (opcode) {
1222         case QED_FILTER_ADD:
1223                 action = ETH_FILTER_ACTION_ADD;
1224                 break;
1225         case QED_FILTER_REMOVE:
1226                 action = ETH_FILTER_ACTION_REMOVE;
1227                 break;
1228         case QED_FILTER_FLUSH:
1229                 action = ETH_FILTER_ACTION_REMOVE_ALL;
1230                 break;
1231         default:
1232                 action = MAX_ETH_FILTER_ACTION;
1233         }
1234
1235         return action;
1236 }
1237
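/* Common setup for unicast filter ramrods: resolve the absolute vports,
 * allocate the SPQ entry and fill one or two filter commands depending on
 * the opcode (MOVE/REPLACE consume both command slots).
 */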
1238 static int
1239 qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
1240                         u16 opaque_fid,
1241                         struct qed_filter_ucast *p_filter_cmd,
1242                         struct vport_filter_update_ramrod_data **pp_ramrod,
1243                         struct qed_spq_entry **pp_ent,
1244                         enum spq_mode comp_mode,
1245                         struct qed_spq_comp_cb *p_comp_data)
1246 {
1247         u8 vport_to_add_to = 0, vport_to_remove_from = 0;
1248         struct vport_filter_update_ramrod_data *p_ramrod;
1249         struct eth_filter_cmd *p_first_filter;
1250         struct eth_filter_cmd *p_second_filter;
1251         struct qed_sp_init_data init_data;
1252         enum eth_filter_action action;
1253         int rc;
1254
1255         rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
1256                           &vport_to_remove_from);
1257         if (rc)
1258                 return rc;
1259
1260         rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
1261                           &vport_to_add_to);
1262         if (rc)
1263                 return rc;
1264
1265         /* Get SPQ entry */
1266         memset(&init_data, 0, sizeof(init_data));
1267         init_data.cid = qed_spq_get_cid(p_hwfn);
1268         init_data.opaque_fid = opaque_fid;
1269         init_data.comp_mode = comp_mode;
1270         init_data.p_comp_data = p_comp_data;
1271
1272         rc = qed_sp_init_request(p_hwfn, pp_ent,
1273                                  ETH_RAMROD_FILTERS_UPDATE,
1274                                  PROTOCOLID_ETH, &init_data);
1275         if (rc)
1276                 return rc;
1277
1278         *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
1279         p_ramrod = *pp_ramrod;
1280         p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
1281         p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;
1282
1283         switch (p_filter_cmd->opcode) {
1284         case QED_FILTER_REPLACE:
1285         case QED_FILTER_MOVE:
1286                 p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
1287         default:
1288                 p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
1289         }
1290
1291         p_first_filter  = &p_ramrod->filter_cmds[0];
1292         p_second_filter = &p_ramrod->filter_cmds[1];
1293
1294         switch (p_filter_cmd->type) {
1295         case QED_FILTER_MAC:
1296                 p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
1297         case QED_FILTER_VLAN:
1298                 p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
1299         case QED_FILTER_MAC_VLAN:
1300                 p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
1301         case QED_FILTER_INNER_MAC:
1302                 p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
1303         case QED_FILTER_INNER_VLAN:
1304                 p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
1305         case QED_FILTER_INNER_PAIR:
1306                 p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
1307         case QED_FILTER_INNER_MAC_VNI_PAIR:
1308                 p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
1309                 break;
1310         case QED_FILTER_MAC_VNI_PAIR:
1311                 p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
1312         case QED_FILTER_VNI:
1313                 p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
1314         }
1315
1316         if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
1317             (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
1318             (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
1319             (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
1320             (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
1321             (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
1322                 qed_set_fw_mac_addr(&p_first_filter->mac_msb,
1323                                     &p_first_filter->mac_mid,
1324                                     &p_first_filter->mac_lsb,
1325                                     (u8 *)p_filter_cmd->mac);
1326         }
1327
1328         if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
1329             (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
1330             (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
1331             (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
1332                 p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);
1333
1334         if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
1335             (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
1336             (p_first_filter->type == ETH_FILTER_TYPE_VNI))
1337                 p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);
1338
1339         if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
1340                 p_second_filter->type = p_first_filter->type;
1341                 p_second_filter->mac_msb = p_first_filter->mac_msb;
1342                 p_second_filter->mac_mid = p_first_filter->mac_mid;
1343                 p_second_filter->mac_lsb = p_first_filter->mac_lsb;
1344                 p_second_filter->vlan_id = p_first_filter->vlan_id;
1345                 p_second_filter->vni = p_first_filter->vni;
1346
1347                 p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
1348
1349                 p_first_filter->vport_id = vport_to_remove_from;
1350
1351                 p_second_filter->action = ETH_FILTER_ACTION_ADD;
1352                 p_second_filter->vport_id = vport_to_add_to;
1353         } else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
1354                 p_first_filter->vport_id = vport_to_add_to;
1355                 memcpy(p_second_filter, p_first_filter,
1356                        sizeof(*p_second_filter));
1357                 p_first_filter->action  = ETH_FILTER_ACTION_REMOVE_ALL;
1358                 p_second_filter->action = ETH_FILTER_ACTION_ADD;
1359         } else {
1360                 action = qed_filter_action(p_filter_cmd->opcode);
1361
1362                 if (action == MAX_ETH_FILTER_ACTION) {
1363                         DP_NOTICE(p_hwfn,
1364                                   "Filter opcode %d is not supported yet\n",
1365                                   p_filter_cmd->opcode);
1366                         qed_sp_destroy_request(p_hwfn, *pp_ent);
1367                         return -EINVAL;
1368                 }
1369
1370                 p_first_filter->action = action;
1371                 p_first_filter->vport_id = (p_filter_cmd->opcode ==
1372                                             QED_FILTER_REMOVE) ?
1373                                            vport_to_remove_from :
1374                                            vport_to_add_to;
1375         }
1376
1377         return 0;
1378 }
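
/* Summary of the compound opcodes built above: MOVE becomes a REMOVE on
 * vport_to_remove_from plus an ADD on vport_to_add_to, while REPLACE becomes
 * a REMOVE_ALL plus an ADD on vport_to_add_to - either way two filter
 * commands are carried in the single ramrod.
 */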
1379
1380 int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
1381                             u16 opaque_fid,
1382                             struct qed_filter_ucast *p_filter_cmd,
1383                             enum spq_mode comp_mode,
1384                             struct qed_spq_comp_cb *p_comp_data)
1385 {
1386         struct vport_filter_update_ramrod_data  *p_ramrod       = NULL;
1387         struct qed_spq_entry                    *p_ent          = NULL;
1388         struct eth_filter_cmd_header            *p_header;
1389         int                                     rc;
1390
1391         rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
1392                                      &p_ramrod, &p_ent,
1393                                      comp_mode, p_comp_data);
1394         if (rc) {
1395                 DP_ERR(p_hwfn, "Unicast filter command failed %d\n", rc);
1396                 return rc;
1397         }
1398         p_header = &p_ramrod->filter_cmd_hdr;
1399         p_header->assert_on_error = p_filter_cmd->assert_on_error;
1400
1401         rc = qed_spq_post(p_hwfn, p_ent, NULL);
1402         if (rc) {
1403                 DP_ERR(p_hwfn, "Unicast filter ramrod post failed %d\n", rc);
1404                 return rc;
1405         }
1406
1407         DP_VERBOSE(p_hwfn, QED_MSG_SP,
1408                    "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
1409                    (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
1410                    ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
1411                    "REMOVE" :
1412                    ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
1413                     "MOVE" : "REPLACE")),
1414                    (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
1415                    ((p_filter_cmd->type == QED_FILTER_VLAN) ?
1416                     "VLAN" : "MAC & VLAN"),
1417                    p_ramrod->filter_cmd_hdr.cmd_cnt,
1418                    p_filter_cmd->is_rx_filter,
1419                    p_filter_cmd->is_tx_filter);
1420         DP_VERBOSE(p_hwfn, QED_MSG_SP,
1421                    "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %02x:%02x:%02x:%02x:%02x:%02x, vlan = %d\n",
1422                    p_filter_cmd->vport_to_add_to,
1423                    p_filter_cmd->vport_to_remove_from,
1424                    p_filter_cmd->mac[0],
1425                    p_filter_cmd->mac[1],
1426                    p_filter_cmd->mac[2],
1427                    p_filter_cmd->mac[3],
1428                    p_filter_cmd->mac[4],
1429                    p_filter_cmd->mac[5],
1430                    p_filter_cmd->vlan);
1431
1432         return 0;
1433 }
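
/* A minimal usage sketch (hypothetical values) for the helper above, adding
 * a unicast MAC filter on the hwfn's vport 0:
 *
 *	struct qed_filter_ucast ucast = {
 *		.opcode = QED_FILTER_ADD,
 *		.type = QED_FILTER_MAC,
 *		.is_rx_filter = 1,
 *		.is_tx_filter = 1,
 *	};
 *
 *	ether_addr_copy(ucast.mac, mac);
 *	qed_sp_eth_filter_ucast(p_hwfn, p_hwfn->hw_info.opaque_fid, &ucast,
 *				QED_SPQ_MODE_EBLOCK, NULL);
 *
 * In practice callers go through qed_filter_ucast_cmd() below, which walks
 * all hwfns of the device.
 */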
1434
1435 /*******************************************************************************
1436  * Description:
1437  *         Calculates a CRC-32C over a buffer, one bit at a time
1438  *         Note: crc32_length MUST be a multiple of 8 (it is a byte count)
1439  * Return: the resulting CRC; on invalid input crc32_seed is returned as-is
1440  ******************************************************************************/
1441 static u32 qed_calc_crc32c(u8 *crc32_packet,
1442                            u32 crc32_length, u32 crc32_seed, u8 complement)
1443 {
1444         u32 byte = 0, bit = 0, crc32_result = crc32_seed;
1445         u8 msb = 0, current_byte = 0;
1446
1447         if ((!crc32_packet) ||
1448             (crc32_length == 0) ||
1449             ((crc32_length % 8) != 0))
1450                 return crc32_result;
1451         for (byte = 0; byte < crc32_length; byte++) {
1452                 current_byte = crc32_packet[byte];
1453                 for (bit = 0; bit < 8; bit++) {
1454                         msb = (u8)(crc32_result >> 31);
1455                         crc32_result = crc32_result << 1;
1456                         if (msb != (0x1 & (current_byte >> bit))) {
1457                                 crc32_result = crc32_result ^ CRC32_POLY;
1458                                 crc32_result |= 1; /* set bit 0 */
1459                         }
1460                 }
1461         }
1462         return crc32_result;
1463 }
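
/* The loop above is a bit-serial CRC using CRC32_POLY (0x1edc6f41, the
 * CRC-32C/Castagnoli polynomial), consuming each byte LSB-first: the running
 * value is shifted left and, whenever the shifted-out MSB differs from the
 * input bit, XORed with the polynomial and has bit 0 forced to 1.  Note the
 * 'complement' argument is currently unused and no final inversion is done.
 */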
1464
1465 static u32 qed_crc32c_le(u32 seed, u8 *mac, u32 len)
1466 {
1467         u32 packet_buf[2] = { 0 };
1468
1469         memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
1470         return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
1471 }
1472
1473 u8 qed_mcast_bin_from_mac(u8 *mac)
1474 {
1475         u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
1476                                 mac, ETH_ALEN);
1477
1478         return crc & 0xff;
1479 }
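
/* The low 8 bits of the CRC select one of 256 approximate-multicast bins.
 * qed_sp_eth_filter_mcast() below sets the corresponding bit across the
 * eight 32-bit approx_mcast registers (bins[bit / 32] |= 1 << (bit % 32)),
 * so several MACs can share a bin - matching is approximate by design.
 */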
1480
1481 static int
1482 qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
1483                         u16 opaque_fid,
1484                         struct qed_filter_mcast *p_filter_cmd,
1485                         enum spq_mode comp_mode,
1486                         struct qed_spq_comp_cb *p_comp_data)
1487 {
1488         struct vport_update_ramrod_data *p_ramrod = NULL;
1489         u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
1490         struct qed_spq_entry *p_ent = NULL;
1491         struct qed_sp_init_data init_data;
1492         u8 abs_vport_id = 0;
1493         int rc, i;
1494
1495         if (p_filter_cmd->opcode == QED_FILTER_ADD)
1496                 rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
1497                                   &abs_vport_id);
1498         else
1499                 rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
1500                                   &abs_vport_id);
1501         if (rc)
1502                 return rc;
1503
1504         /* Get SPQ entry */
1505         memset(&init_data, 0, sizeof(init_data));
1506         init_data.cid = qed_spq_get_cid(p_hwfn);
1507         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1508         init_data.comp_mode = comp_mode;
1509         init_data.p_comp_data = p_comp_data;
1510
1511         rc = qed_sp_init_request(p_hwfn, &p_ent,
1512                                  ETH_RAMROD_VPORT_UPDATE,
1513                                  PROTOCOLID_ETH, &init_data);
1514         if (rc) {
1515                 DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
1516                 return rc;
1517         }
1518
1519         p_ramrod = &p_ent->ramrod.vport_update;
1520         p_ramrod->common.update_approx_mcast_flg = 1;
1521
1522         /* explicitly clear out the entire vector */
1523         memset(&p_ramrod->approx_mcast.bins, 0,
1524                sizeof(p_ramrod->approx_mcast.bins));
1525         memset(bins, 0, sizeof(bins));
1526         /* The filter ADD op is an explicit set op and removes
1527          * any existing filters for the vport
1528          */
1529         if (p_filter_cmd->opcode == QED_FILTER_ADD) {
1530                 for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
1531                         u32 bit, nbits;
1532
1533                         bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
1534                         nbits = sizeof(u32) * BITS_PER_BYTE;
1535                         bins[bit / nbits] |= 1 << (bit % nbits);
1536                 }
1537
1538                 /* Convert to the correct endianness */
1539                 for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
1540                         struct vport_update_ramrod_mcast *p_ramrod_bins;
1541
1542                         p_ramrod_bins = &p_ramrod->approx_mcast;
1543                         p_ramrod_bins->bins[i] = cpu_to_le32(bins[i]);
1544                 }
1545         }
1546
1547         p_ramrod->common.vport_id = abs_vport_id;
1548
1549         return qed_spq_post(p_hwfn, p_ent, NULL);
1550 }
1551
1552 static int qed_filter_mcast_cmd(struct qed_dev *cdev,
1553                                 struct qed_filter_mcast *p_filter_cmd,
1554                                 enum spq_mode comp_mode,
1555                                 struct qed_spq_comp_cb *p_comp_data)
1556 {
1557         int rc = 0;
1558         int i;
1559
1560         /* Only ADD and REMOVE operations are supported for multicast */
1561         if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
1562              p_filter_cmd->opcode != QED_FILTER_REMOVE) ||
1563             (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
1564                 return -EINVAL;
1565
1566         for_each_hwfn(cdev, i) {
1567                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1569                 u16 opaque_fid;
1570
1571                 if (IS_VF(cdev)) {
1572                         qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
1573                         continue;
1574                 }
1575
1576                 opaque_fid = p_hwfn->hw_info.opaque_fid;
1577
1578                 rc = qed_sp_eth_filter_mcast(p_hwfn,
1579                                              opaque_fid,
1580                                              p_filter_cmd,
1581                                              comp_mode, p_comp_data);
1582         }
1583         return rc;
1584 }
1585
1586 static int qed_filter_ucast_cmd(struct qed_dev *cdev,
1587                                 struct qed_filter_ucast *p_filter_cmd,
1588                                 enum spq_mode comp_mode,
1589                                 struct qed_spq_comp_cb *p_comp_data)
1590 {
1591         int rc = 0;
1592         int i;
1593
1594         for_each_hwfn(cdev, i) {
1595                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1596                 u16 opaque_fid;
1597
1598                 if (IS_VF(cdev)) {
1599                         rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
1600                         continue;
1601                 }
1602
1603                 opaque_fid = p_hwfn->hw_info.opaque_fid;
1604
1605                 rc = qed_sp_eth_filter_ucast(p_hwfn,
1606                                              opaque_fid,
1607                                              p_filter_cmd,
1608                                              comp_mode, p_comp_data);
1609                 if (rc)
1610                         break;
1611         }
1612
1613         return rc;
1614 }
1615
1616 /* Statistics related code */
1617 static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
1618                                            u32 *p_addr,
1619                                            u32 *p_len, u16 statistics_bin)
1620 {
1621         if (IS_PF(p_hwfn->cdev)) {
1622                 *p_addr = BAR0_MAP_REG_PSDM_RAM +
1623                     PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1624                 *p_len = sizeof(struct eth_pstorm_per_queue_stat);
1625         } else {
1626                 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1627                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1628
1629                 *p_addr = p_resp->pfdev_info.stats_info.pstats.address;
1630                 *p_len = p_resp->pfdev_info.stats_info.pstats.len;
1631         }
1632 }
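
/* The same PF/VF split recurs for each storm below: a PF reads the queue
 * statistics straight out of the storm RAM in BAR0, while a VF uses the
 * address/length the PF advertised in its acquire response.
 */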
1633
1634 static noinline_for_stack void
1635 __qed_get_vport_pstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
1636                        struct qed_eth_stats *p_stats, u16 statistics_bin)
1637 {
1638         struct eth_pstorm_per_queue_stat pstats;
1639         u32 pstats_addr = 0, pstats_len = 0;
1640
1641         __qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
1642                                        statistics_bin);
1643
1644         memset(&pstats, 0, sizeof(pstats));
1645         qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
1646
1647         p_stats->common.tx_ucast_bytes +=
1648             HILO_64_REGPAIR(pstats.sent_ucast_bytes);
1649         p_stats->common.tx_mcast_bytes +=
1650             HILO_64_REGPAIR(pstats.sent_mcast_bytes);
1651         p_stats->common.tx_bcast_bytes +=
1652             HILO_64_REGPAIR(pstats.sent_bcast_bytes);
1653         p_stats->common.tx_ucast_pkts +=
1654             HILO_64_REGPAIR(pstats.sent_ucast_pkts);
1655         p_stats->common.tx_mcast_pkts +=
1656             HILO_64_REGPAIR(pstats.sent_mcast_pkts);
1657         p_stats->common.tx_bcast_pkts +=
1658             HILO_64_REGPAIR(pstats.sent_bcast_pkts);
1659         p_stats->common.tx_err_drop_pkts +=
1660             HILO_64_REGPAIR(pstats.error_drop_pkts);
1661 }
1662
1663 static noinline_for_stack void
1664 __qed_get_vport_tstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
1665                        struct qed_eth_stats *p_stats, u16 statistics_bin)
1666 {
1667         struct tstorm_per_port_stat tstats;
1668         u32 tstats_addr, tstats_len;
1669
1670         if (IS_PF(p_hwfn->cdev)) {
1671                 tstats_addr = BAR0_MAP_REG_TSDM_RAM +
1672                     TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
1673                 tstats_len = sizeof(struct tstorm_per_port_stat);
1674         } else {
1675                 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1676                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1677
1678                 tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
1679                 tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
1680         }
1681
1682         memset(&tstats, 0, sizeof(tstats));
1683         qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
1684
1685         p_stats->common.mftag_filter_discards +=
1686             HILO_64_REGPAIR(tstats.mftag_filter_discard);
1687         p_stats->common.mac_filter_discards +=
1688             HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
1689         p_stats->common.gft_filter_drop +=
1690                 HILO_64_REGPAIR(tstats.eth_gft_drop_pkt);
1691 }
1692
1693 static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
1694                                            u32 *p_addr,
1695                                            u32 *p_len, u16 statistics_bin)
1696 {
1697         if (IS_PF(p_hwfn->cdev)) {
1698                 *p_addr = BAR0_MAP_REG_USDM_RAM +
1699                     USTORM_QUEUE_STAT_OFFSET(statistics_bin);
1700                 *p_len = sizeof(struct eth_ustorm_per_queue_stat);
1701         } else {
1702                 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1703                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1704
1705                 *p_addr = p_resp->pfdev_info.stats_info.ustats.address;
1706                 *p_len = p_resp->pfdev_info.stats_info.ustats.len;
1707         }
1708 }
1709
1710 static noinline_for_stack
1711 void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
1712                             struct qed_eth_stats *p_stats, u16 statistics_bin)
1713 {
1714         struct eth_ustorm_per_queue_stat ustats;
1715         u32 ustats_addr = 0, ustats_len = 0;
1716
1717         __qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
1718                                        statistics_bin);
1719
1720         memset(&ustats, 0, sizeof(ustats));
1721         qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
1722
1723         p_stats->common.rx_ucast_bytes +=
1724             HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1725         p_stats->common.rx_mcast_bytes +=
1726             HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1727         p_stats->common.rx_bcast_bytes +=
1728             HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1729         p_stats->common.rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1730         p_stats->common.rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1731         p_stats->common.rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1732 }
1733
1734 static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
1735                                            u32 *p_addr,
1736                                            u32 *p_len, u16 statistics_bin)
1737 {
1738         if (IS_PF(p_hwfn->cdev)) {
1739                 *p_addr = BAR0_MAP_REG_MSDM_RAM +
1740                     MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1741                 *p_len = sizeof(struct eth_mstorm_per_queue_stat);
1742         } else {
1743                 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1744                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1745
1746                 *p_addr = p_resp->pfdev_info.stats_info.mstats.address;
1747                 *p_len = p_resp->pfdev_info.stats_info.mstats.len;
1748         }
1749 }
1750
1751 static noinline_for_stack void
1752 __qed_get_vport_mstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
1753                        struct qed_eth_stats *p_stats, u16 statistics_bin)
1754 {
1755         struct eth_mstorm_per_queue_stat mstats;
1756         u32 mstats_addr = 0, mstats_len = 0;
1757
1758         __qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
1759                                        statistics_bin);
1760
1761         memset(&mstats, 0, sizeof(mstats));
1762         qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
1763
1764         p_stats->common.no_buff_discards +=
1765             HILO_64_REGPAIR(mstats.no_buff_discard);
1766         p_stats->common.packet_too_big_discard +=
1767             HILO_64_REGPAIR(mstats.packet_too_big_discard);
1768         p_stats->common.ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
1769         p_stats->common.tpa_coalesced_pkts +=
1770             HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
1771         p_stats->common.tpa_coalesced_events +=
1772             HILO_64_REGPAIR(mstats.tpa_coalesced_events);
1773         p_stats->common.tpa_aborts_num +=
1774             HILO_64_REGPAIR(mstats.tpa_aborts_num);
1775         p_stats->common.tpa_coalesced_bytes +=
1776             HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
1777 }
1778
1779 static noinline_for_stack void
1780 __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
1781                            struct qed_eth_stats *p_stats)
1782 {
1783         struct qed_eth_stats_common *p_common = &p_stats->common;
1784         struct port_stats port_stats;
1785         int j;
1786
1787         memset(&port_stats, 0, sizeof(port_stats));
1788
1789         qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
1790                         p_hwfn->mcp_info->port_addr +
1791                         offsetof(struct public_port, stats),
1792                         sizeof(port_stats));
1793
1794         p_common->rx_64_byte_packets += port_stats.eth.r64;
1795         p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
1796         p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
1797         p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
1798         p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
1799         p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
1800         p_common->rx_crc_errors += port_stats.eth.rfcs;
1801         p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
1802         p_common->rx_pause_frames += port_stats.eth.rxpf;
1803         p_common->rx_pfc_frames += port_stats.eth.rxpp;
1804         p_common->rx_align_errors += port_stats.eth.raln;
1805         p_common->rx_carrier_errors += port_stats.eth.rfcr;
1806         p_common->rx_oversize_packets += port_stats.eth.rovr;
1807         p_common->rx_jabbers += port_stats.eth.rjbr;
1808         p_common->rx_undersize_packets += port_stats.eth.rund;
1809         p_common->rx_fragments += port_stats.eth.rfrg;
1810         p_common->tx_64_byte_packets += port_stats.eth.t64;
1811         p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
1812         p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
1813         p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
1814         p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
1815         p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
1816         p_common->tx_pause_frames += port_stats.eth.txpf;
1817         p_common->tx_pfc_frames += port_stats.eth.txpp;
1818         p_common->rx_mac_bytes += port_stats.eth.rbyte;
1819         p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
1820         p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
1821         p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
1822         p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
1823         p_common->tx_mac_bytes += port_stats.eth.tbyte;
1824         p_common->tx_mac_uc_packets += port_stats.eth.txuca;
1825         p_common->tx_mac_mc_packets += port_stats.eth.txmca;
1826         p_common->tx_mac_bc_packets += port_stats.eth.txbca;
1827         p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
1828         for (j = 0; j < 8; j++) {
1829                 p_common->brb_truncates += port_stats.brb.brb_truncate[j];
1830                 p_common->brb_discards += port_stats.brb.brb_discard[j];
1831         }
1832
1833         if (QED_IS_BB(p_hwfn->cdev)) {
1834                 struct qed_eth_stats_bb *p_bb = &p_stats->bb;
1835
1836                 p_bb->rx_1519_to_1522_byte_packets +=
1837                     port_stats.eth.u0.bb0.r1522;
1838                 p_bb->rx_1519_to_2047_byte_packets +=
1839                     port_stats.eth.u0.bb0.r2047;
1840                 p_bb->rx_2048_to_4095_byte_packets +=
1841                     port_stats.eth.u0.bb0.r4095;
1842                 p_bb->rx_4096_to_9216_byte_packets +=
1843                     port_stats.eth.u0.bb0.r9216;
1844                 p_bb->rx_9217_to_16383_byte_packets +=
1845                     port_stats.eth.u0.bb0.r16383;
1846                 p_bb->tx_1519_to_2047_byte_packets +=
1847                     port_stats.eth.u1.bb1.t2047;
1848                 p_bb->tx_2048_to_4095_byte_packets +=
1849                     port_stats.eth.u1.bb1.t4095;
1850                 p_bb->tx_4096_to_9216_byte_packets +=
1851                     port_stats.eth.u1.bb1.t9216;
1852                 p_bb->tx_9217_to_16383_byte_packets +=
1853                     port_stats.eth.u1.bb1.t16383;
1854                 p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
1855                 p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
1856         } else {
1857                 struct qed_eth_stats_ah *p_ah = &p_stats->ah;
1858
1859                 p_ah->rx_1519_to_max_byte_packets +=
1860                     port_stats.eth.u0.ah0.r1519_to_max;
1861                 p_ah->tx_1519_to_max_byte_packets =
1862                     port_stats.eth.u1.ah1.t1519_to_max;
1863         }
1864
1865         p_common->link_change_count = qed_rd(p_hwfn, p_ptt,
1866                                              p_hwfn->mcp_info->port_addr +
1867                                              offsetof(struct public_port,
1868                                                       link_change_count));
1869 }
1870
1871 static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
1872                                   struct qed_ptt *p_ptt,
1873                                   struct qed_eth_stats *stats,
1874                                   u16 statistics_bin, bool b_get_port_stats)
1875 {
1876         __qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
1877         __qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
1878         __qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
1879         __qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
1880
1881         if (b_get_port_stats && p_hwfn->mcp_info)
1882                 __qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
1883 }
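
/* All the storm readers above accumulate (+=) rather than assign, which is
 * how the per-engine counters of a CMT device sum into one qed_eth_stats.
 */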
1884
1885 static void _qed_get_vport_stats(struct qed_dev *cdev,
1886                                  struct qed_eth_stats *stats)
1887 {
1888         u8 fw_vport = 0;
1889         int i;
1890
1891         memset(stats, 0, sizeof(*stats));
1892
1893         for_each_hwfn(cdev, i) {
1894                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1895                 struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
1896                                                     :  NULL;
1897
1898                 if (IS_PF(cdev)) {
1899                         /* The main vport is relative index 0; translate it to absolute */
1900                         if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
1901                                 DP_ERR(p_hwfn, "No vport available!\n");
1902                                 goto out;
1903                         }
1904                 }
1905
1906                 if (IS_PF(cdev) && !p_ptt) {
1907                         DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1908                         continue;
1909                 }
1910
1911                 __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
1912                                       IS_PF(cdev));
1913
1914 out:
1915                 if (IS_PF(cdev) && p_ptt)
1916                         qed_ptt_release(p_hwfn, p_ptt);
1917         }
1918 }
1919
1920 void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
1921 {
1922         u32 i;
1923
1924         if (!cdev) {
1925                 memset(stats, 0, sizeof(*stats));
1926                 return;
1927         }
1928
1929         _qed_get_vport_stats(cdev, stats);
1930
1931         if (!cdev->reset_stats)
1932                 return;
1933
1934         /* Reduce the statistics baseline */
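	/* qed_eth_stats consists purely of u64 counters, which is what
	 * makes the flat u64-array walk below safe.
	 */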
1935         for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
1936                 ((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
1937 }
1938
1939 /* Zeroes the V-PORT specific portion of stats (port stats remain untouched) */
1940 void qed_reset_vport_stats(struct qed_dev *cdev)
1941 {
1942         int i;
1943
1944         for_each_hwfn(cdev, i) {
1945                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1946                 struct eth_mstorm_per_queue_stat mstats;
1947                 struct eth_ustorm_per_queue_stat ustats;
1948                 struct eth_pstorm_per_queue_stat pstats;
1949                 struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
1950                                                     : NULL;
1951                 u32 addr = 0, len = 0;
1952
1953                 if (IS_PF(cdev) && !p_ptt) {
1954                         DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1955                         continue;
1956                 }
1957
1958                 memset(&mstats, 0, sizeof(mstats));
1959                 __qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
1960                 qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);
1961
1962                 memset(&ustats, 0, sizeof(ustats));
1963                 __qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
1964                 qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);
1965
1966                 memset(&pstats, 0, sizeof(pstats));
1967                 __qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
1968                 qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
1969
1970                 if (IS_PF(cdev))
1971                         qed_ptt_release(p_hwfn, p_ptt);
1972         }
1973
1974         /* PORT statistics are not necessarily reset, so we need to
1975          * read and create a baseline for future statistics.
1976          * The link-change stat is maintained by the MFW; report it as-is.
1977          */
1978         if (!cdev->reset_stats) {
1979                 DP_INFO(cdev, "Reset stats not allocated\n");
1980         } else {
1981                 _qed_get_vport_stats(cdev, cdev->reset_stats);
1982                 cdev->reset_stats->common.link_change_count = 0;
1983         }
1984 }
1985
1986 static enum gft_profile_type
1987 qed_arfs_mode_to_hsi(enum qed_filter_config_mode mode)
1988 {
1989         if (mode == QED_FILTER_CONFIG_MODE_5_TUPLE)
1990                 return GFT_PROFILE_TYPE_4_TUPLE;
1991         if (mode == QED_FILTER_CONFIG_MODE_IP_DEST)
1992                 return GFT_PROFILE_TYPE_IP_DST_ADDR;
1993         if (mode == QED_FILTER_CONFIG_MODE_IP_SRC)
1994                 return GFT_PROFILE_TYPE_IP_SRC_ADDR;
1995         return GFT_PROFILE_TYPE_L4_DST_PORT;
1996 }
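
/* Note the naming skew: QED_FILTER_CONFIG_MODE_5_TUPLE maps onto the
 * firmware's GFT_PROFILE_TYPE_4_TUPLE profile (the IP/port 4-tuple; the
 * protocol itself is selected by the tcp/udp flags handed to
 * qed_gft_config() below), and any other mode falls back to the L4
 * destination-port profile.
 */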
1997
1998 void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
1999                              struct qed_ptt *p_ptt,
2000                              struct qed_arfs_config_params *p_cfg_params)
2001 {
2002         if (p_cfg_params->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
2003                 qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
2004                                p_cfg_params->tcp,
2005                                p_cfg_params->udp,
2006                                p_cfg_params->ipv4,
2007                                p_cfg_params->ipv6,
2008                                qed_arfs_mode_to_hsi(p_cfg_params->mode));
2009                 DP_VERBOSE(p_hwfn,
2010                            QED_MSG_SP,
2011                            "Configured Filtering: tcp = %s, udp = %s, ipv4 = %s, ipv6 = %s, mode = %08x\n",
2012                            p_cfg_params->tcp ? "Enable" : "Disable",
2013                            p_cfg_params->udp ? "Enable" : "Disable",
2014                            p_cfg_params->ipv4 ? "Enable" : "Disable",
2015                            p_cfg_params->ipv6 ? "Enable" : "Disable",
2016                            (u32)p_cfg_params->mode);
2017         } else {
2018                 DP_VERBOSE(p_hwfn, QED_MSG_SP, "Disabled Filtering\n");
2019                 qed_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
2020         }
2021 }
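
/* A minimal usage sketch (hypothetical caller already holding a PTT) that
 * enables 5-tuple classification for TCP over IPv4 only:
 *
 *	struct qed_arfs_config_params cfg = {
 *		.tcp = true,
 *		.ipv4 = true,
 *		.mode = QED_FILTER_CONFIG_MODE_5_TUPLE,
 *	};
 *
 *	qed_arfs_mode_configure(p_hwfn, p_ptt, &cfg);
 */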
2022
2023 int
2024 qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
2025                                 struct qed_spq_comp_cb *p_cb,
2026                                 struct qed_ntuple_filter_params *p_params)
2027 {
2028         struct rx_update_gft_filter_data *p_ramrod = NULL;
2029         struct qed_spq_entry *p_ent = NULL;
2030         struct qed_sp_init_data init_data;
2031         u16 abs_rx_q_id = 0;
2032         u8 abs_vport_id = 0;
2033         int rc = -EINVAL;
2034
2035         /* Get SPQ entry */
2036         memset(&init_data, 0, sizeof(init_data));
2037         init_data.cid = qed_spq_get_cid(p_hwfn);
2038
2039         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
2040
2041         if (p_cb) {
2042                 init_data.comp_mode = QED_SPQ_MODE_CB;
2043                 init_data.p_comp_data = p_cb;
2044         } else {
2045                 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
2046         }
2047
2048         rc = qed_sp_init_request(p_hwfn, &p_ent,
2049                                  ETH_RAMROD_GFT_UPDATE_FILTER,
2050                                  PROTOCOLID_ETH, &init_data);
2051         if (rc)
2052                 return rc;
2053
2054         p_ramrod = &p_ent->ramrod.rx_update_gft;
2055
2056         DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr);
2057         p_ramrod->pkt_hdr_length = cpu_to_le16(p_params->length);
2058
2059         if (p_params->b_is_drop) {
2060                 p_ramrod->vport_id = cpu_to_le16(ETH_GFT_TRASHCAN_VPORT);
2061         } else {
2062                 rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
2063                 if (rc)
2064                         goto err;
2065
2066                 if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
2067                         rc = qed_fw_l2_queue(p_hwfn, p_params->qid,
2068                                              &abs_rx_q_id);
2069                         if (rc)
2070                                 goto err;
2071
2072                         p_ramrod->rx_qid_valid = 1;
2073                         p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
2074                 }
2075
2076                 p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id);
2077         }
2078
2079         p_ramrod->flow_id_valid = 0;
2080         p_ramrod->flow_id = 0;
2081         p_ramrod->filter_action = p_params->b_is_add ? GFT_ADD_FILTER
2082             : GFT_DELETE_FILTER;
2083
2084         DP_VERBOSE(p_hwfn, QED_MSG_SP,
2085                    "V[%02x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n",
2086                    abs_vport_id, abs_rx_q_id,
2087                    p_params->b_is_add ? "Adding" : "Removing",
2088                    (u64)p_params->addr, p_params->length);
2089
2090         return qed_spq_post(p_hwfn, p_ent, NULL);
2091
2092 err:
2093         qed_sp_destroy_request(p_hwfn, p_ent);
2094         return rc;
2095 }
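
/* Drop-type n-tuple filters are realized above by steering the flow to
 * ETH_GFT_TRASHCAN_VPORT instead of a real vport/queue; add vs. delete is
 * selected purely by p_params->b_is_add.
 */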
2096
2097 int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
2098                          struct qed_ptt *p_ptt,
2099                          struct qed_queue_cid *p_cid, u16 *p_rx_coal)
2100 {
2101         u32 coalesce, address, is_valid;
2102         struct cau_sb_entry sb_entry;
2103         u8 timer_res;
2104         int rc;
2105
2106         rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
2107                                p_cid->sb_igu_id * sizeof(u64),
2108                                (u64)(uintptr_t)&sb_entry, 2, 0);
2109         if (rc) {
2110                 DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
2111                 return rc;
2112         }
2113
2114         timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0);
2115
2116         address = BAR0_MAP_REG_USDM_RAM +
2117                   USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
2118         coalesce = qed_rd(p_hwfn, p_ptt, address);
2119
2120         is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
2121         if (!is_valid)
2122                 return -EINVAL;
2123
2124         coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
2125         *p_rx_coal = (u16)(coalesce << timer_res);
2126
2127         return 0;
2128 }
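
/* The timeset read back from the queue zone is the coalescing value
 * pre-shifted down by the CAU timer resolution, so shifting it back up
 * recovers the configured interval: e.g. an illustrative timeset of 0x18
 * with timer_res 1 reads back as 0x30.  The Tx variant below differs only
 * in using TIMER_RES1 and the XSTORM queue zone.
 */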
2129
2130 int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn,
2131                          struct qed_ptt *p_ptt,
2132                          struct qed_queue_cid *p_cid, u16 *p_tx_coal)
2133 {
2134         u32 coalesce, address, is_valid;
2135         struct cau_sb_entry sb_entry;
2136         u8 timer_res;
2137         int rc;
2138
2139         rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
2140                                p_cid->sb_igu_id * sizeof(u64),
2141                                (u64)(uintptr_t)&sb_entry, 2, 0);
2142         if (rc) {
2143                 DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
2144                 return rc;
2145         }
2146
2147         timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1);
2148
2149         address = BAR0_MAP_REG_XSDM_RAM +
2150                   XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
2151         coalesce = qed_rd(p_hwfn, p_ptt, address);
2152
2153         is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
2154         if (!is_valid)
2155                 return -EINVAL;
2156
2157         coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
2158         *p_tx_coal = (u16)(coalesce << timer_res);
2159
2160         return 0;
2161 }
2162
2163 int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *p_coal, void *handle)
2164 {
2165         struct qed_queue_cid *p_cid = handle;
2166         struct qed_ptt *p_ptt;
2167         int rc = 0;
2168
2169         if (IS_VF(p_hwfn->cdev)) {
2170                 rc = qed_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid);
2171                 if (rc)
2172                         DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n");
2173
2174                 return rc;
2175         }
2176
2177         p_ptt = qed_ptt_acquire(p_hwfn);
2178         if (!p_ptt)
2179                 return -EAGAIN;
2180
2181         if (p_cid->b_is_rx) {
2182                 rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
2183                 if (rc)
2184                         goto out;
2185         } else {
2186                 rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
2187                 if (rc)
2188                         goto out;
2189         }
2190
2191 out:
2192         qed_ptt_release(p_hwfn, p_ptt);
2193
2194         return rc;
2195 }
2196
2197 static int qed_fill_eth_dev_info(struct qed_dev *cdev,
2198                                  struct qed_dev_eth_info *info)
2199 {
2200         struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2201         int i;
2202
2203         memset(info, 0, sizeof(*info));
2204
2205         if (IS_PF(cdev)) {
2206                 int max_vf_vlan_filters = 0;
2207                 int max_vf_mac_filters = 0;
2208
2209                 info->num_tc = p_hwfn->hw_info.num_hw_tc;
2210
2211                 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
2212                         u16 num_queues = 0;
2213
2214                         /* Since the feature controls only queue-zones,
2215                          * make sure we have the contexts [rx, xdp, tcs] to
2216                          * match.
2217                          */
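			/* Each queue consumes one Rx connection, one XDP-Tx
			 * connection and num_tc Tx connections, hence the
			 * division by (2 + num_tc) in the loop below.
			 */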
2218                         for_each_hwfn(cdev, i) {
2219                                 struct qed_hwfn *hwfn = &cdev->hwfns[i];
2220                                 u16 l2_queues = (u16)FEAT_NUM(hwfn,
2221                                                               QED_PF_L2_QUE);
2222                                 u16 cids;
2223
2224                                 cids = hwfn->pf_params.eth_pf_params.num_cons;
2225                                 cids /= (2 + info->num_tc);
2226                                 num_queues += min_t(u16, l2_queues, cids);
2227                         }
2228
2229                         /* queues might theoretically be >256, but interrupts'
2230                          * upper limit guarantees that it would fit in a u8.
2231                          */
2232                         if (cdev->int_params.fp_msix_cnt) {
2233                                 u8 irqs = cdev->int_params.fp_msix_cnt;
2234
2235                                 info->num_queues = (u8)min_t(u16,
2236                                                              num_queues, irqs);
2237                         }
2238                 } else {
2239                         info->num_queues = cdev->num_hwfns;
2240                 }
2241
2242                 if (IS_QED_SRIOV(cdev)) {
2243                         max_vf_vlan_filters = cdev->p_iov_info->total_vfs *
2244                                               QED_ETH_VF_NUM_VLAN_FILTERS;
2245                         max_vf_mac_filters = cdev->p_iov_info->total_vfs *
2246                                              QED_ETH_VF_NUM_MAC_FILTERS;
2247                 }
2248                 info->num_vlan_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
2249                                                   QED_VLAN) -
2250                                          max_vf_vlan_filters;
2251                 info->num_mac_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
2252                                                  QED_MAC) -
2253                                         max_vf_mac_filters;
2254
2255                 ether_addr_copy(info->port_mac,
2256                                 cdev->hwfns[0].hw_info.hw_mac_addr);
2257
2258                 info->xdp_supported = true;
2259         } else {
2260                 u16 total_cids = 0;
2261
2262                 info->num_tc = 1;
2263
2264                 /* Determine queues & XDP support */
2265                 for_each_hwfn(cdev, i) {
2266                         struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2267                         u8 queues, cids;
2268
2269                         qed_vf_get_num_cids(p_hwfn, &cids);
2270                         qed_vf_get_num_rxqs(p_hwfn, &queues);
2271                         info->num_queues += queues;
2272                         total_cids += cids;
2273                 }
2274
2275                 /* Enable VF XDP in case the PF guarantees sufficient connections */
2276                 if (total_cids >= info->num_queues * 3)
2277                         info->xdp_supported = true;
2278
2279                 qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
2280                                             (u8 *)&info->num_vlan_filters);
2281                 qed_vf_get_num_mac_filters(&cdev->hwfns[0],
2282                                            (u8 *)&info->num_mac_filters);
2283                 qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);
2284
2285                 info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi;
2286         }
2287
2288         qed_fill_dev_info(cdev, &info->common);
2289
2290         if (IS_VF(cdev))
2291                 eth_zero_addr(info->common.hw_mac);
2292
2293         return 0;
2294 }
2295
2296 static void qed_register_eth_ops(struct qed_dev *cdev,
2297                                  struct qed_eth_cb_ops *ops, void *cookie)
2298 {
2299         cdev->protocol_ops.eth = ops;
2300         cdev->ops_cookie = cookie;
2301
2302         /* For VF, we start bulletin reading */
2303         if (IS_VF(cdev))
2304                 qed_vf_start_iov_wq(cdev);
2305 }
2306
2307 static bool qed_check_mac(struct qed_dev *cdev, u8 *mac)
2308 {
2309         if (IS_PF(cdev))
2310                 return true;
2311
2312         return qed_vf_check_mac(&cdev->hwfns[0], mac);
2313 }
2314
2315 static int qed_start_vport(struct qed_dev *cdev,
2316                            struct qed_start_vport_params *params)
2317 {
2318         int rc, i;
2319
2320         for_each_hwfn(cdev, i) {
2321                 struct qed_sp_vport_start_params start = { 0 };
2322                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2323
2324                 start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
2325                                                         QED_TPA_MODE_NONE;
2326                 start.remove_inner_vlan = params->remove_inner_vlan;
2327                 start.only_untagged = true;     /* accept untagged only */
2328                 start.drop_ttl0 = params->drop_ttl0;
2329                 start.opaque_fid = p_hwfn->hw_info.opaque_fid;
2330                 start.concrete_fid = p_hwfn->hw_info.concrete_fid;
2331                 start.handle_ptp_pkts = params->handle_ptp_pkts;
2332                 start.vport_id = params->vport_id;
2333                 start.max_buffers_per_cqe = 16;
2334                 start.mtu = params->mtu;
2335
2336                 rc = qed_sp_vport_start(p_hwfn, &start);
2337                 if (rc) {
2338                         DP_ERR(cdev, "Failed to start VPORT\n");
2339                         return rc;
2340                 }
2341
2342                 rc = qed_hw_start_fastpath(p_hwfn);
2343                 if (rc) {
2344                         DP_ERR(cdev, "Failed to start VPORT fastpath\n");
2345                         return rc;
2346                 }
2347
2348                 DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
2349                            "Started V-PORT %d with MTU %d\n",
2350                            start.vport_id, start.mtu);
2351         }
2352
2353         if (params->clear_stats)
2354                 qed_reset_vport_stats(cdev);
2355
2356         return 0;
2357 }
2358
2359 static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id)
2360 {
2361         int rc, i;
2362
2363         for_each_hwfn(cdev, i) {
2364                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2365
2366                 rc = qed_sp_vport_stop(p_hwfn,
2367                                        p_hwfn->hw_info.opaque_fid, vport_id);
2368
2369                 if (rc) {
2370                         DP_ERR(cdev, "Failed to stop VPORT\n");
2371                         return rc;
2372                 }
2373         }
2374         return 0;
2375 }
2376
2377 static int qed_update_vport_rss(struct qed_dev *cdev,
2378                                 struct qed_update_vport_rss_params *input,
2379                                 struct qed_rss_params *rss)
2380 {
2381         int i, fn;
2382
2383         /* Update configuration with what's correct regardless of CMT */
2384         rss->update_rss_config = 1;
2385         rss->rss_enable = 1;
2386         rss->update_rss_capabilities = 1;
2387         rss->update_rss_ind_table = 1;
2388         rss->update_rss_key = 1;
2389         rss->rss_caps = input->rss_caps;
2390         memcpy(rss->rss_key, input->rss_key, QED_RSS_KEY_SIZE * sizeof(u32));
2391
2392         /* In the regular scenario we'd simply take the input handlers.
2393          * But in CMT we have to split the handlers according to the
2394          * engine they were configured on. We then have to check
2395          * whether RSS is really required, since 2 queues on CMT don't
2396          * require RSS.
2397          */
2398         if (cdev->num_hwfns == 1) {
2399                 memcpy(rss->rss_ind_table,
2400                        input->rss_ind_table,
2401                        QED_RSS_IND_TABLE_SIZE * sizeof(void *));
2402                 rss->rss_table_size_log = 7;
2403                 return 0;
2404         }
2405
2406         /* Start by copying the non-specific information to the 2nd copy */
2407         memcpy(&rss[1], &rss[0], sizeof(struct qed_rss_params));
2408
2409         /* In CMT, queue handlers are distributed round-robin between the engines */
2410         for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
2411                 struct qed_queue_cid *cid = input->rss_ind_table[i];
2412                 struct qed_rss_params *t_rss;
2413
2414                 if (cid->p_owner == QED_LEADING_HWFN(cdev))
2415                         t_rss = &rss[0];
2416                 else
2417                         t_rss = &rss[1];
2418
2419                 t_rss->rss_ind_table[i / cdev->num_hwfns] = cid;
2420         }
2421
2422         /* Make sure RSS is actually required */
2423         for_each_hwfn(cdev, fn) {
2424                 for (i = 1; i < QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns; i++) {
2425                         if (rss[fn].rss_ind_table[i] !=
2426                             rss[fn].rss_ind_table[0])
2427                                 break;
2428                 }
2429                 if (i == QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns) {
2430                         DP_VERBOSE(cdev, NETIF_MSG_IFUP,
2431                                    "CMT - 1 queue per-hwfn; Disabling RSS\n");
2432                         return -EINVAL;
2433                 }
2434                 rss[fn].rss_table_size_log = 6;
2435         }
2436
2437         return 0;
2438 }
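
/* A worked sketch of the CMT split above, assuming two hwfns with queues
 * spread round-robin (see qed_start_rxq() below): indirection entries owned
 * by the leading hwfn land in rss[0] and the rest in rss[1], each packed at
 * index i / 2, so every engine ends up with its own 64-entry table
 * (rss_table_size_log = 6) instead of the single 128-entry one (log = 7).
 */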
2439
2440 static int qed_update_vport(struct qed_dev *cdev,
2441                             struct qed_update_vport_params *params)
2442 {
2443         struct qed_sp_vport_update_params sp_params;
2444         struct qed_rss_params *rss;
2445         int rc = 0, i;
2446
2447         if (!cdev)
2448                 return -ENODEV;
2449
2450         rss = vzalloc(array_size(sizeof(*rss), cdev->num_hwfns));
2451         if (!rss)
2452                 return -ENOMEM;
2453
2454         memset(&sp_params, 0, sizeof(sp_params));
2455
2456         /* Translate protocol params into sp params */
2457         sp_params.vport_id = params->vport_id;
2458         sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
2459         sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
2460         sp_params.vport_active_rx_flg = params->vport_active_flg;
2461         sp_params.vport_active_tx_flg = params->vport_active_flg;
2462         sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
2463         sp_params.tx_switching_flg = params->tx_switching_flg;
2464         sp_params.accept_any_vlan = params->accept_any_vlan;
2465         sp_params.update_accept_any_vlan_flg =
2466                 params->update_accept_any_vlan_flg;
2467
2468         /* Prepare the RSS configuration */
2469         if (params->update_rss_flg)
2470                 if (qed_update_vport_rss(cdev, &params->rss_params, rss))
2471                         params->update_rss_flg = 0;
2472
2473         for_each_hwfn(cdev, i) {
2474                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2475
2476                 if (params->update_rss_flg)
2477                         sp_params.rss_params = &rss[i];
2478
2479                 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
2480                 rc = qed_sp_vport_update(p_hwfn, &sp_params,
2481                                          QED_SPQ_MODE_EBLOCK,
2482                                          NULL);
2483                 if (rc) {
2484                         DP_ERR(cdev, "Failed to update VPORT\n");
2485                         goto out;
2486                 }
2487
2488                 DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
2489                            "Updated V-PORT %d: active_flag %d [update %d]\n",
2490                            params->vport_id, params->vport_active_flg,
2491                            params->update_vport_active_flg);
2492         }
2493
2494 out:
2495         vfree(rss);
2496         return rc;
2497 }
2498
2499 static int qed_start_rxq(struct qed_dev *cdev,
2500                          u8 rss_num,
2501                          struct qed_queue_start_common_params *p_params,
2502                          u16 bd_max_bytes,
2503                          dma_addr_t bd_chain_phys_addr,
2504                          dma_addr_t cqe_pbl_addr,
2505                          u16 cqe_pbl_size,
2506                          struct qed_rxq_start_ret_params *ret_params)
2507 {
2508         struct qed_hwfn *p_hwfn;
2509         int rc, hwfn_index;
2510
2511         hwfn_index = rss_num % cdev->num_hwfns;
2512         p_hwfn = &cdev->hwfns[hwfn_index];
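
	/* In CMT the global queue index is striped across the engines:
	 * engine = rss_num % num_hwfns, local index = queue_id / num_hwfns.
	 */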
2513
2514         p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
2515         p_params->stats_id = p_params->vport_id;
2516
2517         rc = qed_eth_rx_queue_start(p_hwfn,
2518                                     p_hwfn->hw_info.opaque_fid,
2519                                     p_params,
2520                                     bd_max_bytes,
2521                                     bd_chain_phys_addr,
2522                                     cqe_pbl_addr, cqe_pbl_size, ret_params);
2523         if (rc) {
2524                 DP_ERR(cdev, "Failed to start RXQ#%d\n", p_params->queue_id);
2525                 return rc;
2526         }
2527
2528         DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
2529                    "Started RX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
2530                    p_params->queue_id, rss_num, p_params->vport_id,
2531                    p_params->p_sb->igu_sb_id);
2532
2533         return 0;
2534 }
2535
2536 static int qed_stop_rxq(struct qed_dev *cdev, u8 rss_id, void *handle)
2537 {
2538         int rc, hwfn_index;
2539         struct qed_hwfn *p_hwfn;
2540
2541         hwfn_index = rss_id % cdev->num_hwfns;
2542         p_hwfn = &cdev->hwfns[hwfn_index];
2543
2544         rc = qed_eth_rx_queue_stop(p_hwfn, handle, false, false);
2545         if (rc) {
2546                 DP_ERR(cdev, "Failed to stop RXQ#%02x\n", rss_id);
2547                 return rc;
2548         }
2549
2550         return 0;
2551 }
2552
2553 static int qed_start_txq(struct qed_dev *cdev,
2554                          u8 rss_num,
2555                          struct qed_queue_start_common_params *p_params,
2556                          dma_addr_t pbl_addr,
2557                          u16 pbl_size,
2558                          struct qed_txq_start_ret_params *ret_params)
2559 {
2560         struct qed_hwfn *p_hwfn;
2561         int rc, hwfn_index;
2562
2563         hwfn_index = rss_num % cdev->num_hwfns;
2564         p_hwfn = &cdev->hwfns[hwfn_index];
2565         p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
2566         p_params->stats_id = p_params->vport_id;
2567
2568         rc = qed_eth_tx_queue_start(p_hwfn,
2569                                     p_hwfn->hw_info.opaque_fid,
2570                                     p_params, p_params->tc,
2571                                     pbl_addr, pbl_size, ret_params);
2572
2573         if (rc) {
2574                 DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
2575                 return rc;
2576         }
2577
2578         DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
2579                    "Started TX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
2580                    p_params->queue_id, rss_num, p_params->vport_id,
2581                    p_params->p_sb->igu_sb_id);
2582
2583         return 0;
2584 }
2585
2586 #define QED_HW_STOP_RETRY_LIMIT (10)
2587 static int qed_fastpath_stop(struct qed_dev *cdev)
2588 {
2589         int rc;
2590
2591         rc = qed_hw_stop_fastpath(cdev);
2592         if (rc) {
2593                 DP_ERR(cdev, "Failed to stop Fastpath\n");
2594                 return rc;
2595         }
2596
2597         return 0;
2598 }
2599
2600 static int qed_stop_txq(struct qed_dev *cdev, u8 rss_id, void *handle)
2601 {
2602         struct qed_hwfn *p_hwfn;
2603         int rc, hwfn_index;
2604
2605         hwfn_index = rss_id % cdev->num_hwfns;
2606         p_hwfn = &cdev->hwfns[hwfn_index];
2607
2608         rc = qed_eth_tx_queue_stop(p_hwfn, handle);
2609         if (rc) {
2610                 DP_ERR(cdev, "Failed to stop TXQ#%02x\n", rss_id);
2611                 return rc;
2612         }
2613
2614         return 0;
2615 }
2616
2617 static int qed_tunn_configure(struct qed_dev *cdev,
2618                               struct qed_tunn_params *tunn_params)
2619 {
2620         struct qed_tunnel_info tunn_info;
2621         int i, rc;
2622
2623         memset(&tunn_info, 0, sizeof(tunn_info));
2624         if (tunn_params->update_vxlan_port) {
2625                 tunn_info.vxlan_port.b_update_port = true;
2626                 tunn_info.vxlan_port.port = tunn_params->vxlan_port;
2627         }
2628
2629         if (tunn_params->update_geneve_port) {
2630                 tunn_info.geneve_port.b_update_port = true;
2631                 tunn_info.geneve_port.port = tunn_params->geneve_port;
2632         }
2633
2634         for_each_hwfn(cdev, i) {
2635                 struct qed_hwfn *hwfn = &cdev->hwfns[i];
2636                 struct qed_ptt *p_ptt;
2637                 struct qed_tunnel_info *tun;
2638
2639                 tun = &hwfn->cdev->tunnel;
2640                 if (IS_PF(cdev)) {
2641                         p_ptt = qed_ptt_acquire(hwfn);
2642                         if (!p_ptt)
2643                                 return -EAGAIN;
2644                 } else {
2645                         p_ptt = NULL;
2646                 }
2647
2648                 rc = qed_sp_pf_update_tunn_cfg(hwfn, p_ptt, &tunn_info,
2649                                                QED_SPQ_MODE_EBLOCK, NULL);
2650                 if (rc) {
2651                         if (IS_PF(cdev))
2652                                 qed_ptt_release(hwfn, p_ptt);
2653                         return rc;
2654                 }
2655
2656                 if (IS_PF_SRIOV(hwfn)) {
2657                         u16 vxlan_port, geneve_port;
2658                         int j;
2659
2660                         vxlan_port = tun->vxlan_port.port;
2661                         geneve_port = tun->geneve_port.port;
2662
2663                         qed_for_each_vf(hwfn, j) {
2664                                 qed_iov_bulletin_set_udp_ports(hwfn, j,
2665                                                                vxlan_port,
2666                                                                geneve_port);
2667                         }
2668
2669                         qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
2670                 }
2671                 if (IS_PF(cdev))
2672                         qed_ptt_release(hwfn, p_ptt);
2673         }
2674
2675         return 0;
2676 }
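
/* After the tunnel ramrod completes, a PF with active SR-IOV re-publishes
 * the (possibly updated) VXLAN/GENEVE UDP ports to all its VFs through the
 * bulletin board and kicks the IOV workqueue to deliver them.
 */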
2677
static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
					enum qed_filter_rx_mode_type type)
{
	struct qed_filter_accept_flags accept_flags;

	memset(&accept_flags, 0, sizeof(accept_flags));

	accept_flags.update_rx_mode_config = 1;
	accept_flags.update_tx_mode_config = 1;
	accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
					QED_ACCEPT_MCAST_MATCHED |
					QED_ACCEPT_BCAST;
	accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
					QED_ACCEPT_MCAST_MATCHED |
					QED_ACCEPT_BCAST;

	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
		accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
						 QED_ACCEPT_MCAST_UNMATCHED;
		accept_flags.tx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
						 QED_ACCEPT_MCAST_UNMATCHED;
	} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
		accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
		accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
	}

	return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
				     QED_SPQ_MODE_CB, NULL);
}

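/* Add, remove or replace a unicast MAC and/or VLAN filter on both the
 * Rx and Tx sides. At least one of MAC or VLAN must be valid in @params.
 */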
static int qed_configure_filter_ucast(struct qed_dev *cdev,
				      struct qed_filter_ucast_params *params)
{
	struct qed_filter_ucast ucast;

	if (!params->vlan_valid && !params->mac_valid) {
		DP_NOTICE(cdev,
			  "Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
		return -EINVAL;
	}

	memset(&ucast, 0, sizeof(ucast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		ucast.opcode = QED_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		ucast.opcode = QED_FILTER_REMOVE;
		break;
	case QED_FILTER_XCAST_TYPE_REPLACE:
		ucast.opcode = QED_FILTER_REPLACE;
		break;
	default:
		DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
			  params->type);
		/* Don't fall through with a zeroed opcode */
		return -EINVAL;
	}

	if (params->vlan_valid && params->mac_valid) {
		ucast.type = QED_FILTER_MAC_VLAN;
		ether_addr_copy(ucast.mac, params->mac);
		ucast.vlan = params->vlan;
	} else if (params->mac_valid) {
		ucast.type = QED_FILTER_MAC;
		ether_addr_copy(ucast.mac, params->mac);
	} else {
		ucast.type = QED_FILTER_VLAN;
		ucast.vlan = params->vlan;
	}

	ucast.is_rx_filter = true;
	ucast.is_tx_filter = true;

	return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
}

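/* Add or remove a set of multicast MAC filters. The caller is expected
 * to keep @params->num within the bounds of the mcast.mac array
 * (QED_MAX_MC_ADDRS).
 */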
static int qed_configure_filter_mcast(struct qed_dev *cdev,
				      struct qed_filter_mcast_params *params)
{
	struct qed_filter_mcast mcast;
	int i;

	memset(&mcast, 0, sizeof(mcast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		mcast.opcode = QED_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		mcast.opcode = QED_FILTER_REMOVE;
		break;
	default:
		DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
			  params->type);
		/* Don't fall through with a zeroed opcode */
		return -EINVAL;
	}

	mcast.num_mc_addrs = params->num;
	for (i = 0; i < mcast.num_mc_addrs; i++)
		ether_addr_copy(mcast.mac[i], params->mac[i]);

	return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL);
}

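/* Dispatch a generic filter request to the matching unicast, multicast
 * or Rx-mode helper above.
 */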
static int qed_configure_filter(struct qed_dev *cdev,
				struct qed_filter_params *params)
{
	enum qed_filter_rx_mode_type accept_flags;

	switch (params->type) {
	case QED_FILTER_TYPE_UCAST:
		return qed_configure_filter_ucast(cdev, &params->filter.ucast);
	case QED_FILTER_TYPE_MCAST:
		return qed_configure_filter_mcast(cdev, &params->filter.mcast);
	case QED_FILTER_TYPE_RX_MODE:
		accept_flags = params->filter.accept_flags;
		return qed_configure_filter_rx_mode(cdev, accept_flags);
	default:
		DP_NOTICE(cdev, "Unknown filter type %d\n", (int)params->type);
		return -EINVAL;
	}
}

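/* Configure the RFS searcher mode on the leading hwfn, matching TCP and
 * UDP over both IPv4 and IPv6.
 */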
static int qed_configure_arfs_searcher(struct qed_dev *cdev,
				       enum qed_filter_config_mode mode)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_arfs_config_params arfs_config_params;

	memset(&arfs_config_params, 0, sizeof(arfs_config_params));
	arfs_config_params.tcp = true;
	arfs_config_params.udp = true;
	arfs_config_params.ipv4 = true;
	arfs_config_params.ipv6 = true;
	arfs_config_params.mode = mode;
	qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
				&arfs_config_params);
	return 0;
}

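/* Slowpath completion callback for a-RFS filter ramrods; forwards the
 * firmware return code to the protocol driver's arfs_filter_op hook.
 */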
static void
qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
			     void *cookie,
			     union event_ring_data *data, u8 fw_return_code)
{
	struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common;
	void *dev = p_hwfn->cdev->ops_cookie;

	op->arfs_filter_op(dev, cookie, fw_return_code);
}

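/* Configure an n-tuple (a-RFS) filter. For a VF-owned filter the vport
 * is derived from the VF index and the queue selection is left to RSS;
 * completion is reported asynchronously through
 * qed_arfs_sp_response_handler().
 */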
static int
qed_ntuple_arfs_filter_config(struct qed_dev *cdev,
			      void *cookie,
			      struct qed_ntuple_filter_params *params)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_spq_comp_cb cb;
	int rc = -EINVAL;

	cb.function = qed_arfs_sp_response_handler;
	cb.cookie = cookie;

	if (params->b_is_vf) {
		if (!qed_iov_is_valid_vfid(p_hwfn, params->vf_id, false,
					   false)) {
			DP_INFO(p_hwfn, "vfid 0x%02x is out of bounds\n",
				params->vf_id);
			return rc;
		}

		params->vport_id = params->vf_id + 1;
		params->qid = QED_RFS_NTUPLE_QID_RSS;
	}

	rc = qed_configure_rfs_ntuple_filter(p_hwfn, &cb, params);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to issue a-RFS filter configuration\n");
	else
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV,
			   "Successfully issued a-RFS filter configuration\n");

	return rc;
}

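/* Read the current coalescing timeout of the queue behind @handle. */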
static int qed_get_coalesce(struct qed_dev *cdev, u16 *coal, void *handle)
{
	struct qed_queue_cid *p_cid = handle;
	struct qed_hwfn *p_hwfn;
	int rc;

	p_hwfn = p_cid->p_owner;
	rc = qed_get_queue_coalesce(p_hwfn, coal, handle);
	if (rc)
		DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n");

	return rc;
}

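/* Hand a slow-path Rx CQE back to the hwfn that owns the queue. */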
static int qed_fp_cqe_completion(struct qed_dev *dev,
				 u8 rss_id, struct eth_slow_path_rx_cqe *cqe)
{
	return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
				      cqe);
}

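/* VF only: ask the PF to publish @mac in this VF's bulletin board.
 * A no-op for PFs, which own their MAC directly.
 */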
static int qed_req_bulletin_update_mac(struct qed_dev *cdev, u8 *mac)
{
	int i, ret;

	if (IS_PF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		ret = qed_vf_pf_bulletin_update_mac(p_hwfn, mac);
		if (ret)
			return ret;
	}

	return 0;
}

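/* L2 callback table handed to protocol drivers (e.g. qede) through
 * qed_get_eth_ops(); the SR-IOV and DCB hooks are compiled in only when
 * the corresponding config options are set.
 */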
#ifdef CONFIG_QED_SRIOV
extern const struct qed_iov_hv_ops qed_iov_ops_pass;
#endif

#ifdef CONFIG_DCB
extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass;
#endif

extern const struct qed_eth_ptp_ops qed_ptp_ops_pass;

static const struct qed_eth_ops qed_eth_ops_pass = {
	.common = &qed_common_ops_pass,
#ifdef CONFIG_QED_SRIOV
	.iov = &qed_iov_ops_pass,
#endif
#ifdef CONFIG_DCB
	.dcb = &qed_dcbnl_ops_pass,
#endif
	.ptp = &qed_ptp_ops_pass,
	.fill_dev_info = &qed_fill_eth_dev_info,
	.register_ops = &qed_register_eth_ops,
	.check_mac = &qed_check_mac,
	.vport_start = &qed_start_vport,
	.vport_stop = &qed_stop_vport,
	.vport_update = &qed_update_vport,
	.q_rx_start = &qed_start_rxq,
	.q_rx_stop = &qed_stop_rxq,
	.q_tx_start = &qed_start_txq,
	.q_tx_stop = &qed_stop_txq,
	.filter_config = &qed_configure_filter,
	.fastpath_stop = &qed_fastpath_stop,
	.eth_cqe_completion = &qed_fp_cqe_completion,
	.get_vport_stats = &qed_get_vport_stats,
	.tunn_config = &qed_tunn_configure,
	.ntuple_filter_config = &qed_ntuple_arfs_filter_config,
	.configure_arfs_searcher = &qed_configure_arfs_searcher,
	.get_coalesce = &qed_get_coalesce,
	.req_bulletin_update_mac = &qed_req_bulletin_update_mac,
};

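/* Entry point for protocol drivers. A minimal usage sketch (the error
 * handling below is illustrative, not taken from a specific caller):
 *
 *	const struct qed_eth_ops *ops = qed_get_eth_ops();
 *
 *	if (!ops)
 *		return -ENODEV;
 *	ops->fill_dev_info(cdev, &dev_info);
 */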
const struct qed_eth_ops *qed_get_eth_ops(void)
{
	return &qed_eth_ops_pass;
}
EXPORT_SYMBOL(qed_get_eth_ops);

void qed_put_eth_ops(void)
{
	/* TODO - reference count for module? */
}
EXPORT_SYMBOL(qed_put_eth_ops);