// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/qed/qed_iov_if.h>
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"

static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
                               u8 opcode,
                               __le16 echo,
                               union event_ring_data *data, u8 fw_return_code);
static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid);

static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf)
{
        u8 legacy = 0;

        if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
            ETH_HSI_VER_NO_PKT_LEN_TUNN)
                legacy |= QED_QCID_LEGACY_VF_RX_PROD;

        if (!(p_vf->acquire.vfdev_info.capabilities &
              VFPF_ACQUIRE_CAP_QUEUE_QIDS))
                legacy |= QED_QCID_LEGACY_VF_CID;

        return legacy;
}

/* IOV ramrods */
static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
{
        struct vf_start_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc = -EINVAL;
        u8 fp_minor;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_vf->opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_VF_START,
                                 PROTOCOLID_COMMON, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.vf_start;

        p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
        p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid);

        switch (p_hwfn->hw_info.personality) {
        case QED_PCI_ETH:
                p_ramrod->personality = PERSONALITY_ETH;
                break;
        case QED_PCI_ETH_ROCE:
        case QED_PCI_ETH_IWARP:
                p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
                break;
        default:
                DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
                          p_hwfn->hw_info.personality);
                qed_sp_destroy_request(p_hwfn, p_ent);
                return -EINVAL;
        }

        fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
        if (fp_minor > ETH_HSI_VER_MINOR &&
            fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
                           "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PF's version\n",
                           p_vf->abs_vf_id,
                           ETH_HSI_VER_MAJOR,
                           fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
                fp_minor = ETH_HSI_VER_MINOR;
        }

        p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
        p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "VF[%d] - Starting using HSI %02x.%02x\n",
                   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);

        return qed_spq_post(p_hwfn, p_ent, NULL);
}
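
/* A minimal sketch (hypothetical values, illustrative only) of the HSI
 * minor-version clamping applied above:
 *
 *      u8 fp_minor = ETH_HSI_VER_MINOR + 1;    // VF asks for a newer minor
 *      if (fp_minor > ETH_HSI_VER_MINOR &&     // PF caps it at its own minor
 *          fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN)
 *              fp_minor = ETH_HSI_VER_MINOR;
 */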

static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
                          u32 concrete_vfid, u16 opaque_vfid)
{
        struct vf_stop_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc = -EINVAL;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = opaque_vfid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_VF_STOP,
                                 PROTOCOLID_COMMON, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.vf_stop;

        p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
                           int rel_vf_id,
                           bool b_enabled_only, bool b_non_malicious)
{
        if (!p_hwfn->pf_iov_info) {
                DP_NOTICE(p_hwfn->cdev, "No iov info\n");
                return false;
        }

        if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
            (rel_vf_id < 0))
                return false;

        if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
            b_enabled_only)
                return false;

        if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
            b_non_malicious)
                return false;

        return true;
}
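
/* Example usage (illustrative only): gating an operation on a VF that must
 * be both initialized and not flagged as malicious:
 *
 *      if (!qed_iov_is_valid_vfid(p_hwfn, rel_vf_id, true, true))
 *              return -EINVAL;
 */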

static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
                                               u16 relative_vf_id,
                                               bool b_enabled_only)
{
        struct qed_vf_info *vf = NULL;

        if (!p_hwfn->pf_iov_info) {
                DP_NOTICE(p_hwfn->cdev, "No iov info\n");
                return NULL;
        }

        if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id,
                                  b_enabled_only, false))
                vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
        else
                DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
                       relative_vf_id);

        return vf;
}

static struct qed_queue_cid *
qed_iov_get_vf_rx_queue_cid(struct qed_vf_queue *p_queue)
{
        int i;

        for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
                if (p_queue->cids[i].p_cid && !p_queue->cids[i].b_is_tx)
                        return p_queue->cids[i].p_cid;
        }

        return NULL;
}

enum qed_iov_validate_q_mode {
        QED_IOV_VALIDATE_Q_NA,
        QED_IOV_VALIDATE_Q_ENABLE,
        QED_IOV_VALIDATE_Q_DISABLE,
};

static bool qed_iov_validate_queue_mode(struct qed_hwfn *p_hwfn,
                                        struct qed_vf_info *p_vf,
                                        u16 qid,
                                        enum qed_iov_validate_q_mode mode,
                                        bool b_is_tx)
{
        int i;

        if (mode == QED_IOV_VALIDATE_Q_NA)
                return true;

        for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
                struct qed_vf_queue_cid *p_qcid;

                p_qcid = &p_vf->vf_queues[qid].cids[i];

                if (!p_qcid->p_cid)
                        continue;

                if (p_qcid->b_is_tx != b_is_tx)
                        continue;

                return mode == QED_IOV_VALIDATE_Q_ENABLE;
        }

        /* In case we haven't found any valid cid, the queue is disabled */
        return mode == QED_IOV_VALIDATE_Q_DISABLE;
}
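
/* Example usage (illustrative only): verifying that a VF's Rx queue 3 has
 * no active Rx CID before (re)starting it:
 *
 *      if (!qed_iov_validate_queue_mode(p_hwfn, p_vf, 3,
 *                                       QED_IOV_VALIDATE_Q_DISABLE, false))
 *              return -EINVAL;
 */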

static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
                                 struct qed_vf_info *p_vf,
                                 u16 rx_qid,
                                 enum qed_iov_validate_q_mode mode)
{
        if (rx_qid >= p_vf->num_rxqs) {
                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
                           "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
                           p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
                return false;
        }

        return qed_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid, mode, false);
}

static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
                                 struct qed_vf_info *p_vf,
                                 u16 tx_qid,
                                 enum qed_iov_validate_q_mode mode)
{
        if (tx_qid >= p_vf->num_txqs) {
                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
                           "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
                           p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
                return false;
        }

        return qed_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid, mode, true);
}

static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
                                struct qed_vf_info *p_vf, u16 sb_idx)
{
        int i;

        for (i = 0; i < p_vf->num_sbs; i++)
                if (p_vf->igu_sbs[i] == sb_idx)
                        return true;

        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "VF[0x%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n",
                   p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);

        return false;
}

static bool qed_iov_validate_active_rxq(struct qed_hwfn *p_hwfn,
                                        struct qed_vf_info *p_vf)
{
        u8 i;

        for (i = 0; i < p_vf->num_rxqs; i++)
                if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
                                                QED_IOV_VALIDATE_Q_ENABLE,
                                                false))
                        return true;

        return false;
}

static bool qed_iov_validate_active_txq(struct qed_hwfn *p_hwfn,
                                        struct qed_vf_info *p_vf)
{
        u8 i;

        for (i = 0; i < p_vf->num_txqs; i++)
                if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
                                                QED_IOV_VALIDATE_Q_ENABLE,
                                                true))
                        return true;

        return false;
}

static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
                                    int vfid, struct qed_ptt *p_ptt)
{
        struct qed_bulletin_content *p_bulletin;
        int crc_size = sizeof(p_bulletin->crc);
        struct qed_dmae_params params;
        struct qed_vf_info *p_vf;

        p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
        if (!p_vf)
                return -EINVAL;

        if (!p_vf->vf_bulletin)
                return -EINVAL;

        p_bulletin = p_vf->bulletin.p_virt;

        /* Increment bulletin board version and compute crc */
        p_bulletin->version++;
        p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
                                p_vf->bulletin.size - crc_size);

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
                   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);

        /* propagate bulletin board via dmae to VF memory */
        memset(&params, 0, sizeof(params));
        SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1);
        params.dst_vfid = p_vf->abs_vf_id;
        return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
                                  p_vf->vf_bulletin, p_vf->bulletin.size / 4,
                                  &params);
}
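
/* A minimal VF-side sketch (names assumed, illustrative only) of validating
 * a received bulletin copy against the CRC posted above; the CRC skips the
 * crc field itself, mirroring the PF-side computation:
 *
 *      crc = crc32(0, (u8 *)p_copy + sizeof(p_copy->crc),
 *                  bulletin_size - sizeof(p_copy->crc));
 *      valid = (crc == p_copy->crc && p_copy->version != last_seen_version);
 */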

static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
{
        struct qed_hw_sriov_info *iov = cdev->p_iov_info;
        int pos = iov->pos;

        DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
        pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);

        pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
        if (iov->num_vfs) {
                DP_VERBOSE(cdev,
                           QED_MSG_IOV,
                           "Number of VFs is already set to a non-zero value; ignoring the PCI configuration value\n");
                iov->num_vfs = 0;
        }

        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

        pci_read_config_dword(cdev->pdev,
                              pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

        pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);

        pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

        DP_VERBOSE(cdev,
                   QED_MSG_IOV,
                   "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
                   iov->nres,
                   iov->cap,
                   iov->ctrl,
                   iov->total_vfs,
                   iov->initial_vfs,
                   iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

        /* Some sanity checks */
        if (iov->num_vfs > NUM_OF_VFS(cdev) ||
            iov->total_vfs > NUM_OF_VFS(cdev)) {
                /* This can happen only due to a bug. In this case we set
                 * num_vfs to zero to avoid memory corruption in the code that
                 * assumes the max number of vfs.
                 */
                DP_NOTICE(cdev,
                          "IOV: Unexpected number of vfs set: %d; setting num_vfs to zero\n",
                          iov->num_vfs);

                iov->num_vfs = 0;
                iov->total_vfs = 0;
        }

        return 0;
}

static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
{
        struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
        struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
        struct qed_bulletin_content *p_bulletin_virt;
        dma_addr_t req_p, rply_p, bulletin_p;
        union pfvf_tlvs *p_reply_virt_addr;
        union vfpf_tlvs *p_req_virt_addr;
        u8 idx = 0;

        memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

        p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
        req_p = p_iov_info->mbx_msg_phys_addr;
        p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
        rply_p = p_iov_info->mbx_reply_phys_addr;
        p_bulletin_virt = p_iov_info->p_bulletins;
        bulletin_p = p_iov_info->bulletins_phys;
        if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
                DP_ERR(p_hwfn,
                       "qed_iov_setup_vfdb called without allocating mem first\n");
                return;
        }

        for (idx = 0; idx < p_iov->total_vfs; idx++) {
                struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
                u32 concrete;

                vf->vf_mbx.req_virt = p_req_virt_addr + idx;
                vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
                vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
                vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

                vf->state = VF_STOPPED;
                vf->b_init = false;

                vf->bulletin.phys = idx *
                                    sizeof(struct qed_bulletin_content) +
                                    bulletin_p;
                vf->bulletin.p_virt = p_bulletin_virt + idx;
                vf->bulletin.size = sizeof(struct qed_bulletin_content);

                vf->relative_vf_id = idx;
                vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
                concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
                vf->concrete_fid = concrete;
                vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
                                 (vf->abs_vf_id << 8);
                vf->vport_id = idx + 1;

                vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
                vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
        }
}
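
/* Illustrative example of the per-VF IDs computed above (values assumed):
 * with first_vf_in_pf = 8 and idx = 2,
 *
 *      vf->abs_vf_id = 2 + 8;                                  // 10
 *      vf->opaque_fid = (opaque_pf_fid & 0xff) | (10 << 8);
 *
 * i.e. the absolute VF number rides in the upper byte of the opaque FID
 * while the low byte still identifies the parent PF.
 */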

static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
{
        struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
        void **p_v_addr;
        u16 num_vfs = 0;

        num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);

        /* Allocate PF Mailbox buffer (per-VF) */
        p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
        p_v_addr = &p_iov_info->mbx_msg_virt_addr;
        *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                       p_iov_info->mbx_msg_size,
                                       &p_iov_info->mbx_msg_phys_addr,
                                       GFP_KERNEL);
        if (!*p_v_addr)
                return -ENOMEM;

        /* Allocate PF Mailbox Reply buffer (per-VF) */
        p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
        p_v_addr = &p_iov_info->mbx_reply_virt_addr;
        *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                       p_iov_info->mbx_reply_size,
                                       &p_iov_info->mbx_reply_phys_addr,
                                       GFP_KERNEL);
        if (!*p_v_addr)
                return -ENOMEM;

        p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
                                     num_vfs;
        p_v_addr = &p_iov_info->p_bulletins;
        *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                       p_iov_info->bulletins_size,
                                       &p_iov_info->bulletins_phys,
                                       GFP_KERNEL);
        if (!*p_v_addr)
                return -ENOMEM;

        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
                   p_iov_info->mbx_msg_virt_addr,
                   (u64) p_iov_info->mbx_msg_phys_addr,
                   p_iov_info->mbx_reply_virt_addr,
                   (u64) p_iov_info->mbx_reply_phys_addr,
                   p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);

        return 0;
}

static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
{
        struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

        if (p_iov_info->mbx_msg_virt_addr)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  p_iov_info->mbx_msg_size,
                                  p_iov_info->mbx_msg_virt_addr,
                                  p_iov_info->mbx_msg_phys_addr);

        if (p_iov_info->mbx_reply_virt_addr)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  p_iov_info->mbx_reply_size,
                                  p_iov_info->mbx_reply_virt_addr,
                                  p_iov_info->mbx_reply_phys_addr);

        if (p_iov_info->p_bulletins)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  p_iov_info->bulletins_size,
                                  p_iov_info->p_bulletins,
                                  p_iov_info->bulletins_phys);
}

int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_pf_iov *p_sriov;

        if (!IS_PF_SRIOV(p_hwfn)) {
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "No SR-IOV - no need for IOV db\n");
                return 0;
        }

        p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
        if (!p_sriov)
                return -ENOMEM;

        p_hwfn->pf_iov_info = p_sriov;

        qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
                                  qed_sriov_eqe_event);

        return qed_iov_allocate_vfdb(p_hwfn);
}

void qed_iov_setup(struct qed_hwfn *p_hwfn)
{
        if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
                return;

        qed_iov_setup_vfdb(p_hwfn);
}

void qed_iov_free(struct qed_hwfn *p_hwfn)
{
        qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);

        if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
                qed_iov_free_vfdb(p_hwfn);
                kfree(p_hwfn->pf_iov_info);
        }
}

void qed_iov_free_hw_info(struct qed_dev *cdev)
{
        kfree(cdev->p_iov_info);
        cdev->p_iov_info = NULL;
}

int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
        struct qed_dev *cdev = p_hwfn->cdev;
        int pos;
        int rc;

        if (is_kdump_kernel())
                return 0;

        if (IS_VF(p_hwfn->cdev))
                return 0;

        /* Learn the PCI configuration */
        pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
                                      PCI_EXT_CAP_ID_SRIOV);
        if (!pos) {
                DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
                return 0;
        }

        /* Allocate a new struct for IOV information */
        cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
        if (!cdev->p_iov_info)
                return -ENOMEM;

        cdev->p_iov_info->pos = pos;

        rc = qed_iov_pci_cfg_info(cdev);
        if (rc)
                return rc;

        /* We want PF IOV to be synonymous with the existence of p_iov_info;
         * in case the capability is published but there are no VFs, simply
         * de-allocate the struct.
         */
        if (!cdev->p_iov_info->total_vfs) {
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "IOV capabilities, but no VFs are published\n");
                kfree(cdev->p_iov_info);
                cdev->p_iov_info = NULL;
                return 0;
        }

        /* First VF index based on offset is tricky:
         *  - If ARI is supported [likely], offset - (16 - pf_id) would
         *    provide the number for eng0. 2nd engine VFs would begin
         *    after the first engine's VFs.
         *  - If !ARI, VFs would start on the next device,
         *    so offset - (256 - pf_id) would provide the number.
         * Utilize the fact that (256 - pf_id) is achieved only by the latter
         * to differentiate between the two.
         */

        if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
                u32 first = p_hwfn->cdev->p_iov_info->offset +
                            p_hwfn->abs_pf_id - 16;

                cdev->p_iov_info->first_vf_in_pf = first;

                if (QED_PATH_ID(p_hwfn))
                        cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
        } else {
                u32 first = p_hwfn->cdev->p_iov_info->offset +
                            p_hwfn->abs_pf_id - 256;

                cdev->p_iov_info->first_vf_in_pf = first;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "First VF in hwfn 0x%08x\n",
                   cdev->p_iov_info->first_vf_in_pf);

        return 0;
}
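
/* Worked example (values assumed, illustrative only): with ARI, abs_pf_id = 2
 * and an SR-IOV offset of 30 satisfy 30 < (256 - 2), so
 * first_vf_in_pf = 30 + 2 - 16 = 16; without ARI the offset would have been
 * at least (256 - pf_id) and the 256-based branch would apply instead.
 */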

static bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
                                     int vfid, bool b_fail_malicious)
{
        /* Check PF supports sriov */
        if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
            !IS_PF_SRIOV_ALLOC(p_hwfn))
                return false;

        /* Check VF validity */
        if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
                return false;

        return true;
}

static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
{
        return _qed_iov_pf_sanity_check(p_hwfn, vfid, true);
}

static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
                                      u16 rel_vf_id, u8 to_disable)
{
        struct qed_vf_info *vf;
        int i;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
                if (!vf)
                        continue;

                vf->to_disable = to_disable;
        }
}

static void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
{
        u16 i;

        if (!IS_QED_SRIOV(cdev))
                return;

        for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
                qed_iov_set_vf_to_disable(cdev, i, to_disable);
}

static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
                                       struct qed_ptt *p_ptt, u8 abs_vfid)
{
        qed_wr(p_hwfn, p_ptt,
               PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
               1 << (abs_vfid & 0x1f));
}
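
/* Worked example (illustrative only): for abs_vfid = 37 the write above
 * targets the second clear register ((37 >> 5) * 4 = 4 bytes in) and sets
 * bit (37 & 0x1f) = 5, i.e. writes the value 0x20.
 */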

static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
{
        int i;

        /* Set VF masks and configuration - pretend */
        qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

        qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

        /* unpretend */
        qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

        /* iterate over all queues, clear sb consumer */
        for (i = 0; i < vf->num_sbs; i++)
                qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
                                                vf->igu_sbs[i],
                                                vf->opaque_fid, true);
}

static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
                                   struct qed_ptt *p_ptt,
                                   struct qed_vf_info *vf, bool enable)
{
        u32 igu_vf_conf;

        qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

        igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

        if (enable)
                igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
        else
                igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

        qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

        /* unpretend */
        qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
}

static int
qed_iov_enable_vf_access_msix(struct qed_hwfn *p_hwfn,
                              struct qed_ptt *p_ptt, u8 abs_vf_id, u8 num_sbs)
{
        u8 current_max = 0;
        int i;

        /* For AH onward, configuration is per-PF. Find maximum of all
         * the currently enabled child VFs, and set the number to be that.
         */
        if (!QED_IS_BB(p_hwfn->cdev)) {
                qed_for_each_vf(p_hwfn, i) {
                        struct qed_vf_info *p_vf;

                        p_vf = qed_iov_get_vf_info(p_hwfn, (u16)i, true);
                        if (!p_vf)
                                continue;

                        current_max = max_t(u8, current_max, p_vf->num_sbs);
                }
        }

        if (num_sbs > current_max)
                return qed_mcp_config_vf_msix(p_hwfn, p_ptt,
                                              abs_vf_id, num_sbs);

        return 0;
}

static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
                                    struct qed_ptt *p_ptt,
                                    struct qed_vf_info *vf)
{
        u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
        int rc;

        /* It's possible VF was previously considered malicious -
         * clear the indication even if we're only going to disable VF.
         */
        vf->b_malicious = false;

        if (vf->to_disable)
                return 0;

        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "Enable internal access for vf %x [abs %x]\n",
                   vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));

        qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));

        qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

        rc = qed_iov_enable_vf_access_msix(p_hwfn, p_ptt,
                                           vf->abs_vf_id, vf->num_sbs);
        if (rc)
                return rc;

        qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

        SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
        STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

        qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
                     p_hwfn->hw_info.hw_mode);

        /* unpretend */
        qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

        vf->state = VF_FREE;

        return rc;
}

/**
 * qed_iov_config_perm_table() - Configure the permission zone table.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT window for writing the registers.
 * @vf: VF info data.
 * @enable: The actual permission for this VF.
 *
 * In E4, the queue zone permission table size is 320x9. There
 * are 320 VF queues for a single engine device (256 for a dual
 * engine device), and each entry has the following format:
 * {Valid, VF[7:0]}
 */
static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
                                      struct qed_ptt *p_ptt,
                                      struct qed_vf_info *vf, u8 enable)
{
        u32 reg_addr, val;
        u16 qzone_id = 0;
        int qid;

        for (qid = 0; qid < vf->num_rxqs; qid++) {
                qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
                                &qzone_id);

                reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
                val = enable ? (vf->abs_vf_id | BIT(8)) : 0;
                qed_wr(p_hwfn, p_ptt, reg_addr, val);
        }
}
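
/* Worked example (illustrative only): enabling VF 10 whose Rx queue maps to
 * queue zone 5 writes (10 | BIT(8)) = 0x10a to
 * PSWHST_REG_ZONE_PERMISSION_TABLE + 20, i.e. {Valid = 1, VF = 10} in the
 * {Valid, VF[7:0]} entry format described above.
 */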

static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
                                      struct qed_ptt *p_ptt,
                                      struct qed_vf_info *vf)
{
        /* Reset vf in IGU - interrupts are still disabled */
        qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

        qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, true);

        /* Permission Table */
        qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
}

static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
                                   struct qed_ptt *p_ptt,
                                   struct qed_vf_info *vf, u16 num_rx_queues)
{
        struct qed_igu_block *p_block;
        struct cau_sb_entry sb_entry;
        int qid = 0;
        u32 val = 0;

        if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
                num_rx_queues = p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
        p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;

        SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
        SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
        SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

        for (qid = 0; qid < num_rx_queues; qid++) {
                p_block = qed_get_igu_free_sb(p_hwfn, false);
                vf->igu_sbs[qid] = p_block->igu_sb_id;
                p_block->status &= ~QED_IGU_STATUS_FREE;
                SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

                qed_wr(p_hwfn, p_ptt,
                       IGU_REG_MAPPING_MEMORY +
                       sizeof(u32) * p_block->igu_sb_id, val);

                /* Configure in CAU the igu SBs that were marked valid */
                qed_init_cau_sb_entry(p_hwfn, &sb_entry,
                                      p_hwfn->rel_pf_id, vf->abs_vf_id, 1);

                qed_dmae_host2grc(p_hwfn, p_ptt,
                                  (u64)(uintptr_t)&sb_entry,
                                  CAU_REG_SB_VAR_MEMORY +
                                  p_block->igu_sb_id * sizeof(u64), 2, NULL);
        }

        vf->num_sbs = (u8) num_rx_queues;

        return vf->num_sbs;
}

static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
                                    struct qed_ptt *p_ptt,
                                    struct qed_vf_info *vf)
{
        struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
        int idx, igu_id;
        u32 addr, val;

        /* Invalidate igu CAM lines and mark them as free */
        for (idx = 0; idx < vf->num_sbs; idx++) {
                igu_id = vf->igu_sbs[idx];
                addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

                val = qed_rd(p_hwfn, p_ptt, addr);
                SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
                qed_wr(p_hwfn, p_ptt, addr, val);

                p_info->entry[igu_id].status |= QED_IGU_STATUS_FREE;
                p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
        }

        vf->num_sbs = 0;
}

static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
                             u16 vfid,
                             struct qed_mcp_link_params *params,
                             struct qed_mcp_link_state *link,
                             struct qed_mcp_link_capabilities *p_caps)
{
        struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
                                                       vfid,
                                                       false);
        struct qed_bulletin_content *p_bulletin;

        if (!p_vf)
                return;

        p_bulletin = p_vf->bulletin.p_virt;
        p_bulletin->req_autoneg = params->speed.autoneg;
        p_bulletin->req_adv_speed = params->speed.advertised_speeds;
        p_bulletin->req_forced_speed = params->speed.forced_speed;
        p_bulletin->req_autoneg_pause = params->pause.autoneg;
        p_bulletin->req_forced_rx = params->pause.forced_rx;
        p_bulletin->req_forced_tx = params->pause.forced_tx;
        p_bulletin->req_loopback = params->loopback_mode;

        p_bulletin->link_up = link->link_up;
        p_bulletin->speed = link->speed;
        p_bulletin->full_duplex = link->full_duplex;
        p_bulletin->autoneg = link->an;
        p_bulletin->autoneg_complete = link->an_complete;
        p_bulletin->parallel_detection = link->parallel_detection;
        p_bulletin->pfc_enabled = link->pfc_enabled;
        p_bulletin->partner_adv_speed = link->partner_adv_speed;
        p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
        p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
        p_bulletin->partner_adv_pause = link->partner_adv_pause;
        p_bulletin->sfp_tx_fault = link->sfp_tx_fault;

        p_bulletin->capability_speed = p_caps->speed_capabilities;
}

static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
                                  struct qed_ptt *p_ptt,
                                  struct qed_iov_vf_init_params *p_params)
{
        struct qed_mcp_link_capabilities link_caps;
        struct qed_mcp_link_params link_params;
        struct qed_mcp_link_state link_state;
        u8 num_of_vf_available_chains = 0;
        struct qed_vf_info *vf = NULL;
        u16 qid, num_irqs;
        int rc = 0;
        u32 cids;
        u8 i;

        vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
        if (!vf) {
                DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf: vf is NULL\n");
                return -EINVAL;
        }

        if (vf->b_init) {
                DP_NOTICE(p_hwfn, "VF[%d] is already active.\n",
                          p_params->rel_vf_id);
                return -EINVAL;
        }

        /* Perform sanity checking on the requested queue_id */
        for (i = 0; i < p_params->num_queues; i++) {
                u16 min_vf_qzone = FEAT_NUM(p_hwfn, QED_PF_L2_QUE);
                u16 max_vf_qzone = min_vf_qzone +
                    FEAT_NUM(p_hwfn, QED_VF_L2_QUE) - 1;

                qid = p_params->req_rx_queue[i];
                if (qid < min_vf_qzone || qid > max_vf_qzone) {
                        DP_NOTICE(p_hwfn,
                                  "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
                                  qid,
                                  p_params->rel_vf_id,
                                  min_vf_qzone, max_vf_qzone);
                        return -EINVAL;
                }

                qid = p_params->req_tx_queue[i];
                if (qid > max_vf_qzone) {
                        DP_NOTICE(p_hwfn,
                                  "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
                                  qid, p_params->rel_vf_id, max_vf_qzone);
                        return -EINVAL;
                }

                /* If client *really* wants, Tx qid can be shared with PF */
                if (qid < min_vf_qzone)
                        DP_VERBOSE(p_hwfn,
                                   QED_MSG_IOV,
                                   "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
                                   p_params->rel_vf_id, qid, i);
        }

        /* Limit number of queues according to number of CIDs */
        qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
                   vf->relative_vf_id, p_params->num_queues, (u16)cids);
        num_irqs = min_t(u16, p_params->num_queues, ((u16)cids));

        num_of_vf_available_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
                                                              p_ptt,
                                                              vf, num_irqs);
        if (!num_of_vf_available_chains) {
                DP_ERR(p_hwfn, "no available igu sbs\n");
                return -ENOMEM;
        }

        /* Choose queue number and index ranges */
        vf->num_rxqs = num_of_vf_available_chains;
        vf->num_txqs = num_of_vf_available_chains;

        for (i = 0; i < vf->num_rxqs; i++) {
                struct qed_vf_queue *p_queue = &vf->vf_queues[i];

                p_queue->fw_rx_qid = p_params->req_rx_queue[i];
                p_queue->fw_tx_qid = p_params->req_tx_queue[i];

                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
                           vf->relative_vf_id, i, vf->igu_sbs[i],
                           p_queue->fw_rx_qid, p_queue->fw_tx_qid);
        }

        /* Update the link configuration in bulletin */
        memcpy(&link_params, qed_mcp_get_link_params(p_hwfn),
               sizeof(link_params));
        memcpy(&link_state, qed_mcp_get_link_state(p_hwfn), sizeof(link_state));
        memcpy(&link_caps, qed_mcp_get_link_capabilities(p_hwfn),
               sizeof(link_caps));
        qed_iov_set_link(p_hwfn, p_params->rel_vf_id,
                         &link_params, &link_state, &link_caps);

        rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
        if (!rc) {
                vf->b_init = true;

                if (IS_LEAD_HWFN(p_hwfn))
                        p_hwfn->cdev->p_iov_info->num_vfs++;
        }

        return rc;
}

static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
                                     struct qed_ptt *p_ptt, u16 rel_vf_id)
{
        struct qed_mcp_link_capabilities caps;
        struct qed_mcp_link_params params;
        struct qed_mcp_link_state link;
        struct qed_vf_info *vf = NULL;

        vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
        if (!vf) {
                DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf: vf is NULL\n");
                return -EINVAL;
        }

        if (vf->bulletin.p_virt)
                memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));

        memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));

        /* Get the link configuration back in bulletin so
         * that when VFs are re-enabled they get the actual
         * link configuration.
         */
        memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params));
        memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link));
        memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
        qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);

        /* Forget the VF's acquisition message */
        memset(&vf->acquire, 0, sizeof(vf->acquire));

        /* Disabling interrupts and resetting the permission table were done
         * during vf-close; however, we could get here without going through
         * vf-close.
         */
        /* Disable Interrupts for VF */
        qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, false);

        /* Reset Permission table */
        qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

        vf->num_rxqs = 0;
        vf->num_txqs = 0;
        qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);

        if (vf->b_init) {
                vf->b_init = false;

                if (IS_LEAD_HWFN(p_hwfn))
                        p_hwfn->cdev->p_iov_info->num_vfs--;
        }

        return 0;
}

static bool qed_iov_tlv_supported(u16 tlvtype)
{
        return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}

/* place a given tlv on the tlv buffer, continuing current tlv list */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
{
        struct channel_tlv *tl = (struct channel_tlv *)*offset;

        tl->type = type;
        tl->length = length;

        /* Offset should keep pointing to next TLV (the end of the last) */
        *offset += length;

        /* Return a pointer to the start of the added tlv */
        return *offset - length;
}
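
/* A minimal sketch (context assumed, illustrative only) of building a reply
 * chain with qed_add_tlv(); each call appends one TLV and advances offset to
 * the end of the chain, as done in qed_iov_prepare_resp() below:
 *
 *      u8 *offset = (u8 *)mbx->reply_virt;
 *      struct pfvf_def_resp_tlv *resp =
 *              qed_add_tlv(p_hwfn, &offset, CHANNEL_TLV_VPORT_START,
 *                          sizeof(*resp));
 *      qed_add_tlv(p_hwfn, &offset, CHANNEL_TLV_LIST_END,
 *                  sizeof(struct channel_list_end_tlv));
 */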

/* list the types and lengths of the tlvs on the buffer */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
{
        u16 i = 1, total_length = 0;
        struct channel_tlv *tlv;

        do {
                tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

                /* output tlv */
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "TLV number %d: type %d, length %d\n",
                           i, tlv->type, tlv->length);

                if (tlv->type == CHANNEL_TLV_LIST_END)
                        return;

                /* Validate entry - protect against malicious VFs */
                if (!tlv->length) {
                        DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
                        return;
                }

                total_length += tlv->length;

                if (total_length >= sizeof(struct tlv_buffer_size)) {
                        DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
                        return;
                }

                i++;
        } while (1);
}

static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
                                  struct qed_ptt *p_ptt,
                                  struct qed_vf_info *p_vf,
                                  u16 length, u8 status)
{
        struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
        struct qed_dmae_params params;
        u8 eng_vf_id;

        mbx->reply_virt->default_resp.hdr.status = status;

        qed_dp_tlv_list(p_hwfn, mbx->reply_virt);

        eng_vf_id = p_vf->abs_vf_id;

        memset(&params, 0, sizeof(params));
        SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1);
        params.dst_vfid = eng_vf_id;

        qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
                           mbx->req_virt->first_tlv.reply_address +
                           sizeof(u64),
                           (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
                           &params);

        /* Once PF copies the rc to the VF, the latter can continue
         * and send an additional message. So we have to make sure the
         * channel would be re-set to ready prior to that.
         */
        REG_WR(p_hwfn,
               GTT_BAR0_MAP_REG_USDM_RAM +
               USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);

        qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
                           mbx->req_virt->first_tlv.reply_address,
                           sizeof(u64) / 4, &params);
}

static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
                                enum qed_iov_vport_update_flag flag)
{
        switch (flag) {
        case QED_IOV_VP_UPDATE_ACTIVATE:
                return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
        case QED_IOV_VP_UPDATE_VLAN_STRIP:
                return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
        case QED_IOV_VP_UPDATE_TX_SWITCH:
                return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
        case QED_IOV_VP_UPDATE_MCAST:
                return CHANNEL_TLV_VPORT_UPDATE_MCAST;
        case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
                return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
        case QED_IOV_VP_UPDATE_RSS:
                return CHANNEL_TLV_VPORT_UPDATE_RSS;
        case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
                return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
        case QED_IOV_VP_UPDATE_SGE_TPA:
                return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
        default:
                return 0;
        }
}

static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
                                            struct qed_vf_info *p_vf,
                                            struct qed_iov_vf_mbx *p_mbx,
                                            u8 status,
                                            u16 tlvs_mask, u16 tlvs_accepted)
{
        struct pfvf_def_resp_tlv *resp;
        u16 size, total_len, i;

        memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
        p_mbx->offset = (u8 *)p_mbx->reply_virt;
        size = sizeof(struct pfvf_def_resp_tlv);
        total_len = size;

        qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

        /* Prepare response for all extended tlvs if they are found by PF */
        for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
                if (!(tlvs_mask & BIT(i)))
                        continue;

                resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
                                   qed_iov_vport_to_tlv(p_hwfn, i), size);

                if (tlvs_accepted & BIT(i))
                        resp->hdr.status = status;
                else
                        resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;

                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
                           "VF[%d] - vport_update response: TLV %d, status %02x\n",
                           p_vf->relative_vf_id,
                           qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);

                total_len += size;
        }

        qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
                    sizeof(struct channel_list_end_tlv));

        return total_len;
}

static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt,
                                 struct qed_vf_info *vf_info,
                                 u16 type, u16 length, u8 status)
{
        struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;

        mbx->offset = (u8 *)mbx->reply_virt;

        qed_add_tlv(p_hwfn, &mbx->offset, type, length);
        qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
                    sizeof(struct channel_list_end_tlv));

        qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
}

static struct
qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
                                               u16 relative_vf_id,
                                               bool b_enabled_only)
{
        struct qed_vf_info *vf = NULL;

        vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
        if (!vf)
                return NULL;

        return &vf->p_vf_info;
}

static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
{
        struct qed_public_vf_info *vf_info;

        vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);

        if (!vf_info)
                return;

        /* Clear the VF mac */
        eth_zero_addr(vf_info->mac);

        vf_info->rx_accept_mode = 0;
        vf_info->tx_accept_mode = 0;
}

static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
                               struct qed_vf_info *p_vf)
{
        u32 i, j;

        p_vf->vf_bulletin = 0;
        p_vf->vport_instance = 0;
        p_vf->configured_features = 0;

        /* If VF previously requested fewer resources, go back to default */
        p_vf->num_rxqs = p_vf->num_sbs;
        p_vf->num_txqs = p_vf->num_sbs;

        p_vf->num_active_rxqs = 0;

        for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
                struct qed_vf_queue *p_queue = &p_vf->vf_queues[i];

                for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
                        if (!p_queue->cids[j].p_cid)
                                continue;

                        qed_eth_queue_cid_release(p_hwfn,
                                                  p_queue->cids[j].p_cid);
                        p_queue->cids[j].p_cid = NULL;
                }
        }

        memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
        memset(&p_vf->acquire, 0, sizeof(p_vf->acquire));
        qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
}

/* Returns either 0, or log2 of the doorbell BAR size */
static u32 qed_iov_vf_db_bar_size(struct qed_hwfn *p_hwfn,
                                  struct qed_ptt *p_ptt)
{
        u32 val = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);

        if (val)
                return val + 11;
        return 0;
}
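
/* Worked example (illustrative; register encoding assumed): a raw value of
 * 1 yields 1 + 11 = 12, i.e. a 2^12 = 4KB doorbell BAR, which the caller
 * below expands with (1 << bar_size); a raw value of 0 means "unknown".
 */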
1389
1390 static void
1391 qed_iov_vf_mbx_acquire_resc_cids(struct qed_hwfn *p_hwfn,
1392                                  struct qed_ptt *p_ptt,
1393                                  struct qed_vf_info *p_vf,
1394                                  struct vf_pf_resc_request *p_req,
1395                                  struct pf_vf_resc *p_resp)
1396 {
1397         u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
1398         u8 db_size = qed_db_addr_vf(1, DQ_DEMS_LEGACY) -
1399                      qed_db_addr_vf(0, DQ_DEMS_LEGACY);
1400         u32 bar_size;
1401
1402         p_resp->num_cids = min_t(u8, p_req->num_cids, num_vf_cons);
1403
1404         /* If VF didn't bother asking for QIDs than don't bother limiting
1405          * number of CIDs. The VF doesn't care about the number, and this
1406          * has the likely result of causing an additional acquisition.
1407          */
1408         if (!(p_vf->acquire.vfdev_info.capabilities &
1409               VFPF_ACQUIRE_CAP_QUEUE_QIDS))
1410                 return;
1411
1412         /* If doorbell bar was mapped by VF, limit the VF CIDs to an amount
1413          * that would make sure doorbells for all CIDs fall within the bar.
1414          * If it doesn't, make sure regview window is sufficient.
1415          */
1416         if (p_vf->acquire.vfdev_info.capabilities &
1417             VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
1418                 bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt);
1419                 if (bar_size)
1420                         bar_size = 1 << bar_size;
1421
1422                 if (p_hwfn->cdev->num_hwfns > 1)
1423                         bar_size /= 2;
1424         } else {
1425                 bar_size = PXP_VF_BAR0_DQ_LENGTH;
1426         }
1427
1428         if (bar_size / db_size < 256)
1429                 p_resp->num_cids = min_t(u8, p_resp->num_cids,
1430                                          (u8)(bar_size / db_size));
1431 }
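/* A sketch of the clamp above, using assumed (illustrative) values:
 *
 *   bar_size = 1 << 17;     // 128 KiB physical doorbell bar
 *   bar_size /= 2;          // two hwfns (100g) -> 64 KiB per engine
 *   // db_size == 256:  65536 / 256  == 256 -> no clamp applied
 *   // db_size == 1024: 65536 / 1024 == 64  -> num_cids clamped to 64
 *
 * The actual db_size is the stride between two consecutive legacy VF
 * doorbell addresses, computed at the top of the function.
 */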
1432
1433 static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
1434                                       struct qed_ptt *p_ptt,
1435                                       struct qed_vf_info *p_vf,
1436                                       struct vf_pf_resc_request *p_req,
1437                                       struct pf_vf_resc *p_resp)
1438 {
1439         u8 i;
1440
1441         /* Queue related information */
1442         p_resp->num_rxqs = p_vf->num_rxqs;
1443         p_resp->num_txqs = p_vf->num_txqs;
1444         p_resp->num_sbs = p_vf->num_sbs;
1445
1446         for (i = 0; i < p_resp->num_sbs; i++) {
1447                 p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
1448                 p_resp->hw_sbs[i].sb_qid = 0;
1449         }
1450
1451         /* These fields are filled for backward compatibility; they are
1452          * unused by modern VFs.
1453          */
1454         for (i = 0; i < p_resp->num_rxqs; i++) {
1455                 qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
1456                                 (u16 *)&p_resp->hw_qid[i]);
1457                 p_resp->cid[i] = i;
1458         }
1459
1460         /* Filter related information */
1461         p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters,
1462                                         p_req->num_mac_filters);
1463         p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
1464                                          p_req->num_vlan_filters);
1465
1466         qed_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);
1467
1468         /* This isn't really needed/enforced, but some legacy VFs might depend
1469          * on the correct filling of this field.
1470          */
1471         p_resp->num_mc_filters = QED_MAX_MC_ADDRS;
1472
1473         /* Validate sufficient resources for VF */
1474         if (p_resp->num_rxqs < p_req->num_rxqs ||
1475             p_resp->num_txqs < p_req->num_txqs ||
1476             p_resp->num_sbs < p_req->num_sbs ||
1477             p_resp->num_mac_filters < p_req->num_mac_filters ||
1478             p_resp->num_vlan_filters < p_req->num_vlan_filters ||
1479             p_resp->num_mc_filters < p_req->num_mc_filters ||
1480             p_resp->num_cids < p_req->num_cids) {
1481                 DP_VERBOSE(p_hwfn,
1482                            QED_MSG_IOV,
1483                            "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
1484                            p_vf->abs_vf_id,
1485                            p_req->num_rxqs,
1486                            p_resp->num_rxqs,
1487                            p_req->num_txqs,
1488                            p_resp->num_txqs,
1489                            p_req->num_sbs,
1490                            p_resp->num_sbs,
1491                            p_req->num_mac_filters,
1492                            p_resp->num_mac_filters,
1493                            p_req->num_vlan_filters,
1494                            p_resp->num_vlan_filters,
1495                            p_req->num_mc_filters,
1496                            p_resp->num_mc_filters,
1497                            p_req->num_cids, p_resp->num_cids);
1498
1499                 /* Some legacy OSes are incapable of correctly handling this
1500                  * failure.
1501                  */
1502                 if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
1503                      ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
1504                     (p_vf->acquire.vfdev_info.os_type ==
1505                      VFPF_ACQUIRE_OS_WINDOWS))
1506                         return PFVF_STATUS_SUCCESS;
1507
1508                 return PFVF_STATUS_NO_RESOURCE;
1509         }
1510
1511         return PFVF_STATUS_SUCCESS;
1512 }
1513
1514 static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn,
1515                                          struct pfvf_stats_info *p_stats)
1516 {
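        /* The addresses filled below are VF BAR0 offsets into the per-VF
         * storm zones (MSTORM/USTORM/PSDM), which the VF presumably adds
         * to its own BAR0 mapping when reading queue statistics; tstats is
         * left zeroed, suggesting no corresponding VF zone exists for it.
         */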
1517         p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
1518                                   offsetof(struct mstorm_vf_zone,
1519                                            non_trigger.eth_queue_stat);
1520         p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
1521         p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
1522                                   offsetof(struct ustorm_vf_zone,
1523                                            non_trigger.eth_queue_stat);
1524         p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
1525         p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
1526                                   offsetof(struct pstorm_vf_zone,
1527                                            non_trigger.eth_queue_stat);
1528         p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
1529         p_stats->tstats.address = 0;
1530         p_stats->tstats.len = 0;
1531 }
1532
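/* Handle the VF's ACQUIRE message: validate VF state and fastpath-HSI
 * compatibility, latch the request, fill PF capabilities and resource
 * counts into the reply, start the VF in FW and post the initial
 * bulletin. Every failure path falls through to 'out', so the VF
 * always receives a reply carrying vfpf_status.
 */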
1533 static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
1534                                    struct qed_ptt *p_ptt,
1535                                    struct qed_vf_info *vf)
1536 {
1537         struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1538         struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
1539         struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
1540         struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
1541         u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
1542         struct pf_vf_resc *resc = &resp->resc;
1543         int rc;
1544
1545         memset(resp, 0, sizeof(*resp));
1546
1547         /* Write the PF version so that the VF knows which version is
1548          * supported - it might be overridden later. This guarantees the VF
1549          * can recognize a legacy PF by the lack of versions in the reply.
1550          */
1551         pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
1552         pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
1553
1554         if (vf->state != VF_FREE && vf->state != VF_STOPPED) {
1555                 DP_VERBOSE(p_hwfn,
1556                            QED_MSG_IOV,
1557                            "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
1558                            vf->abs_vf_id, vf->state);
1559                 goto out;
1560         }
1561
1562         /* Validate FW compatibility */
1563         if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
1564                 if (req->vfdev_info.capabilities &
1565                     VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
1566                         struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;
1567
1568                         DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1569                                    "VF[%d] is pre-fastpath HSI\n",
1570                                    vf->abs_vf_id);
1571                         p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
1572                         p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
1573                 } else {
1574                         DP_INFO(p_hwfn,
1575                                 "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
1576                                 vf->abs_vf_id,
1577                                 req->vfdev_info.eth_fp_hsi_major,
1578                                 req->vfdev_info.eth_fp_hsi_minor,
1579                                 ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
1580
1581                         goto out;
1582                 }
1583         }
1584
1585         /* On 100g PFs, prevent old VFs from loading */
1586         if ((p_hwfn->cdev->num_hwfns > 1) &&
1587             !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
1588                 DP_INFO(p_hwfn,
1589                         "VF[%d] is running an old driver that doesn't support 100g\n",
1590                         vf->abs_vf_id);
1591                 goto out;
1592         }
1593
1594         /* Store the acquire message */
1595         memcpy(&vf->acquire, req, sizeof(vf->acquire));
1596
1597         vf->opaque_fid = req->vfdev_info.opaque_fid;
1598
1599         vf->vf_bulletin = req->bulletin_addr;
1600         vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
1601                             vf->bulletin.size : req->bulletin_size;
1602
1603         /* fill in pfdev info */
1604         pfdev_info->chip_num = p_hwfn->cdev->chip_num;
1605         pfdev_info->db_size = 0;
1606         pfdev_info->indices_per_sb = PIS_PER_SB_E4;
1607
1608         pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
1609                                    PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
1610         if (p_hwfn->cdev->num_hwfns > 1)
1611                 pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
1612
1613         /* Share our ability to use multiple queue-ids only with VFs
1614          * that request it.
1615          */
1616         if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
1617                 pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;
1618
1619         /* Share the sizes of the bars with VF */
1620         resp->pfdev_info.bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt);
1621
1622         qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
1623
1624         memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
1625
1626         pfdev_info->fw_major = FW_MAJOR_VERSION;
1627         pfdev_info->fw_minor = FW_MINOR_VERSION;
1628         pfdev_info->fw_rev = FW_REVISION_VERSION;
1629         pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
1630
1631         /* Incorrect when legacy, but doesn't matter as legacy isn't reading
1632          * this field.
1633          */
1634         pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR,
1635                                          req->vfdev_info.eth_fp_hsi_minor);
1636         pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
1637         qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);
1638
1639         pfdev_info->dev_type = p_hwfn->cdev->type;
1640         pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;
1641
1642         /* Fill resources available to VF; Make sure there are enough to
1643          * satisfy the VF's request.
1644          */
1645         vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
1646                                                   &req->resc_request, resc);
1647         if (vfpf_status != PFVF_STATUS_SUCCESS)
1648                 goto out;
1649
1650         /* Start the VF in FW */
1651         rc = qed_sp_vf_start(p_hwfn, vf);
1652         if (rc) {
1653                 DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
1654                 vfpf_status = PFVF_STATUS_FAILURE;
1655                 goto out;
1656         }
1657
1658         /* Fill agreed size of bulletin board in response */
1659         resp->bulletin_size = vf->bulletin.size;
1660         qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
1661
1662         DP_VERBOSE(p_hwfn,
1663                    QED_MSG_IOV,
1664                    "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
1665                    "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
1666                    vf->abs_vf_id,
1667                    resp->pfdev_info.chip_num,
1668                    resp->pfdev_info.db_size,
1669                    resp->pfdev_info.indices_per_sb,
1670                    resp->pfdev_info.capabilities,
1671                    resc->num_rxqs,
1672                    resc->num_txqs,
1673                    resc->num_sbs,
1674                    resc->num_mac_filters,
1675                    resc->num_vlan_filters);
1676         vf->state = VF_ACQUIRED;
1677
1678         /* Prepare Response */
1679 out:
1680         qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
1681                              sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
1682 }
1683
1684 static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
1685                                   struct qed_vf_info *p_vf, bool val)
1686 {
1687         struct qed_sp_vport_update_params params;
1688         int rc;
1689
1690         if (val == p_vf->spoof_chk) {
1691                 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1692                            "Spoofchk value[%d] is already configured\n", val);
1693                 return 0;
1694         }
1695
1696         memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
1697         params.opaque_fid = p_vf->opaque_fid;
1698         params.vport_id = p_vf->vport_id;
1699         params.update_anti_spoofing_en_flg = 1;
1700         params.anti_spoofing_en = val;
1701
1702         rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
1703         if (!rc) {
1704                 p_vf->spoof_chk = val;
1705                 p_vf->req_spoofchk_val = p_vf->spoof_chk;
1706                 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1707                            "Spoofchk val[%d] configured\n", val);
1708         } else {
1709                 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1710                            "Spoofchk configuration[val:%d] failed for VF[%d]\n",
1711                            val, p_vf->relative_vf_id);
1712         }
1713
1714         return rc;
1715 }
1716
1717 static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
1718                                             struct qed_vf_info *p_vf)
1719 {
1720         struct qed_filter_ucast filter;
1721         int rc = 0;
1722         int i;
1723
1724         memset(&filter, 0, sizeof(filter));
1725         filter.is_rx_filter = 1;
1726         filter.is_tx_filter = 1;
1727         filter.vport_to_add_to = p_vf->vport_id;
1728         filter.opcode = QED_FILTER_ADD;
1729
1730         /* Reconfigure vlans */
1731         for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
1732                 if (!p_vf->shadow_config.vlans[i].used)
1733                         continue;
1734
1735                 filter.type = QED_FILTER_VLAN;
1736                 filter.vlan = p_vf->shadow_config.vlans[i].vid;
1737                 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1738                            "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
1739                            filter.vlan, p_vf->relative_vf_id);
1740                 rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1741                                              &filter, QED_SPQ_MODE_CB, NULL);
1742                 if (rc) {
1743                         DP_NOTICE(p_hwfn,
1744                                   "Failed to configure VLAN [%04x] to VF [%04x]\n",
1745                                   filter.vlan, p_vf->relative_vf_id);
1746                         break;
1747                 }
1748         }
1749
1750         return rc;
1751 }
1752
1753 static int
1754 qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
1755                                    struct qed_vf_info *p_vf, u64 events)
1756 {
1757         int rc = 0;
1758
1759         if ((events & BIT(VLAN_ADDR_FORCED)) &&
1760             !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
1761                 rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
1762
1763         return rc;
1764 }
1765
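/* Apply hypervisor-forced settings on the VF's vport: a forced (or
 * trust-configured) MAC is REPLACEd unconditionally, while a forced
 * VLAN programs the pvid filter, updates default-vlan and silent
 * stripping on the vport, and pushes an Rx-queue update to every
 * active queue. Once forced features are removed, the VF's own shadow
 * configuration is restored via qed_iov_reconfigure_unicast_shadow().
 */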
1766 static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
1767                                           struct qed_vf_info *p_vf, u64 events)
1768 {
1769         int rc = 0;
1770         struct qed_filter_ucast filter;
1771
1772         if (!p_vf->vport_instance)
1773                 return -EINVAL;
1774
1775         if ((events & BIT(MAC_ADDR_FORCED)) ||
1776             p_vf->p_vf_info.is_trusted_configured) {
1777                 /* Since there's no way [currently] of removing the MAC,
1778                  * we can always assume this means we need to force it.
1779                  */
1780                 memset(&filter, 0, sizeof(filter));
1781                 filter.type = QED_FILTER_MAC;
1782                 filter.opcode = QED_FILTER_REPLACE;
1783                 filter.is_rx_filter = 1;
1784                 filter.is_tx_filter = 1;
1785                 filter.vport_to_add_to = p_vf->vport_id;
1786                 ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);
1787
1788                 rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1789                                              &filter, QED_SPQ_MODE_CB, NULL);
1790                 if (rc) {
1791                         DP_NOTICE(p_hwfn,
1792                                   "PF failed to configure MAC for VF\n");
1793                         return rc;
1794                 }
1795                 if (p_vf->p_vf_info.is_trusted_configured)
1796                         p_vf->configured_features |=
1797                                 BIT(VFPF_BULLETIN_MAC_ADDR);
1798                 else
1799                         p_vf->configured_features |=
1800                                 BIT(MAC_ADDR_FORCED);
1801         }
1802
1803         if (events & BIT(VLAN_ADDR_FORCED)) {
1804                 struct qed_sp_vport_update_params vport_update;
1805                 u8 removal;
1806                 int i;
1807
1808                 memset(&filter, 0, sizeof(filter));
1809                 filter.type = QED_FILTER_VLAN;
1810                 filter.is_rx_filter = 1;
1811                 filter.is_tx_filter = 1;
1812                 filter.vport_to_add_to = p_vf->vport_id;
1813                 filter.vlan = p_vf->bulletin.p_virt->pvid;
1814                 filter.opcode = filter.vlan ? QED_FILTER_REPLACE :
1815                                               QED_FILTER_FLUSH;
1816
1817                 /* Send the ramrod */
1818                 rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1819                                              &filter, QED_SPQ_MODE_CB, NULL);
1820                 if (rc) {
1821                         DP_NOTICE(p_hwfn,
1822                                   "PF failed to configure VLAN for VF\n");
1823                         return rc;
1824                 }
1825
1826                 /* Update the default-vlan & silent vlan stripping */
1827                 memset(&vport_update, 0, sizeof(vport_update));
1828                 vport_update.opaque_fid = p_vf->opaque_fid;
1829                 vport_update.vport_id = p_vf->vport_id;
1830                 vport_update.update_default_vlan_enable_flg = 1;
1831                 vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
1832                 vport_update.update_default_vlan_flg = 1;
1833                 vport_update.default_vlan = filter.vlan;
1834
1835                 vport_update.update_inner_vlan_removal_flg = 1;
1836                 removal = filter.vlan ? 1
1837                                       : p_vf->shadow_config.inner_vlan_removal;
1838                 vport_update.inner_vlan_removal_flg = removal;
1839                 vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
1840                 rc = qed_sp_vport_update(p_hwfn,
1841                                          &vport_update,
1842                                          QED_SPQ_MODE_EBLOCK, NULL);
1843                 if (rc) {
1844                         DP_NOTICE(p_hwfn,
1845                                   "PF failed to configure VF vport for vlan\n");
1846                         return rc;
1847                 }
1848
1849                 /* Update all the Rx queues */
1850                 for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
1851                         struct qed_vf_queue *p_queue = &p_vf->vf_queues[i];
1852                         struct qed_queue_cid *p_cid = NULL;
1853
1854                         /* There can be at most 1 Rx queue per qzone. Find it */
1855                         p_cid = qed_iov_get_vf_rx_queue_cid(p_queue);
1856                         if (!p_cid)
1857                                 continue;
1858
1859                         rc = qed_sp_eth_rx_queues_update(p_hwfn,
1860                                                          (void **)&p_cid,
1861                                                          1, 0, 1,
1862                                                          QED_SPQ_MODE_EBLOCK,
1863                                                          NULL);
1864                         if (rc) {
1865                                 DP_NOTICE(p_hwfn,
1866                                           "Failed to send Rx update to queue[0x%04x]\n",
1867                                           p_cid->rel.queue_id);
1868                                 return rc;
1869                         }
1870                 }
1871
1872                 if (filter.vlan)
1873                         p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
1874                 else
1875                         p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED);
1876         }
1877
1878         /* If forced features are terminated, we need to configure the shadow
1879          * configuration back again.
1880          */
1881         if (events)
1882                 qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
1883
1884         return rc;
1885 }
1886
1887 static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
1888                                        struct qed_ptt *p_ptt,
1889                                        struct qed_vf_info *vf)
1890 {
1891         struct qed_sp_vport_start_params params = { 0 };
1892         struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1893         struct vfpf_vport_start_tlv *start;
1894         u8 status = PFVF_STATUS_SUCCESS;
1895         struct qed_vf_info *vf_info;
1896         u64 *p_bitmap;
1897         int sb_id;
1898         int rc;
1899
1900         vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
1901         if (!vf_info) {
1902                 DP_NOTICE(p_hwfn->cdev,
1903                           "Failed to get VF info, invalid vfid [%d]\n",
1904                           vf->relative_vf_id);
1905                 return;
1906         }
1907
1908         vf->state = VF_ENABLED;
1909         start = &mbx->req_virt->start_vport;
1910
1911         qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
1912
1913         /* Initialize Status block in CAU */
1914         for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
1915                 if (!start->sb_addr[sb_id]) {
1916                         DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1917                                    "VF[%d] did not fill the address of SB %d\n",
1918                                    vf->relative_vf_id, sb_id);
1919                         break;
1920                 }
1921
1922                 qed_int_cau_conf_sb(p_hwfn, p_ptt,
1923                                     start->sb_addr[sb_id],
1924                                     vf->igu_sbs[sb_id], vf->abs_vf_id, 1);
1925         }
1926
1927         vf->mtu = start->mtu;
1928         vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
1929
1930         /* Take into consideration any configuration forced by the
1931          * hypervisor; if none is configured, use the values supplied by the
1932          * VF [still fine for old VFs, since they passed '0' as padding].
1933          */
1934         p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
1935         if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
1936                 u8 vf_req = start->only_untagged;
1937
1938                 vf_info->bulletin.p_virt->default_only_untagged = vf_req;
1939                 *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
1940         }
1941
1942         params.tpa_mode = start->tpa_mode;
1943         params.remove_inner_vlan = start->inner_vlan_removal;
1944         params.tx_switching = true;
1945
1946         params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
1947         params.drop_ttl0 = false;
1948         params.concrete_fid = vf->concrete_fid;
1949         params.opaque_fid = vf->opaque_fid;
1950         params.vport_id = vf->vport_id;
1951         params.max_buffers_per_cqe = start->max_buffers_per_cqe;
1952         params.mtu = vf->mtu;
1953
1954         /* Non-trusted VFs should enable control frame filtering */
1955         params.check_mac = !vf->p_vf_info.is_trusted_configured;
1956
1957         rc = qed_sp_eth_vport_start(p_hwfn, &params);
1958         if (rc) {
1959                 DP_ERR(p_hwfn,
1960                        "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
1961                 status = PFVF_STATUS_FAILURE;
1962         } else {
1963                 vf->vport_instance++;
1964
1965                 /* Force configuration if needed on the newly opened vport */
1966                 qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
1967
1968                 __qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
1969         }
1970         qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
1971                              sizeof(struct pfvf_def_resp_tlv), status);
1972 }
1973
1974 static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
1975                                       struct qed_ptt *p_ptt,
1976                                       struct qed_vf_info *vf)
1977 {
1978         u8 status = PFVF_STATUS_SUCCESS;
1979         int rc;
1980
1981         vf->vport_instance--;
1982         vf->spoof_chk = false;
1983
1984         if ((qed_iov_validate_active_rxq(p_hwfn, vf)) ||
1985             (qed_iov_validate_active_txq(p_hwfn, vf))) {
1986                 vf->b_malicious = true;
1987                 DP_NOTICE(p_hwfn,
1988                           "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n",
1989                           vf->abs_vf_id);
1990                 status = PFVF_STATUS_MALICIOUS;
1991                 goto out;
1992         }
1993
1994         rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
1995         if (rc) {
1996                 DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
1997                        rc);
1998                 status = PFVF_STATUS_FAILURE;
1999         }
2000
2001         /* Forget the configuration on the vport */
2002         vf->configured_features = 0;
2003         memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));
2004
2005 out:
2006         qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
2007                              sizeof(struct pfvf_def_resp_tlv), status);
2008 }
2009
2010 static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
2011                                           struct qed_ptt *p_ptt,
2012                                           struct qed_vf_info *vf,
2013                                           u8 status, bool b_legacy)
2014 {
2015         struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2016         struct pfvf_start_queue_resp_tlv *p_tlv;
2017         struct vfpf_start_rxq_tlv *req;
2018         u16 length;
2019
2020         mbx->offset = (u8 *)mbx->reply_virt;
2021
2022         /* Taking a bigger struct instead of adding a TLV to the list was a
2023          * mistake, but one which we're now stuck with, as some older
2024          * clients assume the size of the previous response.
2025          */
2026         if (!b_legacy)
2027                 length = sizeof(*p_tlv);
2028         else
2029                 length = sizeof(struct pfvf_def_resp_tlv);
2030
2031         p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
2032                             length);
2033         qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2034                     sizeof(struct channel_list_end_tlv));
2035
2036         /* Update the TLV with the response */
2037         if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
2038                 req = &mbx->req_virt->start_rxq;
2039                 p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
2040                                 offsetof(struct mstorm_vf_zone,
2041                                          non_trigger.eth_rx_queue_producers) +
2042                                 sizeof(struct eth_rx_prod_data) * req->rx_qid;
2043         }
2044
2045         qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
2046 }
2047
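/* Resolve the qid-usage index a VF request refers to. Legacy VFs
 * (no QUEUE_QIDS capability) always use the fixed per-qzone slots
 * QED_IOV_LEGACY_QID_RX/QED_IOV_LEGACY_QID_TX; modern VFs must attach
 * a CHANNEL_TLV_QID TLV, whose qid is bounds-checked against
 * MAX_QUEUES_PER_QZONE. QED_IOV_QID_INVALID is returned on failure.
 */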
2048 static u8 qed_iov_vf_mbx_qid(struct qed_hwfn *p_hwfn,
2049                              struct qed_vf_info *p_vf, bool b_is_tx)
2050 {
2051         struct qed_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
2052         struct vfpf_qid_tlv *p_qid_tlv;
2053
2054         /* Search for the qid if the VF published it's going to provide it */
2055         if (!(p_vf->acquire.vfdev_info.capabilities &
2056               VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
2057                 if (b_is_tx)
2058                         return QED_IOV_LEGACY_QID_TX;
2059                 else
2060                         return QED_IOV_LEGACY_QID_RX;
2061         }
2062
2063         p_qid_tlv = (struct vfpf_qid_tlv *)
2064                     qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2065                                              CHANNEL_TLV_QID);
2066         if (!p_qid_tlv) {
2067                 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2068                            "VF[%2x]: Failed to provide qid\n",
2069                            p_vf->relative_vf_id);
2070
2071                 return QED_IOV_QID_INVALID;
2072         }
2073
2074         if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
2075                 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2076                            "VF[%02x]: Provided qid out-of-bounds %02x\n",
2077                            p_vf->relative_vf_id, p_qid_tlv->qid);
2078                 return QED_IOV_QID_INVALID;
2079         }
2080
2081         return p_qid_tlv->qid;
2082 }
2083
2084 static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
2085                                      struct qed_ptt *p_ptt,
2086                                      struct qed_vf_info *vf)
2087 {
2088         struct qed_queue_start_common_params params;
2089         struct qed_queue_cid_vf_params vf_params;
2090         struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2091         u8 status = PFVF_STATUS_NO_RESOURCE;
2092         u8 qid_usage_idx, vf_legacy = 0;
2093         struct vfpf_start_rxq_tlv *req;
2094         struct qed_vf_queue *p_queue;
2095         struct qed_queue_cid *p_cid;
2096         struct qed_sb_info sb_dummy;
2097         int rc;
2098
2099         req = &mbx->req_virt->start_rxq;
2100
2101         if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
2102                                   QED_IOV_VALIDATE_Q_DISABLE) ||
2103             !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2104                 goto out;
2105
2106         qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
2107         if (qid_usage_idx == QED_IOV_QID_INVALID)
2108                 goto out;
2109
2110         p_queue = &vf->vf_queues[req->rx_qid];
2111         if (p_queue->cids[qid_usage_idx].p_cid)
2112                 goto out;
2113
2114         vf_legacy = qed_vf_calculate_legacy(vf);
2115
2116         /* Acquire a new queue-cid */
2117         memset(&params, 0, sizeof(params));
2118         params.queue_id = p_queue->fw_rx_qid;
2119         params.vport_id = vf->vport_id;
2120         params.stats_id = vf->abs_vf_id + 0x10;
2121         /* Since IGU index is passed via sb_info, construct a dummy one */
2122         memset(&sb_dummy, 0, sizeof(sb_dummy));
2123         sb_dummy.igu_sb_id = req->hw_sb;
2124         params.p_sb = &sb_dummy;
2125         params.sb_idx = req->sb_index;
2126
2127         memset(&vf_params, 0, sizeof(vf_params));
2128         vf_params.vfid = vf->relative_vf_id;
2129         vf_params.vf_qid = (u8)req->rx_qid;
2130         vf_params.vf_legacy = vf_legacy;
2131         vf_params.qid_usage_idx = qid_usage_idx;
2132         p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2133                                      &params, true, &vf_params);
2134         if (!p_cid)
2135                 goto out;
2136
2137         /* Legacy VFs have their Producers in a different location, which they
2138          * calculate on their own and clean the producer prior to this.
2139          */
2140         if (!(vf_legacy & QED_QCID_LEGACY_VF_RX_PROD))
2141                 REG_WR(p_hwfn,
2142                        GTT_BAR0_MAP_REG_MSDM_RAM +
2143                        MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
2144                        0);
2145
2146         rc = qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
2147                                       req->bd_max_bytes,
2148                                       req->rxq_addr,
2149                                       req->cqe_pbl_addr, req->cqe_pbl_size);
2150         if (rc) {
2151                 status = PFVF_STATUS_FAILURE;
2152                 qed_eth_queue_cid_release(p_hwfn, p_cid);
2153         } else {
2154                 p_queue->cids[qid_usage_idx].p_cid = p_cid;
2155                 p_queue->cids[qid_usage_idx].b_is_tx = false;
2156                 status = PFVF_STATUS_SUCCESS;
2157                 vf->num_active_rxqs++;
2158         }
2159
2160 out:
2161         qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
2162                                       !!(vf_legacy &
2163                                          QED_QCID_LEGACY_VF_RX_PROD));
2164 }
2165
2166 static void
2167 qed_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
2168                                struct qed_tunnel_info *p_tun,
2169                                u16 tunn_feature_mask)
2170 {
2171         p_resp->tunn_feature_mask = tunn_feature_mask;
2172         p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
2173         p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
2174         p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
2175         p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
2176         p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled;
2177         p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
2178         p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
2179         p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
2180         p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
2181         p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
2182         p_resp->geneve_udp_port = p_tun->geneve_port.port;
2183         p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
2184 }
2185
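/* Translate one tunnel-mode field from the VF request. A hedged
 * example of the mask semantics (request values illustrative only):
 * a VF setting BIT(QED_MODE_VXLAN_TUNN) in tun_mode_update_mask asks
 * for a VXLAN mode change, and the same bit in tunn_mode selects
 * enable (set) vs. disable (clear); with the update bit clear, only
 * the classification type is refreshed.
 */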
2186 static void
2187 __qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2188                               struct qed_tunn_update_type *p_tun,
2189                               enum qed_tunn_mode mask, u8 tun_cls)
2190 {
2191         if (p_req->tun_mode_update_mask & BIT(mask)) {
2192                 p_tun->b_update_mode = true;
2193
2194                 if (p_req->tunn_mode & BIT(mask))
2195                         p_tun->b_mode_enabled = true;
2196         }
2197
2198         p_tun->tun_cls = tun_cls;
2199 }
2200
2201 static void
2202 qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2203                             struct qed_tunn_update_type *p_tun,
2204                             struct qed_tunn_update_udp_port *p_port,
2205                             enum qed_tunn_mode mask,
2206                             u8 tun_cls, u8 update_port, u16 port)
2207 {
2208         if (update_port) {
2209                 p_port->b_update_port = true;
2210                 p_port->port = port;
2211         }
2212
2213         __qed_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
2214 }
2215
2216 static bool
2217 qed_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
2218 {
2219         bool b_update_requested = false;
2220
2221         if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
2222             p_req->update_geneve_port || p_req->update_vxlan_port)
2223                 b_update_requested = true;
2224
2225         return b_update_requested;
2226 }
2227
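/* A mode update that would leave the mode disabled is rejected here:
 * the update flag is dropped and *rc is set to -EINVAL, so the caller
 * reports a modified-configuration failure back to the VF.
 */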
2228 static void qed_pf_validate_tunn_mode(struct qed_tunn_update_type *tun, int *rc)
2229 {
2230         if (tun->b_update_mode && !tun->b_mode_enabled) {
2231                 tun->b_update_mode = false;
2232                 *rc = -EINVAL;
2233         }
2234 }
2235
2236 static int
2237 qed_pf_validate_modify_tunn_config(struct qed_hwfn *p_hwfn,
2238                                    u16 *tun_features, bool *update,
2239                                    struct qed_tunnel_info *tun_src)
2240 {
2241         struct qed_eth_cb_ops *ops = p_hwfn->cdev->protocol_ops.eth;
2242         struct qed_tunnel_info *tun = &p_hwfn->cdev->tunnel;
2243         u16 bultn_vxlan_port, bultn_geneve_port;
2244         void *cookie = p_hwfn->cdev->ops_cookie;
2245         int i, rc = 0;
2246
2247         *tun_features = p_hwfn->cdev->tunn_feature_mask;
2248         bultn_vxlan_port = tun->vxlan_port.port;
2249         bultn_geneve_port = tun->geneve_port.port;
2250         qed_pf_validate_tunn_mode(&tun_src->vxlan, &rc);
2251         qed_pf_validate_tunn_mode(&tun_src->l2_geneve, &rc);
2252         qed_pf_validate_tunn_mode(&tun_src->ip_geneve, &rc);
2253         qed_pf_validate_tunn_mode(&tun_src->l2_gre, &rc);
2254         qed_pf_validate_tunn_mode(&tun_src->ip_gre, &rc);
2255
2256         if ((tun_src->b_update_rx_cls || tun_src->b_update_tx_cls) &&
2257             (tun_src->vxlan.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
2258              tun_src->l2_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
2259              tun_src->ip_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
2260              tun_src->l2_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
2261              tun_src->ip_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN)) {
2262                 tun_src->b_update_rx_cls = false;
2263                 tun_src->b_update_tx_cls = false;
2264                 rc = -EINVAL;
2265         }
2266
2267         if (tun_src->vxlan_port.b_update_port) {
2268                 if (tun_src->vxlan_port.port == tun->vxlan_port.port) {
2269                         tun_src->vxlan_port.b_update_port = false;
2270                 } else {
2271                         *update = true;
2272                         bultn_vxlan_port = tun_src->vxlan_port.port;
2273                 }
2274         }
2275
2276         if (tun_src->geneve_port.b_update_port) {
2277                 if (tun_src->geneve_port.port == tun->geneve_port.port) {
2278                         tun_src->geneve_port.b_update_port = false;
2279                 } else {
2280                         *update = true;
2281                         bultn_geneve_port = tun_src->geneve_port.port;
2282                 }
2283         }
2284
2285         qed_for_each_vf(p_hwfn, i) {
2286                 qed_iov_bulletin_set_udp_ports(p_hwfn, i, bultn_vxlan_port,
2287                                                bultn_geneve_port);
2288         }
2289
2290         qed_schedule_iov(p_hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
2291         ops->ports_update(cookie, bultn_vxlan_port, bultn_geneve_port);
2292
2293         return rc;
2294 }
2295
2296 static void qed_iov_vf_mbx_update_tunn_param(struct qed_hwfn *p_hwfn,
2297                                              struct qed_ptt *p_ptt,
2298                                              struct qed_vf_info *p_vf)
2299 {
2300         struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
2301         struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2302         struct pfvf_update_tunn_param_tlv *p_resp;
2303         struct vfpf_update_tunn_param_tlv *p_req;
2304         u8 status = PFVF_STATUS_SUCCESS;
2305         bool b_update_required = false;
2306         struct qed_tunnel_info tunn;
2307         u16 tunn_feature_mask = 0;
2308         int i, rc = 0;
2309
2310         mbx->offset = (u8 *)mbx->reply_virt;
2311
2312         memset(&tunn, 0, sizeof(tunn));
2313         p_req = &mbx->req_virt->tunn_param_update;
2314
2315         if (!qed_iov_pf_validate_tunn_param(p_req)) {
2316                 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2317                            "No tunnel update requested by VF\n");
2318                 status = PFVF_STATUS_FAILURE;
2319                 goto send_resp;
2320         }
2321
2322         tunn.b_update_rx_cls = p_req->update_tun_cls;
2323         tunn.b_update_tx_cls = p_req->update_tun_cls;
2324
2325         qed_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
2326                                     QED_MODE_VXLAN_TUNN, p_req->vxlan_clss,
2327                                     p_req->update_vxlan_port,
2328                                     p_req->vxlan_port);
2329         qed_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
2330                                     QED_MODE_L2GENEVE_TUNN,
2331                                     p_req->l2geneve_clss,
2332                                     p_req->update_geneve_port,
2333                                     p_req->geneve_port);
2334         __qed_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
2335                                       QED_MODE_IPGENEVE_TUNN,
2336                                       p_req->ipgeneve_clss);
2337         __qed_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
2338                                       QED_MODE_L2GRE_TUNN, p_req->l2gre_clss);
2339         __qed_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
2340                                       QED_MODE_IPGRE_TUNN, p_req->ipgre_clss);
2341
2342         /* If the PF modifies the VF's request, it should still return an
2343          * error in case of a partial or modified configuration, as opposed
2344          * to the requested one.
2345          */
2346         rc = qed_pf_validate_modify_tunn_config(p_hwfn, &tunn_feature_mask,
2347                                                 &b_update_required, &tunn);
2348
2349         if (rc)
2350                 status = PFVF_STATUS_FAILURE;
2351
2352         /* Check whether the QED client wishes to update anything */
2353         if (b_update_required) {
2354                 u16 geneve_port;
2355
2356                 rc = qed_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
2357                                                QED_SPQ_MODE_EBLOCK, NULL);
2358                 if (rc)
2359                         status = PFVF_STATUS_FAILURE;
2360
2361                 geneve_port = p_tun->geneve_port.port;
2362                 qed_for_each_vf(p_hwfn, i) {
2363                         qed_iov_bulletin_set_udp_ports(p_hwfn, i,
2364                                                        p_tun->vxlan_port.port,
2365                                                        geneve_port);
2366                 }
2367         }
2368
2369 send_resp:
2370         p_resp = qed_add_tlv(p_hwfn, &mbx->offset,
2371                              CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
2372
2373         qed_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
2374         qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2375                     sizeof(struct channel_list_end_tlv));
2376
2377         qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
2378 }
2379
2380 static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
2381                                           struct qed_ptt *p_ptt,
2382                                           struct qed_vf_info *p_vf,
2383                                           u32 cid, u8 status)
2384 {
2385         struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2386         struct pfvf_start_queue_resp_tlv *p_tlv;
2387         bool b_legacy = false;
2388         u16 length;
2389
2390         mbx->offset = (u8 *)mbx->reply_virt;
2391
2392         /* Taking a bigger struct instead of adding a TLV to the list was a
2393          * mistake, but one which we're now stuck with, as some older
2394          * clients assume the size of the previous response.
2395          */
2396         if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
2397             ETH_HSI_VER_NO_PKT_LEN_TUNN)
2398                 b_legacy = true;
2399
2400         if (!b_legacy)
2401                 length = sizeof(*p_tlv);
2402         else
2403                 length = sizeof(struct pfvf_def_resp_tlv);
2404
2405         p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
2406                             length);
2407         qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2408                     sizeof(struct channel_list_end_tlv));
2409
2410         /* Update the TLV with the response */
2411         if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
2412                 p_tlv->offset = qed_db_addr_vf(cid, DQ_DEMS_LEGACY);
2413
2414         qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
2415 }
2416
2417 static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
2418                                      struct qed_ptt *p_ptt,
2419                                      struct qed_vf_info *vf)
2420 {
2421         struct qed_queue_start_common_params params;
2422         struct qed_queue_cid_vf_params vf_params;
2423         struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2424         u8 status = PFVF_STATUS_NO_RESOURCE;
2425         struct vfpf_start_txq_tlv *req;
2426         struct qed_vf_queue *p_queue;
2427         struct qed_queue_cid *p_cid;
2428         struct qed_sb_info sb_dummy;
2429         u8 qid_usage_idx, vf_legacy;
2430         u32 cid = 0;
2431         int rc;
2432         u16 pq;
2433
2434         memset(&params, 0, sizeof(params));
2435         req = &mbx->req_virt->start_txq;
2436
2437         if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid,
2438                                   QED_IOV_VALIDATE_Q_NA) ||
2439             !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2440                 goto out;
2441
2442         qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
2443         if (qid_usage_idx == QED_IOV_QID_INVALID)
2444                 goto out;
2445
2446         p_queue = &vf->vf_queues[req->tx_qid];
2447         if (p_queue->cids[qid_usage_idx].p_cid)
2448                 goto out;
2449
2450         vf_legacy = qed_vf_calculate_legacy(vf);
2451
2452         /* Acquire a new queue-cid */
2453         params.queue_id = p_queue->fw_tx_qid;
2454         params.vport_id = vf->vport_id;
2455         params.stats_id = vf->abs_vf_id + 0x10;
2456
2457         /* Since IGU index is passed via sb_info, construct a dummy one */
2458         memset(&sb_dummy, 0, sizeof(sb_dummy));
2459         sb_dummy.igu_sb_id = req->hw_sb;
2460         params.p_sb = &sb_dummy;
2461         params.sb_idx = req->sb_index;
2462
2463         memset(&vf_params, 0, sizeof(vf_params));
2464         vf_params.vfid = vf->relative_vf_id;
2465         vf_params.vf_qid = (u8)req->tx_qid;
2466         vf_params.vf_legacy = vf_legacy;
2467         vf_params.qid_usage_idx = qid_usage_idx;
2468
2469         p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2470                                      &params, false, &vf_params);
2471         if (!p_cid)
2472                 goto out;
2473
2474         pq = qed_get_cm_pq_idx_vf(p_hwfn, vf->relative_vf_id);
2475         rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
2476                                       req->pbl_addr, req->pbl_size, pq);
2477         if (rc) {
2478                 status = PFVF_STATUS_FAILURE;
2479                 qed_eth_queue_cid_release(p_hwfn, p_cid);
2480         } else {
2481                 status = PFVF_STATUS_SUCCESS;
2482                 p_queue->cids[qid_usage_idx].p_cid = p_cid;
2483                 p_queue->cids[qid_usage_idx].b_is_tx = true;
2484                 cid = p_cid->cid;
2485         }
2486
2487 out:
2488         qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, cid, status);
2489 }
2490
2491 static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
2492                                 struct qed_vf_info *vf,
2493                                 u16 rxq_id,
2494                                 u8 qid_usage_idx, bool cqe_completion)
2495 {
2496         struct qed_vf_queue *p_queue;
2497         int rc = 0;
2498
2499         if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id, QED_IOV_VALIDATE_Q_NA)) {
2500                 DP_VERBOSE(p_hwfn,
2501                            QED_MSG_IOV,
2502                            "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
2503                            vf->relative_vf_id, rxq_id, qid_usage_idx);
2504                 return -EINVAL;
2505         }
2506
2507         p_queue = &vf->vf_queues[rxq_id];
2508
2509         /* We've validated the index and the existence of the active RXQ -
2510          * now we need to make sure that it's using the correct qid.
2511          */
2512         if (!p_queue->cids[qid_usage_idx].p_cid ||
2513             p_queue->cids[qid_usage_idx].b_is_tx) {
2514                 struct qed_queue_cid *p_cid;
2515
2516                 p_cid = qed_iov_get_vf_rx_queue_cid(p_queue);
2517                 DP_VERBOSE(p_hwfn,
2518                            QED_MSG_IOV,
2519                            "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
2520                            vf->relative_vf_id,
2521                            rxq_id, qid_usage_idx, rxq_id, p_cid->qid_usage_idx);
2522                 return -EINVAL;
2523         }
2524
2525         /* Now that we know we have a valid Rx-queue - close it */
2526         rc = qed_eth_rx_queue_stop(p_hwfn,
2527                                    p_queue->cids[qid_usage_idx].p_cid,
2528                                    false, cqe_completion);
2529         if (rc)
2530                 return rc;
2531
2532         p_queue->cids[qid_usage_idx].p_cid = NULL;
2533         vf->num_active_rxqs--;
2534
2535         return 0;
2536 }
2537
2538 static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
2539                                 struct qed_vf_info *vf,
2540                                 u16 txq_id, u8 qid_usage_idx)
2541 {
2542         struct qed_vf_queue *p_queue;
2543         int rc = 0;
2544
2545         if (!qed_iov_validate_txq(p_hwfn, vf, txq_id, QED_IOV_VALIDATE_Q_NA))
2546                 return -EINVAL;
2547
2548         p_queue = &vf->vf_queues[txq_id];
2549         if (!p_queue->cids[qid_usage_idx].p_cid ||
2550             !p_queue->cids[qid_usage_idx].b_is_tx)
2551                 return -EINVAL;
2552
2553         rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->cids[qid_usage_idx].p_cid);
2554         if (rc)
2555                 return rc;
2556
2557         p_queue->cids[qid_usage_idx].p_cid = NULL;
2558         return 0;
2559 }
2560
2561 static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
2562                                      struct qed_ptt *p_ptt,
2563                                      struct qed_vf_info *vf)
2564 {
2565         u16 length = sizeof(struct pfvf_def_resp_tlv);
2566         struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2567         u8 status = PFVF_STATUS_FAILURE;
2568         struct vfpf_stop_rxqs_tlv *req;
2569         u8 qid_usage_idx;
2570         int rc;
2571
2572         /* There has never been an official driver that used this interface
2573          * for stopping multiple queues, and it is now considered deprecated.
2574          * Validate this isn't used here.
2575          */
2576         req = &mbx->req_virt->stop_rxqs;
2577         if (req->num_rxqs != 1) {
2578                 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2579                            "Odd; VF[%d] tried stopping multiple Rx queues\n",
2580                            vf->relative_vf_id);
2581                 status = PFVF_STATUS_NOT_SUPPORTED;
2582                 goto out;
2583         }
2584
2585         /* Find which qid-index is associated with the queue */
2586         qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
2587         if (qid_usage_idx == QED_IOV_QID_INVALID)
2588                 goto out;
2589
2590         rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
2591                                   qid_usage_idx, req->cqe_completion);
2592         if (!rc)
2593                 status = PFVF_STATUS_SUCCESS;
2594 out:
2595         qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
2596                              length, status);
2597 }
2598
2599 static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
2600                                      struct qed_ptt *p_ptt,
2601                                      struct qed_vf_info *vf)
2602 {
2603         u16 length = sizeof(struct pfvf_def_resp_tlv);
2604         struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2605         u8 status = PFVF_STATUS_FAILURE;
2606         struct vfpf_stop_txqs_tlv *req;
2607         u8 qid_usage_idx;
2608         int rc;
2609
2610         /* There has never been an official driver that used this interface
2611          * for stopping multiple queues, and it is now considered deprecated.
2612          * Validate this isn't used here.
2613          */
2614         req = &mbx->req_virt->stop_txqs;
2615         if (req->num_txqs != 1) {
2616                 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2617                            "Odd; VF[%d] tried stopping multiple Tx queues\n",
2618                            vf->relative_vf_id);
2619                 status = PFVF_STATUS_NOT_SUPPORTED;
2620                 goto out;
2621         }
2622
2623         /* Find which qid-index is associated with the queue */
2624         qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
2625         if (qid_usage_idx == QED_IOV_QID_INVALID)
2626                 goto out;
2627
2628         rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, qid_usage_idx);
2629         if (!rc)
2630                 status = PFVF_STATUS_SUCCESS;
2631
2632 out:
2633         qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
2634                              length, status);
2635 }
2636
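/* Relay a VF request to update a batch of its Rx queues. VFs that
 * negotiated QUEUE_QIDS may only update one queue per message; legacy
 * VFs may pass a contiguous [rx_qid, rx_qid + num_rxqs) range. Every
 * queue in the range must resolve to an active Rx cid before the
 * single batched ramrod is issued.
 */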
2637 static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
2638                                        struct qed_ptt *p_ptt,
2639                                        struct qed_vf_info *vf)
2640 {
2641         struct qed_queue_cid *handlers[QED_MAX_VF_CHAINS_PER_PF];
2642         u16 length = sizeof(struct pfvf_def_resp_tlv);
2643         struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2644         struct vfpf_update_rxq_tlv *req;
2645         u8 status = PFVF_STATUS_FAILURE;
2646         u8 complete_event_flg;
2647         u8 complete_cqe_flg;
2648         u8 qid_usage_idx;
2649         int rc;
2650         u8 i;
2651
2652         req = &mbx->req_virt->update_rxq;
2653         complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
2654         complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
2655
2656         qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
2657         if (qid_usage_idx == QED_IOV_QID_INVALID)
2658                 goto out;
2659
2660         /* There shouldn't exist a VF that uses queue-qids yet uses this
2661          * API with multiple Rx queues. Validate this.
2662          */
2663         if ((vf->acquire.vfdev_info.capabilities &
2664              VFPF_ACQUIRE_CAP_QUEUE_QIDS) && req->num_rxqs != 1) {
2665                 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2666                            "VF[%d] supports QIDs but sends multiple queues\n",
2667                            vf->relative_vf_id);
2668                 goto out;
2669         }
2670
2671         /* Validate inputs - for the legacy case this is still true since
2672          * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
2673          */
2674         for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
2675                 if (!qed_iov_validate_rxq(p_hwfn, vf, i,
2676                                           QED_IOV_VALIDATE_Q_NA) ||
2677                     !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
2678                     vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
2679                         DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2680                                    "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
2681                                    vf->relative_vf_id, req->rx_qid,
2682                                    req->num_rxqs);
2683                         goto out;
2684                 }
2685         }
2686
2687         /* Prepare the handlers */
2688         for (i = 0; i < req->num_rxqs; i++) {
2689                 u16 qid = req->rx_qid + i;
2690
2691                 handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
2692         }
2693
2694         rc = qed_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
2695                                          req->num_rxqs,
2696                                          complete_cqe_flg,
2697                                          complete_event_flg,
2698                                          QED_SPQ_MODE_EBLOCK, NULL);
2699         if (rc)
2700                 goto out;
2701
2702         status = PFVF_STATUS_SUCCESS;
2703 out:
2704         qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
2705                              length, status);
2706 }
2707
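/* Walk a request buffer's TLV chain looking for the first TLV of type
 * @req_type. The walk stops at CHANNEL_TLV_LIST_END; a zero-length TLV
 * or a chain overrunning TLV_BUFFER_SIZE is treated as malformed and
 * NULL is returned.
 */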
2708 void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
2709                                void *p_tlvs_list, u16 req_type)
2710 {
2711         struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
2712         int len = 0;
2713
2714         do {
2715                 if (!p_tlv->length) {
2716                         DP_NOTICE(p_hwfn, "Zero length TLV found\n");
2717                         return NULL;
2718                 }
2719
2720                 if (p_tlv->type == req_type) {
2721                         DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2722                                    "Extended tlv type %d, length %d found\n",
2723                                    p_tlv->type, p_tlv->length);
2724                         return p_tlv;
2725                 }
2726
2727                 len += p_tlv->length;
2728                 p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
2729
2730                 if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
2731                         DP_NOTICE(p_hwfn, "TLVs have overrun the buffer size\n");
2732                         return NULL;
2733                 }
2734         } while (p_tlv->type != CHANNEL_TLV_LIST_END);
2735
2736         return NULL;
2737 }
2738
2739 static void
2740 qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
2741                             struct qed_sp_vport_update_params *p_data,
2742                             struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2743 {
2744         struct vfpf_vport_update_activate_tlv *p_act_tlv;
2745         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
2746
2747         p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
2748                     qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2749         if (!p_act_tlv)
2750                 return;
2751
2752         p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
2753         p_data->vport_active_rx_flg = p_act_tlv->active_rx;
2754         p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
2755         p_data->vport_active_tx_flg = p_act_tlv->active_tx;
2756         *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
2757 }
2758
2759 static void
2760 qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
2761                              struct qed_sp_vport_update_params *p_data,
2762                              struct qed_vf_info *p_vf,
2763                              struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2764 {
2765         struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
2766         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
2767
2768         p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
2769                      qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2770         if (!p_vlan_tlv)
2771                 return;
2772
2773         p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
2774
2775         /* Ignore the VF request if we're forcing a vlan */
2776         if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) {
2777                 p_data->update_inner_vlan_removal_flg = 1;
2778                 p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
2779         }
2780
2781         *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP;
2782 }
2783
2784 static void
2785 qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn,
2786                             struct qed_sp_vport_update_params *p_data,
2787                             struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2788 {
2789         struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
2790         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
2791
2792         p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
2793                           qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2794                                                    tlv);
2795         if (!p_tx_switch_tlv)
2796                 return;
2797
2798         p_data->update_tx_switching_flg = 1;
2799         p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
2800         *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH;
2801 }
2802
2803 static void
2804 qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
2805                                   struct qed_sp_vport_update_params *p_data,
2806                                   struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2807 {
2808         struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
2809         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
2810
2811         p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
2812             qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2813         if (!p_mcast_tlv)
2814                 return;
2815
2816         p_data->update_approx_mcast_flg = 1;
2817         memcpy(p_data->bins, p_mcast_tlv->bins,
2818                sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
2819         *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
2820 }
2821
2822 static void
2823 qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn,
2824                               struct qed_sp_vport_update_params *p_data,
2825                               struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2826 {
2827         struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;
2828         struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
2829         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
2830
2831         p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
2832             qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2833         if (!p_accept_tlv)
2834                 return;
2835
2836         p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
2837         p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
2838         p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
2839         p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
2840         *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM;
2841 }
2842
2843 static void
2844 qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn,
2845                                   struct qed_sp_vport_update_params *p_data,
2846                                   struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2847 {
2848         struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
2849         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
2850
2851         p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
2852                             qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2853                                                      tlv);
2854         if (!p_accept_any_vlan)
2855                 return;
2856
2857         p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
2858         p_data->update_accept_any_vlan_flg =
2859                     p_accept_any_vlan->update_accept_any_vlan_flg;
2860         *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
2861 }
2862
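/* Unlike the simpler vport-update TLVs above, the RSS TLV needs its
 * contents validated: each entry of the VF-relative indirection table
 * must name a valid, enabled Rx queue before it is translated into a
 * queue-cid pointer. On any bad entry the TLV is marked in @tlvs_mask
 * but not in @tlvs_accepted, so the VF learns its request was rejected.
 */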
2863 static void
2864 qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
2865                             struct qed_vf_info *vf,
2866                             struct qed_sp_vport_update_params *p_data,
2867                             struct qed_rss_params *p_rss,
2868                             struct qed_iov_vf_mbx *p_mbx,
2869                             u16 *tlvs_mask, u16 *tlvs_accepted)
2870 {
2871         struct vfpf_vport_update_rss_tlv *p_rss_tlv;
2872         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
2873         bool b_reject = false;
2874         u16 table_size;
2875         u16 i, q_idx;
2876
2877         p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
2878                     qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2879         if (!p_rss_tlv) {
2880                 p_data->rss_params = NULL;
2881                 return;
2882         }
2883
2884         memset(p_rss, 0, sizeof(struct qed_rss_params));
2885
2886         p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
2887                                       VFPF_UPDATE_RSS_CONFIG_FLAG);
2888         p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
2889                                             VFPF_UPDATE_RSS_CAPS_FLAG);
2890         p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
2891                                          VFPF_UPDATE_RSS_IND_TABLE_FLAG);
2892         p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
2893                                    VFPF_UPDATE_RSS_KEY_FLAG);
2894
2895         p_rss->rss_enable = p_rss_tlv->rss_enable;
2896         p_rss->rss_eng_id = vf->relative_vf_id + 1;
2897         p_rss->rss_caps = p_rss_tlv->rss_caps;
2898         p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
2899         memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));
2900
2901         table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
2902                            (1 << p_rss_tlv->rss_table_size_log));
2903
2904         for (i = 0; i < table_size; i++) {
2905                 struct qed_queue_cid *p_cid;
2906
2907                 q_idx = p_rss_tlv->rss_ind_table[i];
2908                 if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx,
2909                                           QED_IOV_VALIDATE_Q_ENABLE)) {
2910                         DP_VERBOSE(p_hwfn,
2911                                    QED_MSG_IOV,
2912                                    "VF[%d]: Omitting RSS due to wrong queue %04x\n",
2913                                    vf->relative_vf_id, q_idx);
2914                         b_reject = true;
2915                         goto out;
2916                 }
2917
2918                 p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
2919                 p_rss->rss_ind_table[i] = p_cid;
2920         }
2921
2922         p_data->rss_params = p_rss;
2923 out:
2924         *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
2925         if (!b_reject)
2926                 *tlvs_accepted |= 1 << QED_IOV_VP_UPDATE_RSS;
2927 }
2928
2929 static void
2930 qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
2931                                 struct qed_vf_info *vf,
2932                                 struct qed_sp_vport_update_params *p_data,
2933                                 struct qed_sge_tpa_params *p_sge_tpa,
2934                                 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2935 {
2936         struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
2937         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
2938
2939         p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
2940             qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2941
2942         if (!p_sge_tpa_tlv) {
2943                 p_data->sge_tpa_params = NULL;
2944                 return;
2945         }
2946
2947         memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params));
2948
2949         p_sge_tpa->update_tpa_en_flg =
2950             !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
2951         p_sge_tpa->update_tpa_param_flg =
2952             !!(p_sge_tpa_tlv->update_sge_tpa_flags &
2953                 VFPF_UPDATE_TPA_PARAM_FLAG);
2954
2955         p_sge_tpa->tpa_ipv4_en_flg =
2956             !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
2957         p_sge_tpa->tpa_ipv6_en_flg =
2958             !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
2959         p_sge_tpa->tpa_pkt_split_flg =
2960             !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
2961         p_sge_tpa->tpa_hdr_data_split_flg =
2962             !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
2963         p_sge_tpa->tpa_gro_consistent_flg =
2964             !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
2965
2966         p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
2967         p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
2968         p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
2969         p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
2970         p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
2971
2972         p_data->sge_tpa_params = p_sge_tpa;
2973
2974         *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
2975 }
2976
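/* Final PF-side filtering of a VF's vport-update request. For VFs that
 * are not configured as trusted, the unmatched-unicast/multicast accept
 * flags and accept-any-vlan (when a vlan is forced) are silently masked
 * out; the requested values are still cached in the public VF info,
 * presumably so they can be reapplied if the VF is later made trusted.
 */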
2977 static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn,
2978                                     u8 vfid,
2979                                     struct qed_sp_vport_update_params *params,
2980                                     u16 *tlvs)
2981 {
2982         u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
2983         struct qed_filter_accept_flags *flags = &params->accept_flags;
2984         struct qed_public_vf_info *vf_info;
2985         u16 tlv_mask;
2986
2987         tlv_mask = BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM) |
2988                    BIT(QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN);
2989
2990         /* Untrusted VFs can't even be trusted to know that fact.
2991          * Simply indicate everything is configured fine, and trace
2992          * configuration 'behind their back'.
2993          */
2994         if (!(*tlvs & tlv_mask))
2995                 return 0;
2996
2997         vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
2998
2999         if (flags->update_rx_mode_config) {
3000                 vf_info->rx_accept_mode = flags->rx_accept_filter;
3001                 if (!vf_info->is_trusted_configured)
3002                         flags->rx_accept_filter &= ~mask;
3003         }
3004
3005         if (flags->update_tx_mode_config) {
3006                 vf_info->tx_accept_mode = flags->tx_accept_filter;
3007                 if (!vf_info->is_trusted_configured)
3008                         flags->tx_accept_filter &= ~mask;
3009         }
3010
3011         if (params->update_accept_any_vlan_flg) {
3012                 vf_info->accept_any_vlan = params->accept_any_vlan;
3013
3014                 if (vf_info->forced_vlan && !vf_info->is_trusted_configured)
3015                         params->accept_any_vlan = false;
3016         }
3017
3018         return 0;
3019 }
3020
3021 static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
3022                                         struct qed_ptt *p_ptt,
3023                                         struct qed_vf_info *vf)
3024 {
3025         struct qed_rss_params *p_rss_params = NULL;
3026         struct qed_sp_vport_update_params params;
3027         struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
3028         struct qed_sge_tpa_params sge_tpa_params;
3029         u16 tlvs_mask = 0, tlvs_accepted = 0;
3030         u8 status = PFVF_STATUS_SUCCESS;
3031         u16 length;
3032         int rc;
3033
3034         /* Validate the VF has an active vport to update */
3035         if (!vf->vport_instance) {
3036                 DP_VERBOSE(p_hwfn,
3037                            QED_MSG_IOV,
3038                            "No VPORT instance available for VF[%d], failing vport update\n",
3039                            vf->abs_vf_id);
3040                 status = PFVF_STATUS_FAILURE;
3041                 goto out;
3042         }
3043         p_rss_params = vzalloc(sizeof(*p_rss_params));
3044         if (!p_rss_params) {
3045                 status = PFVF_STATUS_FAILURE;
3046                 goto out;
3047         }
3048
3049         memset(&params, 0, sizeof(params));
3050         params.opaque_fid = vf->opaque_fid;
3051         params.vport_id = vf->vport_id;
3052         params.rss_params = NULL;
3053
3054         /* Search for extended tlvs list and update values
3055          * from VF in struct qed_sp_vport_update_params.
3056          */
3057         qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
3058         qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
3059         qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
3060         qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
3061         qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
3062         qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
3063         qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
3064                                         &sge_tpa_params, mbx, &tlvs_mask);
3065
3066         tlvs_accepted = tlvs_mask;
3067
3068         /* Some of the extended TLVs need to be validated first; In that case,
3069          * they can update the mask without updating the accepted [so that
3070          * PF could communicate to VF it has rejected request].
3071          */
3072         qed_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
3073                                     mbx, &tlvs_mask, &tlvs_accepted);
3074
3075         if (qed_iov_pre_update_vport(p_hwfn, vf->relative_vf_id,
3076                                      &params, &tlvs_accepted)) {
3077                 tlvs_accepted = 0;
3078                 status = PFVF_STATUS_NOT_SUPPORTED;
3079                 goto out;
3080         }
3081
3082         if (!tlvs_accepted) {
3083                 if (tlvs_mask)
3084                         DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3085                                    "Upper-layer prevents VF vport configuration\n");
3086                 else
3087                         DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3088                                    "No feature tlvs found for vport update\n");
3089                 status = PFVF_STATUS_NOT_SUPPORTED;
3090                 goto out;
3091         }
3092
3093         rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
3094
3095         if (rc)
3096                 status = PFVF_STATUS_FAILURE;
3097
3098 out:
3099         vfree(p_rss_params);
3100         length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
3101                                                   tlvs_mask, tlvs_accepted);
3102         qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
3103 }
3104
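/* Maintain the PF's shadow copy of the VF's vlan filter configuration.
 * The shadow is what lets the PF re-apply the VF's filters later, e.g.
 * once a forced vlan is removed; while a vlan is forced, removals are
 * tracked but no new entries are added.
 */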
3105 static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
3106                                          struct qed_vf_info *p_vf,
3107                                          struct qed_filter_ucast *p_params)
3108 {
3109         int i;
3110
3111         /* First remove entries and then add new ones */
3112         if (p_params->opcode == QED_FILTER_REMOVE) {
3113                 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3114                         if (p_vf->shadow_config.vlans[i].used &&
3115                             p_vf->shadow_config.vlans[i].vid ==
3116                             p_params->vlan) {
3117                                 p_vf->shadow_config.vlans[i].used = false;
3118                                 break;
3119                         }
3120                 if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
3121                         DP_VERBOSE(p_hwfn,
3122                                    QED_MSG_IOV,
3123                                    "VF [%d] - Tries to remove a non-existing vlan\n",
3124                                    p_vf->relative_vf_id);
3125                         return -EINVAL;
3126                 }
3127         } else if (p_params->opcode == QED_FILTER_REPLACE ||
3128                    p_params->opcode == QED_FILTER_FLUSH) {
3129                 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3130                         p_vf->shadow_config.vlans[i].used = false;
3131         }
3132
3133         /* In forced mode, we're willing to remove entries - but we don't add
3134          * new ones.
3135          */
3136         if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))
3137                 return 0;
3138
3139         if (p_params->opcode == QED_FILTER_ADD ||
3140             p_params->opcode == QED_FILTER_REPLACE) {
3141                 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
3142                         if (p_vf->shadow_config.vlans[i].used)
3143                                 continue;
3144
3145                         p_vf->shadow_config.vlans[i].used = true;
3146                         p_vf->shadow_config.vlans[i].vid = p_params->vlan;
3147                         break;
3148                 }
3149
3150                 if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
3151                         DP_VERBOSE(p_hwfn,
3152                                    QED_MSG_IOV,
3153                                    "VF [%d] - Tries to configure more than %d vlan filters\n",
3154                                    p_vf->relative_vf_id,
3155                                    QED_ETH_VF_NUM_VLAN_FILTERS + 1);
3156                         return -EINVAL;
3157                 }
3158         }
3159
3160         return 0;
3161 }
3162
3163 static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
3164                                         struct qed_vf_info *p_vf,
3165                                         struct qed_filter_ucast *p_params)
3166 {
3167         int i;
3168
3169         /* If we're in forced-mode, we don't allow any change */
3170         if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))
3171                 return 0;
3172
3173         /* Don't keep track of shadow copy since we don't intend to restore. */
3174         if (p_vf->p_vf_info.is_trusted_configured)
3175                 return 0;
3176
3177         /* First remove entries and then add new ones */
3178         if (p_params->opcode == QED_FILTER_REMOVE) {
3179                 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
3180                         if (ether_addr_equal(p_vf->shadow_config.macs[i],
3181                                              p_params->mac)) {
3182                                 eth_zero_addr(p_vf->shadow_config.macs[i]);
3183                                 break;
3184                         }
3185                 }
3186
3187                 if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
3188                         DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3189                                    "MAC isn't configured\n");
3190                         return -EINVAL;
3191                 }
3192         } else if (p_params->opcode == QED_FILTER_REPLACE ||
3193                    p_params->opcode == QED_FILTER_FLUSH) {
3194                 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++)
3195                         eth_zero_addr(p_vf->shadow_config.macs[i]);
3196         }
3197
3198         /* List the new MAC address */
3199         if (p_params->opcode != QED_FILTER_ADD &&
3200             p_params->opcode != QED_FILTER_REPLACE)
3201                 return 0;
3202
3203         for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
3204                 if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) {
3205                         ether_addr_copy(p_vf->shadow_config.macs[i],
3206                                         p_params->mac);
3207                         DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3208                                    "Added MAC at entry %d in shadow\n", i);
3209                         break;
3210                 }
3211         }
3212
3213         if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
3214                 DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n");
3215                 return -EINVAL;
3216         }
3217
3218         return 0;
3219 }
3220
3221 static int
3222 qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
3223                                  struct qed_vf_info *p_vf,
3224                                  struct qed_filter_ucast *p_params)
3225 {
3226         int rc = 0;
3227
3228         if (p_params->type == QED_FILTER_MAC) {
3229                 rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
3230                 if (rc)
3231                         return rc;
3232         }
3233
3234         if (p_params->type == QED_FILTER_VLAN)
3235                 rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
3236
3237         return rc;
3238 }
3239
3240 static int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
3241                              int vfid, struct qed_filter_ucast *params)
3242 {
3243         struct qed_public_vf_info *vf;
3244
3245         vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
3246         if (!vf)
3247                 return -EINVAL;
3248
3249         /* No real decision to make; Store the configured MAC */
3250         if (params->type == QED_FILTER_MAC ||
3251             params->type == QED_FILTER_MAC_VLAN) {
3252                 ether_addr_copy(vf->mac, params->mac);
3253
3254                 if (vf->is_trusted_configured) {
3255                         qed_iov_bulletin_set_mac(hwfn, vf->mac, vfid);
3256
3257                         /* Update and post bulletin again */
3258                         qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
3259                 }
3260         }
3261
3262         return 0;
3263 }
3264
3265 static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
3266                                         struct qed_ptt *p_ptt,
3267                                         struct qed_vf_info *vf)
3268 {
3269         struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt;
3270         struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
3271         struct vfpf_ucast_filter_tlv *req;
3272         u8 status = PFVF_STATUS_SUCCESS;
3273         struct qed_filter_ucast params;
3274         int rc;
3275
3276         /* Prepare the unicast filter params */
3277         memset(&params, 0, sizeof(struct qed_filter_ucast));
3278         req = &mbx->req_virt->ucast_filter;
3279         params.opcode = (enum qed_filter_opcode)req->opcode;
3280         params.type = (enum qed_filter_ucast_type)req->type;
3281
3282         params.is_rx_filter = 1;
3283         params.is_tx_filter = 1;
3284         params.vport_to_remove_from = vf->vport_id;
3285         params.vport_to_add_to = vf->vport_id;
3286         memcpy(params.mac, req->mac, ETH_ALEN);
3287         params.vlan = req->vlan;
3288
3289         DP_VERBOSE(p_hwfn,
3290                    QED_MSG_IOV,
3291                    "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %pM, vlan 0x%04x\n",
3292                    vf->abs_vf_id, params.opcode, params.type,
3293                    params.is_rx_filter ? "RX" : "",
3294                    params.is_tx_filter ? "TX" : "",
3295                    params.vport_to_add_to,
3296                    params.mac, params.vlan);
3297
3298         if (!vf->vport_instance) {
3299                 DP_VERBOSE(p_hwfn,
3300                            QED_MSG_IOV,
3301                            "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
3302                            vf->abs_vf_id);
3303                 status = PFVF_STATUS_FAILURE;
3304                 goto out;
3305         }
3306
3307         /* Update shadow copy of the VF configuration */
3308         if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, &params)) {
3309                 status = PFVF_STATUS_FAILURE;
3310                 goto out;
3311         }
3312
3313         /* Determine if the unicast filtering is acceptable to the PF */
3314         if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) &&
3315             (params.type == QED_FILTER_VLAN ||
3316              params.type == QED_FILTER_MAC_VLAN)) {
3317                 /* Once VLAN is forced or PVID is set, do not allow
3318                  * to add/replace any further VLANs.
3319                  */
3320                 if (params.opcode == QED_FILTER_ADD ||
3321                     params.opcode == QED_FILTER_REPLACE)
3322                         status = PFVF_STATUS_FORCED;
3323                 goto out;
3324         }
3325
3326         if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) &&
3327             (params.type == QED_FILTER_MAC ||
3328              params.type == QED_FILTER_MAC_VLAN)) {
3329                 if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
3330                     (params.opcode != QED_FILTER_ADD &&
3331                      params.opcode != QED_FILTER_REPLACE))
3332                         status = PFVF_STATUS_FORCED;
3333                 goto out;
3334         }
3335
3336         rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);
3337         if (rc) {
3338                 status = PFVF_STATUS_FAILURE;
3339                 goto out;
3340         }
3341
3342         rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
3343                                      QED_SPQ_MODE_CB, NULL);
3344         if (rc)
3345                 status = PFVF_STATUS_FAILURE;
3346
3347 out:
3348         qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
3349                              sizeof(struct pfvf_def_resp_tlv), status);
3350 }
3351
3352 static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
3353                                        struct qed_ptt *p_ptt,
3354                                        struct qed_vf_info *vf)
3355 {
3356         int i;
3357
3358         /* Reset the SBs */
3359         for (i = 0; i < vf->num_sbs; i++)
3360                 qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
3361                                                 vf->igu_sbs[i],
3362                                                 vf->opaque_fid, false);
3363
3364         qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
3365                              sizeof(struct pfvf_def_resp_tlv),
3366                              PFVF_STATUS_SUCCESS);
3367 }
3368
3369 static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
3370                                  struct qed_ptt *p_ptt, struct qed_vf_info *vf)
3371 {
3372         u16 length = sizeof(struct pfvf_def_resp_tlv);
3373         u8 status = PFVF_STATUS_SUCCESS;
3374
3375         /* Disable Interrupts for VF */
3376         qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
3377
3378         /* Reset Permission table */
3379         qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
3380
3381         qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
3382                              length, status);
3383 }
3384
3385 static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
3386                                    struct qed_ptt *p_ptt,
3387                                    struct qed_vf_info *p_vf)
3388 {
3389         u16 length = sizeof(struct pfvf_def_resp_tlv);
3390         u8 status = PFVF_STATUS_SUCCESS;
3391         int rc = 0;
3392
3393         qed_iov_vf_cleanup(p_hwfn, p_vf);
3394
3395         if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
3396                 /* Stopping the VF */
3397                 rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
3398                                     p_vf->opaque_fid);
3399
3400                 if (rc) {
3401                         DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
3402                                rc);
3403                         status = PFVF_STATUS_FAILURE;
3404                 }
3405
3406                 p_vf->state = VF_STOPPED;
3407         }
3408
3409         qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
3410                              length, status);
3411 }
3412
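/* Report the coalescing setting of one of the VF's queues back to it.
 * For Rx the queue's cid is queried directly; for Tx, the first Tx cid
 * found in the queue-zone is sampled, on the assumption that all Tx
 * cids in a zone share the same coalescing configuration.
 */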
3413 static void qed_iov_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
3414                                        struct qed_ptt *p_ptt,
3415                                        struct qed_vf_info *p_vf)
3416 {
3417         struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
3418         struct pfvf_read_coal_resp_tlv *p_resp;
3419         struct vfpf_read_coal_req_tlv *req;
3420         u8 status = PFVF_STATUS_FAILURE;
3421         struct qed_vf_queue *p_queue;
3422         struct qed_queue_cid *p_cid;
3423         u16 coal = 0, qid, i;
3424         bool b_is_rx;
3425         int rc = 0;
3426
3427         mbx->offset = (u8 *)mbx->reply_virt;
3428         req = &mbx->req_virt->read_coal_req;
3429
3430         qid = req->qid;
3431         b_is_rx = !!req->is_rx;
3432
3433         if (b_is_rx) {
3434                 if (!qed_iov_validate_rxq(p_hwfn, p_vf, qid,
3435                                           QED_IOV_VALIDATE_Q_ENABLE)) {
3436                         DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3437                                    "VF[%d]: Invalid Rx queue_id = %d\n",
3438                                    p_vf->abs_vf_id, qid);
3439                         goto send_resp;
3440                 }
3441
3442                 p_cid = qed_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);
3443                 rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
3444                 if (rc)
3445                         goto send_resp;
3446         } else {
3447                 if (!qed_iov_validate_txq(p_hwfn, p_vf, qid,
3448                                           QED_IOV_VALIDATE_Q_ENABLE)) {
3449                         DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3450                                    "VF[%d]: Invalid Tx queue_id = %d\n",
3451                                    p_vf->abs_vf_id, qid);
3452                         goto send_resp;
3453                 }
3454                 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3455                         p_queue = &p_vf->vf_queues[qid];
3456                         if ((!p_queue->cids[i].p_cid) ||
3457                             (!p_queue->cids[i].b_is_tx))
3458                                 continue;
3459
3460                         p_cid = p_queue->cids[i].p_cid;
3461
3462                         rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
3463                         if (rc)
3464                                 goto send_resp;
3465                         break;
3466                 }
3467         }
3468
3469         status = PFVF_STATUS_SUCCESS;
3470
3471 send_resp:
3472         p_resp = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_COALESCE_READ,
3473                              sizeof(*p_resp));
3474         p_resp->coal = coal;
3475
3476         qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
3477                     sizeof(struct channel_list_end_tlv));
3478
3479         qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
3480 }
3481
3482 static void qed_iov_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
3483                                        struct qed_ptt *p_ptt,
3484                                        struct qed_vf_info *vf)
3485 {
3486         struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
3487         struct vfpf_update_coalesce *req;
3488         u8 status = PFVF_STATUS_FAILURE;
3489         struct qed_queue_cid *p_cid;
3490         u16 rx_coal, tx_coal;
3491         int rc = 0, i;
3492         u16 qid;
3493
3494         req = &mbx->req_virt->update_coalesce;
3495
3496         rx_coal = req->rx_coal;
3497         tx_coal = req->tx_coal;
3498         qid = req->qid;
3499
3500         if (!qed_iov_validate_rxq(p_hwfn, vf, qid,
3501                                   QED_IOV_VALIDATE_Q_ENABLE) && rx_coal) {
3502                 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3503                            "VF[%d]: Invalid Rx queue_id = %d\n",
3504                            vf->abs_vf_id, qid);
3505                 goto out;
3506         }
3507
3508         if (!qed_iov_validate_txq(p_hwfn, vf, qid,
3509                                   QED_IOV_VALIDATE_Q_ENABLE) && tx_coal) {
3510                 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3511                            "VF[%d]: Invalid Tx queue_id = %d\n",
3512                            vf->abs_vf_id, qid);
3513                 goto out;
3514         }
3515
3516         DP_VERBOSE(p_hwfn,
3517                    QED_MSG_IOV,
3518                    "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
3519                    vf->abs_vf_id, rx_coal, tx_coal, qid);
3520
3521         if (rx_coal) {
3522                 p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
3523
3524                 rc = qed_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
3525                 if (rc) {
3526                         DP_VERBOSE(p_hwfn,
3527                                    QED_MSG_IOV,
3528                                    "VF[%d]: Unable to set rx queue = %d coalesce\n",
3529                                    vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
3530                         goto out;
3531                 }
3532                 vf->rx_coal = rx_coal;
3533         }
3534
3535         if (tx_coal) {
3536                 struct qed_vf_queue *p_queue = &vf->vf_queues[qid];
3537
3538                 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3539                         if (!p_queue->cids[i].p_cid)
3540                                 continue;
3541
3542                         if (!p_queue->cids[i].b_is_tx)
3543                                 continue;
3544
3545                         rc = qed_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
3546                                                   p_queue->cids[i].p_cid);
3547
3548                         if (rc) {
3549                                 DP_VERBOSE(p_hwfn,
3550                                            QED_MSG_IOV,
3551                                            "VF[%d]: Unable to set tx queue coalesce\n",
3552                                            vf->abs_vf_id);
3553                                 goto out;
3554                         }
3555                 }
3556                 vf->tx_coal = tx_coal;
3557         }
3558
3559         status = PFVF_STATUS_SUCCESS;
3560 out:
3561         qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
3562                              sizeof(struct pfvf_def_resp_tlv), status);
3563 }
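
/* Part of the FLR flow - pretend to be the VF and poll the DORQ usage
 * counter until the VF's outstanding doorbells have drained (up to
 * roughly a second), then restore the PF's own fid.
 */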
3564 static int
3565 qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
3566                          struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
3567 {
3568         int cnt;
3569         u32 val;
3570
3571         qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);
3572
3573         for (cnt = 0; cnt < 50; cnt++) {
3574                 val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
3575                 if (!val)
3576                         break;
3577                 msleep(20);
3578         }
3579         qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
3580
3581         if (cnt == 50) {
3582                 DP_ERR(p_hwfn,
3583                        "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
3584                        p_vf->abs_vf_id, val);
3585                 return -EBUSY;
3586         }
3587
3588         return 0;
3589 }
3590
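/* Part of the FLR flow - snapshot the per-VOQ producer/consumer block
 * counters in the PBF, then wait until every consumer has advanced by
 * at least the initial producer-consumer distance, i.e. until all
 * blocks that were in flight have been transmitted.
 */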
3591 static int
3592 qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
3593                         struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
3594 {
3595         u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
3596         int i, cnt;
3597
3598         /* Read initial consumers & producers */
3599         for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
3600                 u32 prod;
3601
3602                 cons[i] = qed_rd(p_hwfn, p_ptt,
3603                                  PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3604                                  i * 0x40);
3605                 prod = qed_rd(p_hwfn, p_ptt,
3606                               PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
3607                               i * 0x40);
3608                 distance[i] = prod - cons[i];
3609         }
3610
3611         /* Wait for consumers to pass the producers */
3612         i = 0;
3613         for (cnt = 0; cnt < 50; cnt++) {
3614                 for (; i < MAX_NUM_VOQS_E4; i++) {
3615                         u32 tmp;
3616
3617                         tmp = qed_rd(p_hwfn, p_ptt,
3618                                      PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3619                                      i * 0x40);
3620                         if (distance[i] > tmp - cons[i])
3621                                 break;
3622                 }
3623
3624                 if (i == MAX_NUM_VOQS_E4)
3625                         break;
3626
3627                 msleep(20);
3628         }
3629
3630         if (cnt == 50) {
3631                 DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
3632                        p_vf->abs_vf_id, i);
3633                 return -EBUSY;
3634         }
3635
3636         return 0;
3637 }
3638
3639 static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
3640                                struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
3641 {
3642         int rc;
3643
3644         rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
3645         if (rc)
3646                 return rc;
3647
3648         rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
3649         if (rc)
3650                 return rc;
3651
3652         return 0;
3653 }
3654
3655 static int
3656 qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
3657                                struct qed_ptt *p_ptt,
3658                                u16 rel_vf_id, u32 *ack_vfs)
3659 {
3660         struct qed_vf_info *p_vf;
3661         int rc = 0;
3662
3663         p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
3664         if (!p_vf)
3665                 return 0;
3666
3667         if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
3668             (1ULL << (rel_vf_id % 64))) {
3669                 u16 vfid = p_vf->abs_vf_id;
3670
3671                 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3672                            "VF[%d] - Handling FLR\n", vfid);
3673
3674                 qed_iov_vf_cleanup(p_hwfn, p_vf);
3675
3676                 /* If VF isn't active, no need for anything but SW */
3677                 if (!p_vf->b_init)
3678                         goto cleanup;
3679
3680                 rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
3681                 if (rc)
3682                         goto cleanup;
3683
3684                 rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
3685                 if (rc) {
3686                         DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n", vfid);
3687                         return rc;
3688                 }
3689
3690                 /* Workaround to make VF-PF channel ready, as FW
3691                  * doesn't do that as a part of FLR.
3692                  */
3693                 REG_WR(p_hwfn,
3694                        GTT_BAR0_MAP_REG_USDM_RAM +
3695                        USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
3696
3697                 /* VF_STOPPED has to be set only after final cleanup
3698                  * but prior to re-enabling the VF.
3699                  */
3700                 p_vf->state = VF_STOPPED;
3701
3702                 rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
3703                 if (rc) {
3704                         DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
3705                                vfid);
3706                         return rc;
3707                 }
3708 cleanup:
3709                 /* Mark VF for ack and clean pending state */
3710                 if (p_vf->state == VF_RESET)
3711                         p_vf->state = VF_STOPPED;
3712                 ack_vfs[vfid / 32] |= BIT((vfid % 32));
3713                 p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
3714                     ~(1ULL << (rel_vf_id % 64));
3715                 p_vf->vf_mbx.b_pending_msg = false;
3716         }
3717
3718         return rc;
3719 }
3720
3721 static int
3722 qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3723 {
3724         u32 ack_vfs[VF_MAX_STATIC / 32];
3725         int rc = 0;
3726         u16 i;
3727
3728         memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
3729
3730         /* Since BRB <-> PRS interface can't be tested as part of the flr
3731          * polling due to HW limitations, simply sleep a bit. And since
3732          * there's no need to wait per-vf, do it before looping.
3733          */
3734         msleep(100);
3735
3736         for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
3737                 qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
3738
3739         rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
3740         return rc;
3741 }
3742
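/* Translate the MFW's bitmap of FLR-ed VFs (keyed by absolute VF id)
 * into this hwfn's pending_flr bitmap (keyed by relative VF id),
 * moving each affected VF into VF_RESET. Returns true if any VF
 * relevant to this hwfn was marked.
 */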
3743 bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
3744 {
3745         bool found = false;
3746         u16 i;
3747
3748         DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
3749         for (i = 0; i < (VF_MAX_STATIC / 32); i++)
3750                 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3751                            "[%08x,...,%08x]: %08x\n",
3752                            i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
3753
3754         if (!p_hwfn->cdev->p_iov_info) {
3755                 DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
3756                 return false;
3757         }
3758
3759         /* Mark VFs */
3760         for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
3761                 struct qed_vf_info *p_vf;
3762                 u8 vfid;
3763
3764                 p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
3765                 if (!p_vf)
3766                         continue;
3767
3768                 vfid = p_vf->abs_vf_id;
3769                 if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) {
3770                         u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
3771                         u16 rel_vf_id = p_vf->relative_vf_id;
3772
3773                         DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3774                                    "VF[%d] [rel %d] got FLR-ed\n",
3775                                    vfid, rel_vf_id);
3776
3777                         p_vf->state = VF_RESET;
3778
3779                         /* No need to lock here, since pending_flr should
3780                          * only change here and before ACKing the MFW. Since
3781                          * the MFW will not trigger an additional attention
3782                          * for VF FLR until we ACK, we're safe.
3783                          */
3784                         p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
3785                         found = true;
3786                 }
3787         }
3788
3789         return found;
3790 }
3791
3792 static int qed_iov_get_link(struct qed_hwfn *p_hwfn,
3793                             u16 vfid,
3794                             struct qed_mcp_link_params *p_params,
3795                             struct qed_mcp_link_state *p_link,
3796                             struct qed_mcp_link_capabilities *p_caps)
3797 {
3798         struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
3799                                                        vfid,
3800                                                        false);
3801         struct qed_bulletin_content *p_bulletin;
3802
3803         if (!p_vf)
3804                 return -EINVAL;
3805
3806         p_bulletin = p_vf->bulletin.p_virt;
3807
3808         if (p_params)
3809                 __qed_vf_get_link_params(p_hwfn, p_params, p_bulletin);
3810         if (p_link)
3811                 __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
3812         if (p_caps)
3813                 __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
3814         return 0;
3815 }
3816
3817 static int
3818 qed_iov_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
3819                                   struct qed_ptt *p_ptt,
3820                                   struct qed_vf_info *p_vf)
3821 {
3822         struct qed_bulletin_content *p_bulletin = p_vf->bulletin.p_virt;
3823         struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
3824         struct vfpf_bulletin_update_mac_tlv *p_req;
3825         u8 status = PFVF_STATUS_SUCCESS;
3826         int rc = 0;
3827
3828         if (!p_vf->p_vf_info.is_trusted_configured) {
3829                 DP_VERBOSE(p_hwfn,
3830                            QED_MSG_IOV,
3831                            "Blocking bulletin update request from untrusted VF[%d]\n",
3832                            p_vf->abs_vf_id);
3833                 status = PFVF_STATUS_NOT_SUPPORTED;
3834                 rc = -EINVAL;
3835                 goto send_status;
3836         }
3837
3838         p_req = &mbx->req_virt->bulletin_update_mac;
3839         ether_addr_copy(p_bulletin->mac, p_req->mac);
3840         DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3841                    "Updated bulletin of VF[%d] with requested MAC[%pM]\n",
3842                    p_vf->abs_vf_id, p_req->mac);
3843
3844 send_status:
3845         qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3846                              CHANNEL_TLV_BULLETIN_UPDATE_MAC,
3847                              sizeof(struct pfvf_def_resp_tlv), status);
3848         return rc;
3849 }
3850
3851 static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
3852                                     struct qed_ptt *p_ptt, int vfid)
3853 {
3854         struct qed_iov_vf_mbx *mbx;
3855         struct qed_vf_info *p_vf;
3856
3857         p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3858         if (!p_vf)
3859                 return;
3860
3861         mbx = &p_vf->vf_mbx;
3862
3863         /* qed_iov_process_mbx_request */
3864         if (!mbx->b_pending_msg) {
3865                 DP_NOTICE(p_hwfn,
3866                           "VF[%02x]: Trying to process mailbox message when none is pending\n",
3867                           p_vf->abs_vf_id);
3868                 return;
3869         }
3870         mbx->b_pending_msg = false;
3871
3872         mbx->first_tlv = mbx->req_virt->first_tlv;
3873
3874         DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3875                    "VF[%02x]: Processing mailbox message [type %04x]\n",
3876                    p_vf->abs_vf_id, mbx->first_tlv.tl.type);
3877
3878         /* check if tlv type is known */
3879         if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) &&
3880             !p_vf->b_malicious) {
3881                 switch (mbx->first_tlv.tl.type) {
3882                 case CHANNEL_TLV_ACQUIRE:
3883                         qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
3884                         break;
3885                 case CHANNEL_TLV_VPORT_START:
3886                         qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
3887                         break;
3888                 case CHANNEL_TLV_VPORT_TEARDOWN:
3889                         qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
3890                         break;
3891                 case CHANNEL_TLV_START_RXQ:
3892                         qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
3893                         break;
3894                 case CHANNEL_TLV_START_TXQ:
3895                         qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
3896                         break;
3897                 case CHANNEL_TLV_STOP_RXQS:
3898                         qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
3899                         break;
3900                 case CHANNEL_TLV_STOP_TXQS:
3901                         qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
3902                         break;
3903                 case CHANNEL_TLV_UPDATE_RXQ:
3904                         qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
3905                         break;
3906                 case CHANNEL_TLV_VPORT_UPDATE:
3907                         qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
3908                         break;
3909                 case CHANNEL_TLV_UCAST_FILTER:
3910                         qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
3911                         break;
3912                 case CHANNEL_TLV_CLOSE:
3913                         qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
3914                         break;
3915                 case CHANNEL_TLV_INT_CLEANUP:
3916                         qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
3917                         break;
3918                 case CHANNEL_TLV_RELEASE:
3919                         qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
3920                         break;
3921                 case CHANNEL_TLV_UPDATE_TUNN_PARAM:
3922                         qed_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
3923                         break;
3924                 case CHANNEL_TLV_COALESCE_UPDATE:
3925                         qed_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
3926                         break;
3927                 case CHANNEL_TLV_COALESCE_READ:
3928                         qed_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
3929                         break;
3930                 case CHANNEL_TLV_BULLETIN_UPDATE_MAC:
3931                         qed_iov_vf_pf_bulletin_update_mac(p_hwfn, p_ptt, p_vf);
3932                         break;
3933                 }
3934         } else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
3935                 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3936                            "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
3937                            p_vf->abs_vf_id, mbx->first_tlv.tl.type);
3938
3939                 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3940                                      mbx->first_tlv.tl.type,
3941                                      sizeof(struct pfvf_def_resp_tlv),
3942                                      PFVF_STATUS_MALICIOUS);
3943         } else {
3944                 /* unknown TLV - this may belong to a VF driver from the future
3945                  * - a version written after this PF driver was written, which
3946                  * supports features unknown as of yet. Too bad since we don't
3947                  * support them. Or this may be because someone wrote a crappy
3948                  * VF driver and is sending garbage over the channel.
3949                  */
3950                 DP_NOTICE(p_hwfn,
3951                           "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n",
3952                           p_vf->abs_vf_id,
3953                           mbx->first_tlv.tl.type,
3954                           mbx->first_tlv.tl.length,
3955                           mbx->first_tlv.padding, mbx->first_tlv.reply_address);
3956
3957                 /* Try replying in case reply address matches the acquisition's
3958                  * posted address.
3959                  */
3960                 if (p_vf->acquire.first_tlv.reply_address &&
3961                     (mbx->first_tlv.reply_address ==
3962                      p_vf->acquire.first_tlv.reply_address)) {
3963                         qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3964                                              mbx->first_tlv.tl.type,
3965                                              sizeof(struct pfvf_def_resp_tlv),
3966                                              PFVF_STATUS_NOT_SUPPORTED);
3967                 } else {
3968                         DP_VERBOSE(p_hwfn,
3969                                    QED_MSG_IOV,
3970                                    "VF[%02x]: Can't respond to TLV - no valid reply address\n",
3971                                    p_vf->abs_vf_id);
3972                 }
3973         }
3974 }
3975
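/* Collect, per VF, whether a mailbox message is pending; the resulting
 * bitmap drives the IOV worker's servicing of VF requests.
 */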
3976 static void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events)
3977 {
3978         int i;
3979
3980         memset(events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
3981
3982         qed_for_each_vf(p_hwfn, i) {
3983                 struct qed_vf_info *p_vf;
3984
3985                 p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
3986                 if (p_vf->vf_mbx.b_pending_msg)
3987                         events[i / 64] |= 1ULL << (i % 64);
3988         }
3989 }
3990
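/* Map an absolute VF id, as reported in an event, to this PF's
 * vfs_array entry by subtracting the PF's first VF id; out-of-range
 * ids fail the sanity check and yield NULL.
 */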
3991 static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
3992                                                        u16 abs_vfid)
3993 {
3994         u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf;
3995
3996         if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
3997                 DP_VERBOSE(p_hwfn,
3998                            QED_MSG_IOV,
3999                            "Got indication for VF [abs 0x%08x] that cannot be handled by PF\n",
4000                            abs_vfid);
4001                 return NULL;
4002         }
4003
4004         return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min];
4005 }
4006
4007 static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
4008                               u16 abs_vfid, struct regpair *vf_msg)
4009 {
4010         struct qed_vf_info *p_vf = qed_sriov_get_vf_from_absid(p_hwfn,
4011                                                                 abs_vfid);
4012
4013         if (!p_vf)
4014                 return 0;
4015
4016         /* Record the physical address of the request so that the
4017          * handler can later copy the message from it.
4018          */
4019         p_vf->vf_mbx.pending_req = HILO_64(vf_msg->hi, vf_msg->lo);
4020
4021         /* Mark the event and schedule the workqueue */
4022         p_vf->vf_mbx.b_pending_msg = true;
4023         qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
4024
4025         return 0;
4026 }
4027
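     /* Handle a malicious-VF indication from firmware - latch the
      * b_malicious flag on the first report and only log afterwards.
      */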
4028 static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
4029                                      struct malicious_vf_eqe_data *p_data)
4030 {
4031         struct qed_vf_info *p_vf;
4032
4033         p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
4034
4035         if (!p_vf)
4036                 return;
4037
4038         if (!p_vf->b_malicious) {
4039                 DP_NOTICE(p_hwfn,
4040                           "VF [%d] - Malicious behavior [%02x]\n",
4041                           p_vf->abs_vf_id, p_data->err_id);
4042
4043                 p_vf->b_malicious = true;
4044         } else {
4045                 DP_INFO(p_hwfn,
4046                         "VF [%d] - Malicious behavior [%02x]\n",
4047                         p_vf->abs_vf_id, p_data->err_id);
4048         }
4049 }
4050
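     /* Async completion handler for SR-IOV events arriving on the
      * event queue - VF->PF channel messages and malicious-VF reports.
      */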
4051 static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo,
4052                                union event_ring_data *data, u8 fw_return_code)
4053 {
4054         switch (opcode) {
4055         case COMMON_EVENT_VF_PF_CHANNEL:
4056                 return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
4057                                           &data->vf_pf_channel.msg_addr);
4058         case COMMON_EVENT_MALICIOUS_VF:
4059                 qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
4060                 return 0;
4061         default:
4062                 DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
4063                         opcode);
4064                 return -EINVAL;
4065         }
4066 }
4067
4068 u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
4069 {
4070         struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
4071         u16 i;
4072
4073         if (!p_iov)
4074                 goto out;
4075
4076         for (i = rel_vf_id; i < p_iov->total_vfs; i++)
4077                 if (qed_iov_is_valid_vfid(p_hwfn, i, true, false))
4078                         return i;
4079
4080 out:
4081         return MAX_NUM_VFS;
4082 }
4083
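     /* DMA the VF's mailbox request from the address the VF posted
      * into the PF's pre-allocated per-VF request buffer.
      */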
4084 static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
4085                                int vfid)
4086 {
4087         struct qed_dmae_params params;
4088         struct qed_vf_info *vf_info;
4089
4090         vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4091         if (!vf_info)
4092                 return -EINVAL;
4093
4094         memset(&params, 0, sizeof(params));
4095         SET_FIELD(params.flags, QED_DMAE_PARAMS_SRC_VF_VALID, 0x1);
4096         SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 0x1);
4097         params.src_vfid = vf_info->abs_vf_id;
4098
4099         if (qed_dmae_host2host(p_hwfn, ptt,
4100                                vf_info->vf_mbx.pending_req,
4101                                vf_info->vf_mbx.req_phys,
4102                                sizeof(union vfpf_tlvs) / 4, &params)) {
4103                 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
4104                            "Failed to copy message from VF 0x%02x\n", vfid);
4105
4106                 return -EIO;
4107         }
4108
4109         return 0;
4110 }
4111
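     /* Publish a forced MAC through the VF's bulletin board. When trust
      * mode is configured, the address is posted as a regular
      * (non-forced) MAC instead, since trust disables the forced MAC.
      */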
4112 static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
4113                                             u8 *mac, int vfid)
4114 {
4115         struct qed_vf_info *vf_info;
4116         u64 feature;
4117
4118         vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4119         if (!vf_info) {
4120                 DP_NOTICE(p_hwfn->cdev,
4121                           "Can not set forced MAC, invalid vfid [%d]\n", vfid);
4122                 return;
4123         }
4124
4125         if (vf_info->b_malicious) {
4126                 DP_NOTICE(p_hwfn->cdev,
4127                           "Can't set forced MAC to malicious VF [%d]\n", vfid);
4128                 return;
4129         }
4130
4131         if (vf_info->p_vf_info.is_trusted_configured) {
4132                 feature = BIT(VFPF_BULLETIN_MAC_ADDR);
4133                 /* Trust mode will disable Forced MAC */
4134                 vf_info->bulletin.p_virt->valid_bitmap &=
4135                         ~BIT(MAC_ADDR_FORCED);
4136         } else {
4137                 feature = BIT(MAC_ADDR_FORCED);
4138                 /* Forced MAC will disable MAC_ADDR */
4139                 vf_info->bulletin.p_virt->valid_bitmap &=
4140                         ~BIT(VFPF_BULLETIN_MAC_ADDR);
4141         }
4142
4143         memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
4144
4145         vf_info->bulletin.p_virt->valid_bitmap |= feature;
4146
4147         qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
4148 }
4149
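     /* Publish a non-forced MAC through the VF's bulletin board; fails
      * if a forced MAC is already configured for this VF.
      */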
4150 static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid)
4151 {
4152         struct qed_vf_info *vf_info;
4153         u64 feature;
4154
4155         vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4156         if (!vf_info) {
4157                 DP_NOTICE(p_hwfn->cdev, "Can not set MAC, invalid vfid [%d]\n",
4158                           vfid);
4159                 return -EINVAL;
4160         }
4161
4162         if (vf_info->b_malicious) {
4163                 DP_NOTICE(p_hwfn->cdev, "Can't set MAC to malicious VF [%d]\n",
4164                           vfid);
4165                 return -EINVAL;
4166         }
4167
4168         if (vf_info->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) {
4169                 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
4170                            "Can not set MAC, Forced MAC is configured\n");
4171                 return -EINVAL;
4172         }
4173
4174         feature = BIT(VFPF_BULLETIN_MAC_ADDR);
4175         ether_addr_copy(vf_info->bulletin.p_virt->mac, mac);
4176
4177         vf_info->bulletin.p_virt->valid_bitmap |= feature;
4178
4179         if (vf_info->p_vf_info.is_trusted_configured)
4180                 qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
4181
4182         return 0;
4183 }
4184
4185 static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
4186                                              u16 pvid, int vfid)
4187 {
4188         struct qed_vf_info *vf_info;
4189         u64 feature;
4190
4191         vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4192         if (!vf_info) {
4193                 DP_NOTICE(p_hwfn->cdev,
4194                           "Can not set forced VLAN, invalid vfid [%d]\n", vfid);
4195                 return;
4196         }
4197
4198         if (vf_info->b_malicious) {
4199                 DP_NOTICE(p_hwfn->cdev,
4200                           "Can't set forced vlan to malicious VF [%d]\n", vfid);
4201                 return;
4202         }
4203
4204         feature = BIT(VLAN_ADDR_FORCED);
4205         vf_info->bulletin.p_virt->pvid = pvid;
4206         if (pvid)
4207                 vf_info->bulletin.p_virt->valid_bitmap |= feature;
4208         else
4209                 vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
4210
4211         qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
4212 }
4213
4214 void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
4215                                     int vfid, u16 vxlan_port, u16 geneve_port)
4216 {
4217         struct qed_vf_info *vf_info;
4218
4219         vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4220         if (!vf_info) {
4221                 DP_NOTICE(p_hwfn->cdev,
4222                           "Can not set udp ports, invalid vfid [%d]\n", vfid);
4223                 return;
4224         }
4225
4226         if (vf_info->b_malicious) {
4227                 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
4228                            "Can not set udp ports to malicious VF [%d]\n",
4229                            vfid);
4230                 return;
4231         }
4232
4233         vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
4234         vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
4235 }
4236
4237 static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
4238 {
4239         struct qed_vf_info *p_vf_info;
4240
4241         p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4242         if (!p_vf_info)
4243                 return false;
4244
4245         return !!p_vf_info->vport_instance;
4246 }
4247
4248 static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
4249 {
4250         struct qed_vf_info *p_vf_info;
4251
4252         p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4253         if (!p_vf_info)
4254                 return true;
4255
4256         return p_vf_info->state == VF_STOPPED;
4257 }
4258
4259 static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
4260 {
4261         struct qed_vf_info *vf_info;
4262
4263         vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4264         if (!vf_info)
4265                 return false;
4266
4267         return vf_info->spoof_chk;
4268 }
4269
4270 static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
4271 {
4272         struct qed_vf_info *vf;
4273         int rc = -EINVAL;
4274
4275         if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
4276                 DP_NOTICE(p_hwfn,
4277                           "SR-IOV sanity check failed, can't set spoofchk\n");
4278                 goto out;
4279         }
4280
4281         vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4282         if (!vf)
4283                 goto out;
4284
4285         if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) {
4286                 /* After VF VPORT start PF will configure spoof check */
4287                 vf->req_spoofchk_val = val;
4288                 rc = 0;
4289                 goto out;
4290         }
4291
4292         rc = __qed_iov_spoofchk_set(p_hwfn, vf, val);
4293
4294 out:
4295         return rc;
4296 }
4297
4298 static u8 *qed_iov_bulletin_get_mac(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
4299 {
4300         struct qed_vf_info *p_vf;
4301
4302         p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4303         if (!p_vf || !p_vf->bulletin.p_virt)
4304                 return NULL;
4305
4306         if (!(p_vf->bulletin.p_virt->valid_bitmap &
4307               BIT(VFPF_BULLETIN_MAC_ADDR)))
4308                 return NULL;
4309
4310         return p_vf->bulletin.p_virt->mac;
4311 }
4312
4313 static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
4314                                            u16 rel_vf_id)
4315 {
4316         struct qed_vf_info *p_vf;
4317
4318         p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4319         if (!p_vf || !p_vf->bulletin.p_virt)
4320                 return NULL;
4321
4322         if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)))
4323                 return NULL;
4324
4325         return p_vf->bulletin.p_virt->mac;
4326 }
4327
4328 static u16
4329 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
4330 {
4331         struct qed_vf_info *p_vf;
4332
4333         p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4334         if (!p_vf || !p_vf->bulletin.p_virt)
4335                 return 0;
4336
4337         if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)))
4338                 return 0;
4339
4340         return p_vf->bulletin.p_virt->pvid;
4341 }
4342
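     /* Configure a VF's maximum Tx rate by programming the global rate
      * limiter associated with the VF's vport.
      */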
4343 static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
4344                                      struct qed_ptt *p_ptt, int vfid, int val)
4345 {
4346         struct qed_vf_info *vf;
4347         u8 abs_vp_id = 0;
4348         u16 rl_id;
4349         int rc;
4350
4351         vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4352         if (!vf)
4353                 return -EINVAL;
4354
4355         rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
4356         if (rc)
4357                 return rc;
4358
4359         rl_id = abs_vp_id;      /* The "rl_id" is set as the "vport_id" */
4360         return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val);
4361 }
4362
4363 static int
4364 qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
4365 {
4366         struct qed_vf_info *vf;
4367         u8 vport_id;
4368         int i;
4369
4370         for_each_hwfn(cdev, i) {
4371                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
4372
4373                 if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
4374                         DP_NOTICE(p_hwfn,
4375                                   "SR-IOV sanity check failed, can't set min rate\n");
4376                         return -EINVAL;
4377                 }
4378         }
4379
4380         vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
4381         if (!vf)
4382                 return -EINVAL;
4383
4384         vport_id = vf->vport_id;
4385
4386         return qed_configure_vport_wfq(cdev, vport_id, rate);
4387 }
4388
4389 static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
4390 {
4391         struct qed_wfq_data *vf_vp_wfq;
4392         struct qed_vf_info *vf_info;
4393
4394         vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4395         if (!vf_info)
4396                 return 0;
4397
4398         vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
4399
4400         if (vf_vp_wfq->configured)
4401                 return vf_vp_wfq->min_speed;
4402         else
4403                 return 0;
4404 }
4405
4406 /**
4407  * qed_schedule_iov - schedules IOV task for VF and PF
4408  * @hwfn: hardware function pointer
4409  * @flag: IOV flag for VF/PF
4410  */
4411 void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
4412 {
4413         smp_mb__before_atomic();
4414         set_bit(flag, &hwfn->iov_task_flags);
4415         smp_mb__after_atomic();
4416         DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
4417         queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
4418 }
4419
4420 void qed_vf_start_iov_wq(struct qed_dev *cdev)
4421 {
4422         int i;
4423
4424         for_each_hwfn(cdev, i)
4425             queue_delayed_work(cdev->hwfns[i].iov_wq,
4426                                &cdev->hwfns[i].iov_task, 0);
4427 }
4428
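     /* Tear down SR-IOV - flush the IOV workqueues, disable the PCI
      * VFs, and release per-VF HW resources once each VF has stopped.
      */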
4429 int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
4430 {
4431         int i, j;
4432
4433         for_each_hwfn(cdev, i)
4434             if (cdev->hwfns[i].iov_wq)
4435                 flush_workqueue(cdev->hwfns[i].iov_wq);
4436
4437         /* Mark VFs for disablement */
4438         qed_iov_set_vfs_to_disable(cdev, true);
4439
4440         if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
4441                 pci_disable_sriov(cdev->pdev);
4442
4443         if (cdev->recov_in_prog) {
4444                 DP_VERBOSE(cdev,
4445                            QED_MSG_IOV,
4446                            "Skip SRIOV disable operations in the device since a recovery is in progress\n");
4447                 goto out;
4448         }
4449
4450         for_each_hwfn(cdev, i) {
4451                 struct qed_hwfn *hwfn = &cdev->hwfns[i];
4452                 struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
4453
4454                 /* Failure to acquire the ptt in 100G mode creates an odd
4455                  * error where the first engine has already released IOV.
4456                  */
4457                 if (!ptt) {
4458                         DP_ERR(hwfn, "Failed to acquire ptt\n");
4459                         return -EBUSY;
4460                 }
4461
4462                 /* Clean WFQ db and configure equal weight for all vports */
4463                 qed_clean_wfq_db(hwfn, ptt);
4464
4465                 qed_for_each_vf(hwfn, j) {
4466                         int k;
4467
4468                         if (!qed_iov_is_valid_vfid(hwfn, j, true, false))
4469                                 continue;
4470
4471                         /* Wait until VF is disabled before releasing */
4472                         for (k = 0; k < 100; k++) {
4473                                 if (!qed_iov_is_vf_stopped(hwfn, j))
4474                                         msleep(20);
4475                                 else
4476                                         break;
4477                         }
4478
4479                         if (k < 100)
4480                                 qed_iov_release_hw_for_vf(&cdev->hwfns[i],
4481                                                           ptt, j);
4482                         else
4483                                 DP_ERR(hwfn,
4484                                        "Timeout waiting for VF's FLR to end\n");
4485                 }
4486
4487                 qed_ptt_release(hwfn, ptt);
4488         }
4489 out:
4490         qed_iov_set_vfs_to_disable(cdev, false);
4491
4492         return 0;
4493 }
4494
4495 static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
4496                                         u16 vfid,
4497                                         struct qed_iov_vf_init_params *params)
4498 {
4499         u16 base, i;
4500
4501         /* Since resources are distributed equally per-VF, and the PF is
4502          * assumed to have acquired the first QED_PF_L2_QUE queues, start
4503          * assigning VF queues sequentially from there.
4504          */
4505         base = FEAT_NUM(hwfn, QED_PF_L2_QUE) + vfid * params->num_queues;
4506
4507         params->rel_vf_id = vfid;
4508         for (i = 0; i < params->num_queues; i++) {
4509                 params->req_rx_queue[i] = base + i;
4510                 params->req_tx_queue[i] = base + i;
4511         }
4512 }
4513
4514 static int qed_sriov_enable(struct qed_dev *cdev, int num)
4515 {
4516         struct qed_iov_vf_init_params params;
4517         struct qed_hwfn *hwfn;
4518         struct qed_ptt *ptt;
4519         int i, j, rc;
4520
4521         if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
4522                 DP_NOTICE(cdev, "Can start at most %d VFs\n",
4523                           RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
4524                 return -EINVAL;
4525         }
4526
4527         memset(&params, 0, sizeof(params));
4528
4529         /* Initialize HW for VF access */
4530         for_each_hwfn(cdev, j) {
4531                 hwfn = &cdev->hwfns[j];
4532                 ptt = qed_ptt_acquire(hwfn);
4533
4534                 /* Make sure not to use more than 16 queues per VF */
4535                 params.num_queues = min_t(int,
4536                                           FEAT_NUM(hwfn, QED_VF_L2_QUE) / num,
4537                                           16);
4538
4539                 if (!ptt) {
4540                         DP_ERR(hwfn, "Failed to acquire ptt\n");
4541                         rc = -EBUSY;
4542                         goto err;
4543                 }
4544
4545                 for (i = 0; i < num; i++) {
4546                         if (!qed_iov_is_valid_vfid(hwfn, i, false, true))
4547                                 continue;
4548
4549                         qed_sriov_enable_qid_config(hwfn, i, &params);
4550                         rc = qed_iov_init_hw_for_vf(hwfn, ptt, &params);
4551                         if (rc) {
4552                                 DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
4553                                 qed_ptt_release(hwfn, ptt);
4554                                 goto err;
4555                         }
4556                 }
4557
4558                 qed_ptt_release(hwfn, ptt);
4559         }
4560
4561         /* Enable SRIOV PCIe functions */
4562         rc = pci_enable_sriov(cdev->pdev, num);
4563         if (rc) {
4564                 DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
4565                 goto err;
4566         }
4567
4568         hwfn = QED_LEADING_HWFN(cdev);
4569         ptt = qed_ptt_acquire(hwfn);
4570         if (!ptt) {
4571                 DP_ERR(hwfn, "Failed to acquire ptt\n");
4572                 rc = -EBUSY;
4573                 goto err;
4574         }
4575
4576         rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB);
4577         if (rc)
4578                 DP_INFO(cdev, "Failed to update eswitch mode\n");
4579         qed_ptt_release(hwfn, ptt);
4580
4581         return num;
4582
4583 err:
4584         qed_sriov_disable(cdev, false);
4585         return rc;
4586 }
4587
4588 static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
4589 {
4590         if (!IS_QED_SRIOV(cdev)) {
4591                 DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
4592                 return -EOPNOTSUPP;
4593         }
4594
4595         if (num_vfs_param)
4596                 return qed_sriov_enable(cdev, num_vfs_param);
4597         else
4598                 return qed_sriov_disable(cdev, true);
4599 }
4600
4601 static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
4602 {
4603         int i;
4604
4605         if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
4606                 DP_VERBOSE(cdev, QED_MSG_IOV,
4607                            "Cannot set a VF MAC; SR-IOV is not enabled\n");
4608                 return -EINVAL;
4609         }
4610
4611         if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
4612                 DP_VERBOSE(cdev, QED_MSG_IOV,
4613                            "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
4614                 return -EINVAL;
4615         }
4616
4617         for_each_hwfn(cdev, i) {
4618                 struct qed_hwfn *hwfn = &cdev->hwfns[i];
4619                 struct qed_public_vf_info *vf_info;
4620
4621                 vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
4622                 if (!vf_info)
4623                         continue;
4624
4625                 /* Set the MAC, and schedule the IOV task */
4626                 if (vf_info->is_trusted_configured)
4627                         ether_addr_copy(vf_info->mac, mac);
4628                 else
4629                         ether_addr_copy(vf_info->forced_mac, mac);
4630
4631                 qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
4632         }
4633
4634         return 0;
4635 }
4636
4637 static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
4638 {
4639         int i;
4640
4641         if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
4642                 DP_VERBOSE(cdev, QED_MSG_IOV,
4643                            "Cannot set a VF VLAN; SR-IOV is not enabled\n");
4644                 return -EINVAL;
4645         }
4646
4647         if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
4648                 DP_VERBOSE(cdev, QED_MSG_IOV,
4649                            "Cannot set VF[%d] VLAN (VF is not active)\n", vfid);
4650                 return -EINVAL;
4651         }
4652
4653         for_each_hwfn(cdev, i) {
4654                 struct qed_hwfn *hwfn = &cdev->hwfns[i];
4655                 struct qed_public_vf_info *vf_info;
4656
4657                 vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
4658                 if (!vf_info)
4659                         continue;
4660
4661                 /* Set the forced vlan, and schedule the IOV task */
4662                 vf_info->forced_vlan = vid;
4663                 qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
4664         }
4665
4666         return 0;
4667 }
4668
4669 static int qed_get_vf_config(struct qed_dev *cdev,
4670                              int vf_id, struct ifla_vf_info *ivi)
4671 {
4672         struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
4673         struct qed_public_vf_info *vf_info;
4674         struct qed_mcp_link_state link;
4675         u32 tx_rate;
4676         int ret;
4677
4678         /* Sanitize request */
4679         if (IS_VF(cdev))
4680                 return -EINVAL;
4681
4682         if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, false)) {
4683                 DP_VERBOSE(cdev, QED_MSG_IOV,
4684                            "VF index [%d] isn't active\n", vf_id);
4685                 return -EINVAL;
4686         }
4687
4688         vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
4689
4690         ret = qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
4691         if (ret)
4692                 return ret;
4693
4694         /* Fill information about VF */
4695         ivi->vf = vf_id;
4696
4697         if (is_valid_ether_addr(vf_info->forced_mac))
4698                 ether_addr_copy(ivi->mac, vf_info->forced_mac);
4699         else
4700                 ether_addr_copy(ivi->mac, vf_info->mac);
4701
4702         ivi->vlan = vf_info->forced_vlan;
4703         ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
4704         ivi->linkstate = vf_info->link_state;
4705         tx_rate = vf_info->tx_rate;
4706         ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
4707         ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);
4708         ivi->trusted = vf_info->is_trusted_request;
4709
4710         return 0;
4711 }
4712
4713 void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
4714 {
4715         struct qed_hwfn *lead_hwfn = QED_LEADING_HWFN(hwfn->cdev);
4716         struct qed_mcp_link_capabilities caps;
4717         struct qed_mcp_link_params params;
4718         struct qed_mcp_link_state link;
4719         int i;
4720
4721         if (!hwfn->pf_iov_info)
4722                 return;
4723
4724         /* Update the bulletins of all possible future VFs with the link configuration */
4725         for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
4726                 struct qed_public_vf_info *vf_info;
4727
4728                 vf_info = qed_iov_get_public_vf_info(hwfn, i, false);
4729                 if (!vf_info)
4730                         continue;
4731
4732                 /* Only hwfn0 actually receives MFW link indications, so
4733                  * the link configuration must be taken from it; otherwise
4734                  * features such as rate limiting for hwfn1 VFs would not
4735                  * work.
4736                  */
4737                 memcpy(&params, qed_mcp_get_link_params(lead_hwfn),
4738                        sizeof(params));
4739                 memcpy(&link, qed_mcp_get_link_state(lead_hwfn), sizeof(link));
4740                 memcpy(&caps, qed_mcp_get_link_capabilities(lead_hwfn),
4741                        sizeof(caps));
4742
4743                 /* Modify link according to the VF's configured link state */
4744                 switch (vf_info->link_state) {
4745                 case IFLA_VF_LINK_STATE_DISABLE:
4746                         link.link_up = false;
4747                         break;
4748                 case IFLA_VF_LINK_STATE_ENABLE:
4749                         link.link_up = true;
4750                         /* Set speed according to the maximum supported by
4751                          * HW - that is, 40G for regular devices and 100G
4752                          * for CMT mode devices.
4753                          */
4754                         link.speed = (hwfn->cdev->num_hwfns > 1) ?
4755                                      100000 : 40000;
                             fallthrough;
4756                 default:
4757                         /* In auto mode pass PF link image to VF */
4758                         break;
4759                 }
4760
4761                 if (link.link_up && vf_info->tx_rate) {
4762                         struct qed_ptt *ptt;
4763                         int rate;
4764
4765                         rate = min_t(int, vf_info->tx_rate, link.speed);
4766
4767                         ptt = qed_ptt_acquire(hwfn);
4768                         if (!ptt) {
4769                                 DP_NOTICE(hwfn, "Failed to acquire PTT\n");
4770                                 return;
4771                         }
4772
4773                         if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) {
4774                                 vf_info->tx_rate = rate;
4775                                 link.speed = rate;
4776                         }
4777
4778                         qed_ptt_release(hwfn, ptt);
4779                 }
4780
4781                 qed_iov_set_link(hwfn, i, &params, &link, &caps);
4782         }
4783
4784         qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
4785 }
4786
4787 static int qed_set_vf_link_state(struct qed_dev *cdev,
4788                                  int vf_id, int link_state)
4789 {
4790         int i;
4791
4792         /* Sanitize request */
4793         if (IS_VF(cdev))
4794                 return -EINVAL;
4795
4796         if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, true)) {
4797                 DP_VERBOSE(cdev, QED_MSG_IOV,
4798                            "VF index [%d] isn't active\n", vf_id);
4799                 return -EINVAL;
4800         }
4801
4802         /* Handle configuration of link state */
4803         for_each_hwfn(cdev, i) {
4804                 struct qed_hwfn *hwfn = &cdev->hwfns[i];
4805                 struct qed_public_vf_info *vf;
4806
4807                 vf = qed_iov_get_public_vf_info(hwfn, vf_id, true);
4808                 if (!vf)
4809                         continue;
4810
4811                 if (vf->link_state == link_state)
4812                         continue;
4813
4814                 vf->link_state = link_state;
4815                 qed_inform_vf_link_state(&cdev->hwfns[i]);
4816         }
4817
4818         return 0;
4819 }
4820
4821 static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val)
4822 {
4823         int i, rc = -EINVAL;
4824
4825         for_each_hwfn(cdev, i) {
4826                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
4827
4828                 rc = qed_iov_spoofchk_set(p_hwfn, vfid, val);
4829                 if (rc)
4830                         break;
4831         }
4832
4833         return rc;
4834 }
4835
4836 static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate)
4837 {
4838         int i;
4839
4840         for_each_hwfn(cdev, i) {
4841                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
4842                 struct qed_public_vf_info *vf;
4843
4844                 if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
4845                         DP_NOTICE(p_hwfn,
4846                                   "SR-IOV sanity check failed, can't set tx rate\n");
4847                         return -EINVAL;
4848                 }
4849
4850                 vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true);
4851
4852                 vf->tx_rate = rate;
4853
4854                 qed_inform_vf_link_state(p_hwfn);
4855         }
4856
4857         return 0;
4858 }
4859
4860 static int qed_set_vf_rate(struct qed_dev *cdev,
4861                            int vfid, u32 min_rate, u32 max_rate)
4862 {
4863         int rc_min = 0, rc_max = 0;
4864
4865         if (max_rate)
4866                 rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate);
4867
4868         if (min_rate)
4869                 rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate);
4870
4871         if (rc_max | rc_min)
4872                 return -EINVAL;
4873
4874         return 0;
4875 }
4876
4877 static int qed_set_vf_trust(struct qed_dev *cdev, int vfid, bool trust)
4878 {
4879         int i;
4880
4881         for_each_hwfn(cdev, i) {
4882                 struct qed_hwfn *hwfn = &cdev->hwfns[i];
4883                 struct qed_public_vf_info *vf;
4884
4885                 if (!qed_iov_pf_sanity_check(hwfn, vfid)) {
4886                         DP_NOTICE(hwfn,
4887                                   "SR-IOV sanity check failed, can't set trust\n");
4888                         return -EINVAL;
4889                 }
4890
4891                 vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
4892
4893                 if (vf->is_trusted_request == trust)
4894                         return 0;
4895                 vf->is_trusted_request = trust;
4896
4897                 qed_schedule_iov(hwfn, QED_IOV_WQ_TRUST_FLAG);
4898         }
4899
4900         return 0;
4901 }
4902
4903 static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
4904 {
4905         u64 events[QED_VF_ARRAY_LENGTH];
4906         struct qed_ptt *ptt;
4907         int i;
4908
4909         ptt = qed_ptt_acquire(hwfn);
4910         if (!ptt) {
4911                 DP_VERBOSE(hwfn, QED_MSG_IOV,
4912                            "Can't acquire PTT; re-scheduling\n");
4913                 qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
4914                 return;
4915         }
4916
4917         qed_iov_pf_get_pending_events(hwfn, events);
4918
4919         DP_VERBOSE(hwfn, QED_MSG_IOV,
4920                    "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
4921                    events[0], events[1], events[2]);
4922
4923         qed_for_each_vf(hwfn, i) {
4924                 /* Skip VFs with no pending messages */
4925                 if (!(events[i / 64] & (1ULL << (i % 64))))
4926                         continue;
4927
4928                 DP_VERBOSE(hwfn, QED_MSG_IOV,
4929                            "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
4930                            i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);
4931
4932                 /* Copy VF's message to PF's request buffer for that VF */
4933                 if (qed_iov_copy_vf_msg(hwfn, ptt, i))
4934                         continue;
4935
4936                 qed_iov_process_mbx_req(hwfn, ptt, i);
4937         }
4938
4939         qed_ptt_release(hwfn, ptt);
4940 }
4941
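     /* Check whether the PF-requested MAC differs from what is
      * currently published on the VF's bulletin board and thus needs
      * to be re-posted.
      */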
4942 static bool qed_pf_validate_req_vf_mac(struct qed_hwfn *hwfn,
4943                                        u8 *mac,
4944                                        struct qed_public_vf_info *info)
4945 {
4946         if (info->is_trusted_configured) {
4947                 if (is_valid_ether_addr(info->mac) &&
4948                     (!mac || !ether_addr_equal(mac, info->mac)))
4949                         return true;
4950         } else {
4951                 if (is_valid_ether_addr(info->forced_mac) &&
4952                     (!mac || !ether_addr_equal(mac, info->forced_mac)))
4953                         return true;
4954         }
4955
4956         return false;
4957 }
4958
4959 static void qed_set_bulletin_mac(struct qed_hwfn *hwfn,
4960                                  struct qed_public_vf_info *info,
4961                                  int vfid)
4962 {
4963         if (info->is_trusted_configured)
4964                 qed_iov_bulletin_set_mac(hwfn, info->mac, vfid);
4965         else
4966                 qed_iov_bulletin_set_forced_mac(hwfn, info->forced_mac, vfid);
4967 }
4968
4969 static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
4970 {
4971         int i;
4972
4973         qed_for_each_vf(hwfn, i) {
4974                 struct qed_public_vf_info *info;
4975                 bool update = false;
4976                 u8 *mac;
4977
4978                 info = qed_iov_get_public_vf_info(hwfn, i, true);
4979                 if (!info)
4980                         continue;
4981
4982                 /* Update data on bulletin board */
4983                 if (info->is_trusted_configured)
4984                         mac = qed_iov_bulletin_get_mac(hwfn, i);
4985                 else
4986                         mac = qed_iov_bulletin_get_forced_mac(hwfn, i);
4987
4988                 if (qed_pf_validate_req_vf_mac(hwfn, mac, info)) {
4989                         DP_VERBOSE(hwfn,
4990                                    QED_MSG_IOV,
4991                                    "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
4992                                    i,
4993                                    hwfn->cdev->p_iov_info->first_vf_in_pf + i);
4994
4995                         /* Update bulletin board with MAC */
4996                         qed_set_bulletin_mac(hwfn, info, i);
4997                         update = true;
4998                 }
4999
5000                 if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^
5001                     info->forced_vlan) {
5002                         DP_VERBOSE(hwfn,
5003                                    QED_MSG_IOV,
5004                                    "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n",
5005                                    info->forced_vlan,
5006                                    i,
5007                                    hwfn->cdev->p_iov_info->first_vf_in_pf + i);
5008                         qed_iov_bulletin_set_forced_vlan(hwfn,
5009                                                          info->forced_vlan, i);
5010                         update = true;
5011                 }
5012
5013                 if (update)
5014                         qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
5015         }
5016 }
5017
5018 static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
5019 {
5020         struct qed_ptt *ptt;
5021         int i;
5022
5023         ptt = qed_ptt_acquire(hwfn);
5024         if (!ptt) {
5025                 DP_NOTICE(hwfn, "Failed allocating a ptt entry\n");
5026                 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
5027                 return;
5028         }
5029
5030         qed_for_each_vf(hwfn, i)
5031             qed_iov_post_vf_bulletin(hwfn, i, ptt);
5032
5033         qed_ptt_release(hwfn, ptt);
5034 }
5035
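     /* Reconcile a VF's MAC configuration with a trust-mode change:
      * turning trust on demotes a forced MAC to a regular one, while
      * turning it off folds the VF MAC back into the shadow config.
      */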
5036 static void qed_update_mac_for_vf_trust_change(struct qed_hwfn *hwfn, int vf_id)
5037 {
5038         struct qed_public_vf_info *vf_info;
5039         struct qed_vf_info *vf;
5040         u8 *force_mac;
5041         int i;
5042
5043         vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
5044         vf = qed_iov_get_vf_info(hwfn, vf_id, true);
5045
5046         if (!vf_info || !vf)
5047                 return;
5048
5049         /* A forced MAC is converted to a generic MAC when VF trust is on */
5050         if (vf_info->is_trusted_configured &&
5051             (vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) {
5052                 force_mac = qed_iov_bulletin_get_forced_mac(hwfn, vf_id);
5053
5054                 if (force_mac) {
5055                         /* Clear existing shadow copy of MAC to have a clean
5056                          * slate.
5057                          */
5058                         for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
5059                                 if (ether_addr_equal(vf->shadow_config.macs[i],
5060                                                      vf_info->mac)) {
5061                                         eth_zero_addr(vf->shadow_config.macs[i]);
5062                                         DP_VERBOSE(hwfn, QED_MSG_IOV,
5063                                                    "Shadow MAC %pM removed for VF 0x%02x, VF trust mode is ON\n",
5064                                                     vf_info->mac, vf_id);
5065                                         break;
5066                                 }
5067                         }
5068
5069                         ether_addr_copy(vf_info->mac, force_mac);
5070                         eth_zero_addr(vf_info->forced_mac);
5071                         vf->bulletin.p_virt->valid_bitmap &=
5072                                         ~BIT(MAC_ADDR_FORCED);
5073                         qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
5074                 }
5075         }
5076
5077         /* Update shadow copy with VF MAC when trust mode is turned off */
5078         if (!vf_info->is_trusted_configured) {
5079                 u8 empty_mac[ETH_ALEN];
5080
5081                 eth_zero_addr(empty_mac);
5082                 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
5083                         if (ether_addr_equal(vf->shadow_config.macs[i],
5084                                              empty_mac)) {
5085                                 ether_addr_copy(vf->shadow_config.macs[i],
5086                                                 vf_info->mac);
5087                                 DP_VERBOSE(hwfn, QED_MSG_IOV,
5088                                            "Shadow is updated with %pM for VF 0x%02x, VF trust mode is OFF\n",
5089                                             vf_info->mac, vf_id);
5090                                 break;
5091                         }
5092                 }
5093                 /* Clear bulletin when trust mode is turned off,
5094                  * to have a clean slate for next (normal) operations.
5095                  */
5096                 qed_iov_bulletin_set_mac(hwfn, empty_mac, vf_id);
5097                 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
5098         }
5099 }
5100
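     /* Apply pending trust-mode changes for all VFs - update MAC
      * handling and tighten or relax the vport's accept filters and
      * control-frame checks accordingly.
      */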
5101 static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
5102 {
5103         struct qed_sp_vport_update_params params;
5104         struct qed_filter_accept_flags *flags;
5105         struct qed_public_vf_info *vf_info;
5106         struct qed_vf_info *vf;
5107         u8 mask;
5108         int i;
5109
5110         mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
5111         flags = &params.accept_flags;
5112
5113         qed_for_each_vf(hwfn, i) {
5114                 /* Make sure the currently requested configuration hasn't
5115                  * flipped meanwhile, so that we don't end up configuring
5116                  * something that is no longer needed.
5117                  */
5118                 vf_info = qed_iov_get_public_vf_info(hwfn, i, true);
5119                 if (vf_info->is_trusted_configured ==
5120                     vf_info->is_trusted_request)
5121                         continue;
5122                 vf_info->is_trusted_configured = vf_info->is_trusted_request;
5123
5124                 /* Handle forced MAC mode */
5125                 qed_update_mac_for_vf_trust_change(hwfn, i);
5126
5127                 /* Validate that the VF has a configured vport */
5128                 vf = qed_iov_get_vf_info(hwfn, i, true);
5129                 if (!vf || !vf->vport_instance)
5130                         continue;
5131
5132                 memset(&params, 0, sizeof(params));
5133                 params.opaque_fid = vf->opaque_fid;
5134                 params.vport_id = vf->vport_id;
5135
5136                 params.update_ctl_frame_check = 1;
5137                 params.mac_chk_en = !vf_info->is_trusted_configured;
5138                 params.update_accept_any_vlan_flg = 0;
5139
5140                 if (vf_info->accept_any_vlan && vf_info->forced_vlan) {
5141                         params.update_accept_any_vlan_flg = 1;
5142                         params.accept_any_vlan = vf_info->accept_any_vlan;
5143                 }
5144
5145                 if (vf_info->rx_accept_mode & mask) {
5146                         flags->update_rx_mode_config = 1;
5147                         flags->rx_accept_filter = vf_info->rx_accept_mode;
5148                 }
5149
5150                 if (vf_info->tx_accept_mode & mask) {
5151                         flags->update_tx_mode_config = 1;
5152                         flags->tx_accept_filter = vf_info->tx_accept_mode;
5153                 }
5154
5155                 /* For an untrusted VF, strip the mask bits; otherwise they'd stay set */
5156                 if (!vf_info->is_trusted_configured) {
5157                         flags->rx_accept_filter &= ~mask;
5158                         flags->tx_accept_filter &= ~mask;
5159                         params.accept_any_vlan = false;
5160                 }
5161
5162                 if (flags->update_rx_mode_config ||
5163                     flags->update_tx_mode_config ||
5164                     params.update_ctl_frame_check ||
5165                     params.update_accept_any_vlan_flg) {
5166                         DP_VERBOSE(hwfn, QED_MSG_IOV,
5167                                    "vport update config for %s VF[abs 0x%x rel 0x%x]\n",
5168                                    vf_info->is_trusted_configured ? "trusted" : "untrusted",
5169                                    vf->abs_vf_id, vf->relative_vf_id);
5170                         qed_sp_vport_update(hwfn, &params,
5171                                             QED_SPQ_MODE_EBLOCK, NULL);
5172                 }
5173         }
5174 }
5175
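     /* Deferred-work handler for the PF's IOV workqueue - services
      * whichever task flags were set via qed_schedule_iov().
      */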
5176 static void qed_iov_pf_task(struct work_struct *work)
5178 {
5179         struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
5180                                              iov_task.work);
5181         int rc;
5182
5183         if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
5184                 return;
5185
5186         if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
5187                 struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
5188
5189                 if (!ptt) {
5190                         qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
5191                         return;
5192                 }
5193
5194                 rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
5195                 if (rc)
5196                         qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
5197
5198                 qed_ptt_release(hwfn, ptt);
5199         }
5200
5201         if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
5202                 qed_handle_vf_msg(hwfn);
5203
5204         if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
5205                                &hwfn->iov_task_flags))
5206                 qed_handle_pf_set_vf_unicast(hwfn);
5207
5208         if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
5209                                &hwfn->iov_task_flags))
5210                 qed_handle_bulletin_post(hwfn);
5211
5212         if (test_and_clear_bit(QED_IOV_WQ_TRUST_FLAG, &hwfn->iov_task_flags))
5213                 qed_iov_handle_trust_change(hwfn);
5214 }
5215
5216 void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
5217 {
5218         int i;
5219
5220         for_each_hwfn(cdev, i) {
5221                 if (!cdev->hwfns[i].iov_wq)
5222                         continue;
5223
5224                 if (schedule_first) {
5225                         qed_schedule_iov(&cdev->hwfns[i],
5226                                          QED_IOV_WQ_STOP_WQ_FLAG);
5227                         cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
5228                 }
5229
5230                 flush_workqueue(cdev->hwfns[i].iov_wq);
5231                 destroy_workqueue(cdev->hwfns[i].iov_wq);
5232         }
5233 }
5234
5235 int qed_iov_wq_start(struct qed_dev *cdev)
5236 {
5237         char name[NAME_SIZE];
5238         int i;
5239
5240         for_each_hwfn(cdev, i) {
5241                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
5242
5243                 /* A PF needs a dedicated workqueue only if it supports
5244                  * IOV; VFs always require one.
5245                  */
5246                 if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn))
5247                         continue;
5248
5249                 snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
5250                          cdev->pdev->bus->number,
5251                          PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);
5252
5253                 p_hwfn->iov_wq = create_singlethread_workqueue(name);
5254                 if (!p_hwfn->iov_wq) {
5255                         DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
5256                         return -ENOMEM;
5257                 }
5258
5259                 if (IS_PF(cdev))
5260                         INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
5261                 else
5262                         INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task);
5263         }
5264
5265         return 0;
5266 }
5267
5268 const struct qed_iov_hv_ops qed_iov_ops_pass = {
5269         .configure = &qed_sriov_configure,
5270         .set_mac = &qed_sriov_pf_set_mac,
5271         .set_vlan = &qed_sriov_pf_set_vlan,
5272         .get_config = &qed_get_vf_config,
5273         .set_link_state = &qed_set_vf_link_state,
5274         .set_spoof = &qed_spoof_configure,
5275         .set_rate = &qed_set_vf_rate,
5276         .set_trust = &qed_set_vf_trust,
5277 };