GNU Linux-libre 4.14.332-gnu1
[releases.git] / drivers / net / ethernet / qlogic / qlcnic / qlcnic_sriov_common.c
1 /*
2  * QLogic qlcnic NIC Driver
3  * Copyright (c) 2009-2013 QLogic Corporation
4  *
5  * See LICENSE.qlcnic for copyright and licensing details.
6  */
7
8 #include <linux/types.h>
9
10 #include "qlcnic_sriov.h"
11 #include "qlcnic.h"
12 #include "qlcnic_83xx_hw.h"
13
/* Direction of a back-channel (BC) transaction between PF and VF */
#define QLC_BC_COMMAND  0
#define QLC_BC_RESPONSE 1

/* Mailbox wait limits, in jiffies */
#define QLC_MBOX_RESP_TIMEOUT           (10 * HZ)
#define QLC_MBOX_CH_FREE_TIMEOUT        (10 * HZ)

/* Bit positions tested by the qlcnic_sriov_*_check() helpers below */
#define QLC_BC_MSG              0
#define QLC_BC_CFREE            1
#define QLC_BC_FLR              2
/* A BC message fragment is 1024 bytes: a 16-byte header plus payload */
#define QLC_BC_HDR_SZ           16
#define QLC_BC_PAYLOAD_SZ       (1024 - QLC_BC_HDR_SZ)

/* Default VF rx descriptor counts (see qlcnic_sriov_vf_cfg_buff_desc()) */
#define QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF            2048
#define QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF      512

#define QLC_83XX_VF_RESET_FAIL_THRESH   8
#define QLC_BC_CMD_MAX_RETRY_CNT        5

/* Forward declarations for the VF ops tables defined below */
static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work);
static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *);
static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *);
static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *);
static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *,
				  struct qlcnic_cmd_args *);
static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8);
static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
static int qlcnic_sriov_vf_shutdown(struct pci_dev *);
static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *);
static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *,
					struct qlcnic_cmd_args *);
46
/* Hardware ops used when this function runs as an SR-IOV VF.  Mostly the
 * common 83xx handlers; mailbox command issue and MAC list teardown are
 * overridden with SR-IOV-aware versions defined in this file.
 */
static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
	.read_crb			= qlcnic_83xx_read_crb,
	.write_crb			= qlcnic_83xx_write_crb,
	.read_reg			= qlcnic_83xx_rd_reg_indirect,
	.write_reg			= qlcnic_83xx_wrt_reg_indirect,
	.get_mac_address		= qlcnic_83xx_get_mac_address,
	.setup_intr			= qlcnic_83xx_setup_intr,
	.alloc_mbx_args			= qlcnic_83xx_alloc_mbx_args,
	.mbx_cmd			= qlcnic_sriov_issue_cmd,
	.get_func_no			= qlcnic_83xx_get_func_no,
	.api_lock			= qlcnic_83xx_cam_lock,
	.api_unlock			= qlcnic_83xx_cam_unlock,
	.process_lb_rcv_ring_diag	= qlcnic_83xx_process_rcv_ring_diag,
	.create_rx_ctx			= qlcnic_83xx_create_rx_ctx,
	.create_tx_ctx			= qlcnic_83xx_create_tx_ctx,
	.del_rx_ctx			= qlcnic_83xx_del_rx_ctx,
	.del_tx_ctx			= qlcnic_83xx_del_tx_ctx,
	.setup_link_event		= qlcnic_83xx_setup_link_event,
	.get_nic_info			= qlcnic_83xx_get_nic_info,
	.get_pci_info			= qlcnic_83xx_get_pci_info,
	.set_nic_info			= qlcnic_83xx_set_nic_info,
	.change_macvlan			= qlcnic_83xx_sre_macaddr_change,
	.napi_enable			= qlcnic_83xx_napi_enable,
	.napi_disable			= qlcnic_83xx_napi_disable,
	.config_intr_coal		= qlcnic_83xx_config_intr_coal,
	.config_rss			= qlcnic_83xx_config_rss,
	.config_hw_lro			= qlcnic_83xx_config_hw_lro,
	.config_promisc_mode		= qlcnic_83xx_nic_set_promisc,
	.change_l2_filter		= qlcnic_83xx_change_l2_filter,
	.get_board_info			= qlcnic_83xx_get_port_info,
	.free_mac_list			= qlcnic_sriov_vf_free_mac_list,
	.enable_sds_intr		= qlcnic_83xx_enable_sds_intr,
	.disable_sds_intr		= qlcnic_83xx_disable_sds_intr,
	.encap_rx_offload		= qlcnic_83xx_encap_rx_offload,
	.encap_tx_offload		= qlcnic_83xx_encap_tx_offload,
};
83
/* NIC template for the VF: lifecycle hooks (idc work cancel, shutdown,
 * resume) are the SR-IOV versions; the rest reuse common/83xx handlers.
 */
static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
	.config_bridged_mode	= qlcnic_config_bridged_mode,
	.config_led		= qlcnic_config_led,
	.cancel_idc_work	= qlcnic_sriov_vf_cancel_fw_work,
	.napi_add		= qlcnic_83xx_napi_add,
	.napi_del		= qlcnic_83xx_napi_del,
	.shutdown		= qlcnic_sriov_vf_shutdown,
	.resume			= qlcnic_sriov_vf_resume,
	.config_ipaddr		= qlcnic_83xx_config_ipaddr,
	.clear_legacy_intr	= qlcnic_83xx_clear_legacy_intr,
};
95
/* Back-channel mailbox command metadata:
 * {command opcode, request arg count, response arg count}
 * Arg counts are in 32-bit words and include arg[0] (the opcode word);
 * consumed by qlcnic_sriov_alloc_bc_mbx_args().
 */
static const struct qlcnic_mailbox_metadata qlcnic_sriov_bc_mbx_tbl[] = {
	{QLCNIC_BC_CMD_CHANNEL_INIT, 2, 2},
	{QLCNIC_BC_CMD_CHANNEL_TERM, 2, 2},
	{QLCNIC_BC_CMD_GET_ACL, 3, 14},
	{QLCNIC_BC_CMD_CFG_GUEST_VLAN, 2, 2},
};
102
103 static inline bool qlcnic_sriov_bc_msg_check(u32 val)
104 {
105         return (val & (1 << QLC_BC_MSG)) ? true : false;
106 }
107
108 static inline bool qlcnic_sriov_channel_free_check(u32 val)
109 {
110         return (val & (1 << QLC_BC_CFREE)) ? true : false;
111 }
112
113 static inline bool qlcnic_sriov_flr_check(u32 val)
114 {
115         return (val & (1 << QLC_BC_FLR)) ? true : false;
116 }
117
118 static inline u8 qlcnic_sriov_target_func_id(u32 val)
119 {
120         return (val >> 4) & 0xff;
121 }
122
/* qlcnic_sriov_virtid_fn() - Derive the PCI routing id of a VF.
 * @adapter: PF adapter instance
 * @vf_id: zero-based VF index
 *
 * Reads the VF offset and stride from the PF's SR-IOV extended
 * capability and computes devfn + offset + stride * vf_id, truncated to
 * 8 bits.  Returns 0 when called on a VF or when the device exposes no
 * SR-IOV capability.
 */
static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id)
{
	struct pci_dev *dev = adapter->pdev;
	int pos;
	u16 stride, offset;

	if (qlcnic_sriov_vf_check(adapter))
		return 0;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	return (dev->devfn + offset + stride * vf_id) & 0xff;
}
140
141 int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
142 {
143         struct qlcnic_sriov *sriov;
144         struct qlcnic_back_channel *bc;
145         struct workqueue_struct *wq;
146         struct qlcnic_vport *vp;
147         struct qlcnic_vf_info *vf;
148         int err, i;
149
150         if (!qlcnic_sriov_enable_check(adapter))
151                 return -EIO;
152
153         sriov  = kzalloc(sizeof(struct qlcnic_sriov), GFP_KERNEL);
154         if (!sriov)
155                 return -ENOMEM;
156
157         adapter->ahw->sriov = sriov;
158         sriov->num_vfs = num_vfs;
159         bc = &sriov->bc;
160         sriov->vf_info = kzalloc(sizeof(struct qlcnic_vf_info) *
161                                  num_vfs, GFP_KERNEL);
162         if (!sriov->vf_info) {
163                 err = -ENOMEM;
164                 goto qlcnic_free_sriov;
165         }
166
167         wq = create_singlethread_workqueue("bc-trans");
168         if (wq == NULL) {
169                 err = -ENOMEM;
170                 dev_err(&adapter->pdev->dev,
171                         "Cannot create bc-trans workqueue\n");
172                 goto qlcnic_free_vf_info;
173         }
174
175         bc->bc_trans_wq = wq;
176
177         wq = create_singlethread_workqueue("async");
178         if (wq == NULL) {
179                 err = -ENOMEM;
180                 dev_err(&adapter->pdev->dev, "Cannot create async workqueue\n");
181                 goto qlcnic_destroy_trans_wq;
182         }
183
184         bc->bc_async_wq =  wq;
185         INIT_LIST_HEAD(&bc->async_cmd_list);
186         INIT_WORK(&bc->vf_async_work, qlcnic_sriov_handle_async_issue_cmd);
187         spin_lock_init(&bc->queue_lock);
188         bc->adapter = adapter;
189
190         for (i = 0; i < num_vfs; i++) {
191                 vf = &sriov->vf_info[i];
192                 vf->adapter = adapter;
193                 vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
194                 mutex_init(&vf->send_cmd_lock);
195                 spin_lock_init(&vf->vlan_list_lock);
196                 INIT_LIST_HEAD(&vf->rcv_act.wait_list);
197                 INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
198                 spin_lock_init(&vf->rcv_act.lock);
199                 spin_lock_init(&vf->rcv_pend.lock);
200                 init_completion(&vf->ch_free_cmpl);
201
202                 INIT_WORK(&vf->trans_work, qlcnic_sriov_process_bc_cmd);
203
204                 if (qlcnic_sriov_pf_check(adapter)) {
205                         vp = kzalloc(sizeof(struct qlcnic_vport), GFP_KERNEL);
206                         if (!vp) {
207                                 err = -ENOMEM;
208                                 goto qlcnic_destroy_async_wq;
209                         }
210                         sriov->vf_info[i].vp = vp;
211                         vp->vlan_mode = QLC_GUEST_VLAN_MODE;
212                         vp->max_tx_bw = MAX_BW;
213                         vp->min_tx_bw = MIN_BW;
214                         vp->spoofchk = false;
215                         random_ether_addr(vp->mac);
216                         dev_info(&adapter->pdev->dev,
217                                  "MAC Address %pM is configured for VF %d\n",
218                                  vp->mac, i);
219                 }
220         }
221
222         return 0;
223
224 qlcnic_destroy_async_wq:
225         while (i--)
226                 kfree(sriov->vf_info[i].vp);
227         destroy_workqueue(bc->bc_async_wq);
228
229 qlcnic_destroy_trans_wq:
230         destroy_workqueue(bc->bc_trans_wq);
231
232 qlcnic_free_vf_info:
233         kfree(sriov->vf_info);
234
235 qlcnic_free_sriov:
236         kfree(adapter->ahw->sriov);
237         return err;
238 }
239
/* qlcnic_sriov_cleanup_list() - Drain and free all transactions on a list.
 * @t_list: transaction list (rcv_act or rcv_pend of a VF)
 *
 * Holds the list spinlock (irqs off) for the whole drain: each
 * transaction is unlinked, its request/response payloads are released
 * through qlcnic_free_mbx_args() (a temporary cmd_args wraps the payload
 * pointers so the common free path can be reused), then the transaction
 * itself is freed.
 */
void qlcnic_sriov_cleanup_list(struct qlcnic_trans_list *t_list)
{
	struct qlcnic_bc_trans *trans;
	struct qlcnic_cmd_args cmd;
	unsigned long flags;

	spin_lock_irqsave(&t_list->lock, flags);

	while (!list_empty(&t_list->wait_list)) {
		trans = list_first_entry(&t_list->wait_list,
					 struct qlcnic_bc_trans, list);
		list_del(&trans->list);
		t_list->count--;
		cmd.req.arg = (u32 *)trans->req_pay;
		cmd.rsp.arg = (u32 *)trans->rsp_pay;
		qlcnic_free_mbx_args(&cmd);
		qlcnic_sriov_cleanup_transaction(trans);
	}

	spin_unlock_irqrestore(&t_list->lock, flags);
}
261
/* __qlcnic_sriov_cleanup() - Tear down all SR-IOV state.
 * @adapter: adapter instance
 *
 * Reverses qlcnic_sriov_init(): flushes the async command list and
 * destroys the async workqueue, then per VF drains pending transactions,
 * cancels in-flight transaction work and drains active transactions
 * (pending list is drained before cancel so trans_work cannot pick up
 * new entries), destroys the transaction workqueue, and finally frees
 * the vports, vf_info array and the sriov context itself.
 */
void __qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct qlcnic_back_channel *bc = &sriov->bc;
	struct qlcnic_vf_info *vf;
	int i;

	if (!qlcnic_sriov_enable_check(adapter))
		return;

	qlcnic_sriov_cleanup_async_list(bc);
	destroy_workqueue(bc->bc_async_wq);

	for (i = 0; i < sriov->num_vfs; i++) {
		vf = &sriov->vf_info[i];
		qlcnic_sriov_cleanup_list(&vf->rcv_pend);
		cancel_work_sync(&vf->trans_work);
		qlcnic_sriov_cleanup_list(&vf->rcv_act);
	}

	destroy_workqueue(bc->bc_trans_wq);

	for (i = 0; i < sriov->num_vfs; i++)
		kfree(sriov->vf_info[i].vp);

	kfree(sriov->vf_info);
	kfree(adapter->ahw->sriov);
}
290
/* VF-side teardown: notify the PF that the channel is terminating,
 * disable back-channel interrupts, then free all SR-IOV state.
 */
static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter *adapter)
{
	qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
	qlcnic_sriov_cfg_bc_intr(adapter, 0);
	__qlcnic_sriov_cleanup(adapter);
}
297
/* qlcnic_sriov_cleanup() - Common SR-IOV teardown entry point.
 * @adapter: adapter instance
 *
 * No-op unless SR-IOV was enabled.  Frees the per-VF VLAN tracking,
 * then dispatches to the PF- or VF-specific cleanup path.
 */
void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
{
	if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
		return;

	qlcnic_sriov_free_vlans(adapter);

	if (qlcnic_sriov_pf_check(adapter))
		qlcnic_sriov_pf_cleanup(adapter);

	if (qlcnic_sriov_vf_check(adapter))
		qlcnic_sriov_vf_cleanup(adapter);
}
311
/* qlcnic_sriov_post_bc_msg() - Post one back-channel fragment via the mailbox.
 * @adapter: adapter instance
 * @hdr: BC header of the fragment to send
 * @pay: payload of the fragment
 * @pci_func: destination PCI function
 * @size: payload size (assumed in mailbox words - TODO confirm against
 *        the enqueue_cmd consumer)
 *
 * Wraps the fragment in a QLC_83XX_MBX_POST_BC_OP command, enqueues it
 * on the mailbox and waits for completion with the timeout chosen by
 * enqueue_cmd().  On timeout the mailbox workqueue is flushed and the
 * (possibly stale) response opcode is still returned.  Returns a
 * negative errno only when the command could not be enqueued.
 */
static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
				    u32 *pay, u8 pci_func, u8 size)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_mailbox *mbx = ahw->mailbox;
	struct qlcnic_cmd_args cmd;
	unsigned long timeout;
	int err;

	memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
	cmd.hdr = hdr;
	cmd.pay = pay;
	cmd.pay_size = size;
	cmd.func_num = pci_func;
	cmd.op_type = QLC_83XX_MBX_POST_BC_OP;
	cmd.cmd_op = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;

	err = mbx->ops->enqueue_cmd(adapter, &cmd, &timeout);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"%s: Mailbox not available, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
			__func__, cmd.cmd_op, cmd.type, ahw->pci_func,
			ahw->op_mode);
		return err;
	}

	if (!wait_for_completion_timeout(&cmd.completion, timeout)) {
		dev_err(&adapter->pdev->dev,
			"%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
			__func__, cmd.cmd_op, cmd.type, ahw->pci_func,
			ahw->op_mode);
		flush_workqueue(mbx->work_q);
	}

	return cmd.rsp_opcode;
}
348
349 static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter)
350 {
351         adapter->num_rxd = QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF;
352         adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
353         adapter->num_jumbo_rxd = QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF;
354         adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
355         adapter->num_txd = MAX_CMD_DESCRIPTORS;
356         adapter->max_rds_rings = MAX_RDS_RINGS;
357 }
358
/* qlcnic_sriov_get_vf_vport_info() - Query vport limits from firmware.
 * @adapter: adapter instance
 * @npar_info: output; populated with the vport's resource limits
 * @vport_id: vport to query (0 for the VF's own vport)
 *
 * Issues QLCNIC_CMD_GET_NIC_INFO for the given vport.  rsp.arg[2]'s low
 * 16 bits are a validity bitmap: each BIT_n gates one optional field
 * below; the trailing four fields (max_rx_ques through
 * max_remote_ipv6_addrs) are always taken from the response.  Returns 0
 * on success or a negative errno from the mailbox layer.
 */
int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *adapter,
				   struct qlcnic_info *npar_info, u16 vport_id)
{
	struct device *dev = &adapter->pdev->dev;
	struct qlcnic_cmd_args cmd;
	int err;
	u32 status;

	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
	if (err)
		return err;

	cmd.req.arg[1] = vport_id << 16 | 0x1;
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Failed to get vport info, err=%d\n", err);
		qlcnic_free_mbx_args(&cmd);
		return err;
	}

	/* Low word of arg[2] tells which of the fields below are valid */
	status = cmd.rsp.arg[2] & 0xffff;
	if (status & BIT_0)
		npar_info->min_tx_bw = MSW(cmd.rsp.arg[2]);
	if (status & BIT_1)
		npar_info->max_tx_bw = LSW(cmd.rsp.arg[3]);
	if (status & BIT_2)
		npar_info->max_tx_ques = MSW(cmd.rsp.arg[3]);
	if (status & BIT_3)
		npar_info->max_tx_mac_filters = LSW(cmd.rsp.arg[4]);
	if (status & BIT_4)
		npar_info->max_rx_mcast_mac_filters = MSW(cmd.rsp.arg[4]);
	if (status & BIT_5)
		npar_info->max_rx_ucast_mac_filters = LSW(cmd.rsp.arg[5]);
	if (status & BIT_6)
		npar_info->max_rx_ip_addr = MSW(cmd.rsp.arg[5]);
	if (status & BIT_7)
		npar_info->max_rx_lro_flow = LSW(cmd.rsp.arg[6]);
	if (status & BIT_8)
		npar_info->max_rx_status_rings = MSW(cmd.rsp.arg[6]);
	if (status & BIT_9)
		npar_info->max_rx_buf_rings = LSW(cmd.rsp.arg[7]);

	/* These fields are unconditional in the response */
	npar_info->max_rx_ques = MSW(cmd.rsp.arg[7]);
	npar_info->max_tx_vlan_keys = LSW(cmd.rsp.arg[8]);
	npar_info->max_local_ipv6_addrs = MSW(cmd.rsp.arg[8]);
	npar_info->max_remote_ipv6_addrs = LSW(cmd.rsp.arg[9]);

	dev_info(dev, "\n\tmin_tx_bw: %d, max_tx_bw: %d max_tx_ques: %d,\n"
		 "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n"
		 "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n"
		 "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n"
		 "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n"
		 "\tlocal_ipv6_addr: %d, remote_ipv6_addr: %d\n",
		 npar_info->min_tx_bw, npar_info->max_tx_bw,
		 npar_info->max_tx_ques, npar_info->max_tx_mac_filters,
		 npar_info->max_rx_mcast_mac_filters,
		 npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr,
		 npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings,
		 npar_info->max_rx_buf_rings, npar_info->max_rx_ques,
		 npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs,
		 npar_info->max_remote_ipv6_addrs);

	qlcnic_free_mbx_args(&cmd);
	return err;
}
425
426 static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter *adapter,
427                                       struct qlcnic_cmd_args *cmd)
428 {
429         adapter->rx_pvid = MSW(cmd->rsp.arg[1]) & 0xffff;
430         adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
431         return 0;
432 }
433
434 static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
435                                             struct qlcnic_cmd_args *cmd)
436 {
437         struct qlcnic_sriov *sriov = adapter->ahw->sriov;
438         int i, num_vlans, ret;
439         u16 *vlans;
440
441         if (sriov->allowed_vlans)
442                 return 0;
443
444         sriov->any_vlan = cmd->rsp.arg[2] & 0xf;
445         sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16;
446         dev_info(&adapter->pdev->dev, "Number of allowed Guest VLANs = %d\n",
447                  sriov->num_allowed_vlans);
448
449         ret = qlcnic_sriov_alloc_vlans(adapter);
450         if (ret)
451                 return ret;
452
453         if (!sriov->any_vlan)
454                 return 0;
455
456         num_vlans = sriov->num_allowed_vlans;
457         sriov->allowed_vlans = kzalloc(sizeof(u16) * num_vlans, GFP_KERNEL);
458         if (!sriov->allowed_vlans)
459                 return -ENOMEM;
460
461         vlans = (u16 *)&cmd->rsp.arg[3];
462         for (i = 0; i < num_vlans; i++)
463                 sriov->allowed_vlans[i] = vlans[i];
464
465         return 0;
466 }
467
/* qlcnic_sriov_get_vf_acl() - Fetch this VF's ACL from the PF.
 * @adapter: VF adapter instance
 *
 * Sends the back-channel GET_ACL command, reads the VLAN mode from the
 * low two bits of rsp.arg[1], and applies the mode-specific settings
 * (guest VLAN list or PVID).  Other modes leave the adapter untouched.
 * Returns 0 on success or a negative errno.
 */
static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct qlcnic_cmd_args cmd;
	int ret = 0;

	memset(&cmd, 0, sizeof(cmd));
	ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
	if (ret)
		return ret;

	ret = qlcnic_issue_cmd(adapter, &cmd);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Failed to get ACL, err=%d\n",
			ret);
	} else {
		sriov->vlan_mode = cmd.rsp.arg[1] & 0x3;
		switch (sriov->vlan_mode) {
		case QLC_GUEST_VLAN_MODE:
			ret = qlcnic_sriov_set_guest_vlan_mode(adapter, &cmd);
			break;
		case QLC_PVID_MODE:
			ret = qlcnic_sriov_set_pvid_mode(adapter, &cmd);
			break;
		}
	}

	qlcnic_free_mbx_args(&cmd);
	return ret;
}
498
/* qlcnic_sriov_vf_init_driver() - Pull device capabilities into the VF.
 * @adapter: VF adapter instance
 *
 * Queries the VF's own vport (id 0) for resource limits, reads NIC and
 * port info from firmware, programs default ring sizes and copies the
 * relevant fields (port, switch mode, MTU, op mode, capabilities) into
 * the hardware context.  Returns 0 on success, a negative errno from
 * the vport query, or -EIO when the NIC/port info reads fail.
 */
static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_info nic_info;
	int err;

	err = qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, 0);
	if (err)
		return err;

	ahw->max_mc_count = nic_info.max_rx_mcast_mac_filters;

	err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func);
	if (err)
		return -EIO;

	if (qlcnic_83xx_get_port_info(adapter))
		return -EIO;

	qlcnic_sriov_vf_cfg_buff_desc(adapter);
	adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
	dev_info(&adapter->pdev->dev, "HAL Version: %d\n",
		 adapter->ahw->fw_hal_version);

	ahw->physical_port = (u8) nic_info.phys_port;
	ahw->switch_mode = nic_info.switch_mode;
	ahw->max_mtu = nic_info.max_mtu;
	ahw->op_mode = nic_info.op_mode;
	ahw->capabilities = nic_info.capabilities;
	return 0;
}
530
/* qlcnic_sriov_setup_vf() - Bring up the data path for a VF.
 * @adapter: VF adapter instance
 * @pci_using_dac: passed through to netdev setup (64-bit DMA flag)
 *
 * Sequence: interrupt setup, mailbox interrupt, SR-IOV state (one VF),
 * back-channel interrupt enable, CHANNEL_INIT handshake with the PF,
 * capability query, ACL fetch, netdev registration, then schedules the
 * periodic device-state poll.  Each failure unwinds every prior step
 * via the goto chain below (in reverse order of setup).  Returns 0 on
 * success or a negative errno.
 */
static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
				 int pci_using_dac)
{
	int err;

	adapter->flags |= QLCNIC_VLAN_FILTERING;
	adapter->ahw->total_nic_func = 1;
	INIT_LIST_HEAD(&adapter->vf_mc_list);
	if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
		dev_warn(&adapter->pdev->dev,
			 "Device does not support MSI interrupts\n");

	/* compute and set default and max tx/sds rings */
	qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING);
	qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING);

	err = qlcnic_setup_intr(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
		goto err_out_disable_msi;
	}

	err = qlcnic_83xx_setup_mbx_intr(adapter);
	if (err)
		goto err_out_disable_msi;

	err = qlcnic_sriov_init(adapter, 1);
	if (err)
		goto err_out_disable_mbx_intr;

	err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
	if (err)
		goto err_out_cleanup_sriov;

	err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
	if (err)
		goto err_out_disable_bc_intr;

	err = qlcnic_sriov_vf_init_driver(adapter);
	if (err)
		goto err_out_send_channel_term;

	err = qlcnic_sriov_get_vf_acl(adapter);
	if (err)
		goto err_out_send_channel_term;

	err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
	if (err)
		goto err_out_send_channel_term;

	pci_set_drvdata(adapter->pdev, adapter);
	dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
		 adapter->netdev->name);

	qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
			     adapter->ahw->idc.delay);
	return 0;

err_out_send_channel_term:
	qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);

err_out_disable_bc_intr:
	qlcnic_sriov_cfg_bc_intr(adapter, 0);

err_out_cleanup_sriov:
	__qlcnic_sriov_cleanup(adapter);

err_out_disable_mbx_intr:
	qlcnic_83xx_free_mbx_intr(adapter);

err_out_disable_msi:
	qlcnic_teardown_intr(adapter);
	return err;
}
605
606 static int qlcnic_sriov_check_dev_ready(struct qlcnic_adapter *adapter)
607 {
608         u32 state;
609
610         do {
611                 msleep(20);
612                 if (++adapter->fw_fail_cnt > QLC_BC_CMD_MAX_RETRY_CNT)
613                         return -EIO;
614                 state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
615         } while (state != QLC_83XX_IDC_DEV_READY);
616
617         return 0;
618 }
619
/* qlcnic_sriov_vf_init() - Top-level VF probe-time initialization.
 * @adapter: VF adapter instance
 * @pci_using_dac: 64-bit DMA flag, forwarded to qlcnic_sriov_setup_vf()
 *
 * Seeds the IDC state (poll delay, module-loaded bit, cleared failure
 * counters), waits for the firmware to report DEV_READY, then performs
 * the full VF bring-up.  A MAC-read failure is only warned about, not
 * fatal.  Clears __QLCNIC_RESETTING on success.  Returns 0 or a
 * negative errno.
 */
int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	int err;

	set_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status);
	ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
	ahw->reset_context = 0;
	adapter->fw_fail_cnt = 0;
	ahw->msix_supported = 1;
	adapter->need_fw_reset = 0;
	adapter->flags |= QLCNIC_TX_INTR_SHARED;

	err = qlcnic_sriov_check_dev_ready(adapter);
	if (err)
		return err;

	err = qlcnic_sriov_setup_vf(adapter, pci_using_dac);
	if (err)
		return err;

	if (qlcnic_read_mac_addr(adapter))
		dev_warn(&adapter->pdev->dev, "failed to read mac addr\n");

	INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);

	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	return 0;
}
649
650 void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *adapter)
651 {
652         struct qlcnic_hardware_context *ahw = adapter->ahw;
653
654         ahw->op_mode = QLCNIC_SRIOV_VF_FUNC;
655         dev_info(&adapter->pdev->dev,
656                  "HAL Version: %d Non Privileged SRIOV function\n",
657                  ahw->fw_hal_version);
658         adapter->nic_ops = &qlcnic_sriov_vf_ops;
659         set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
660         return;
661 }
662
/* Point the hardware context at the VF ops table and the shared 83xx
 * register tables.
 */
void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *ahw)
{
	ahw->hw_ops		= &qlcnic_sriov_vf_hw_ops;
	ahw->reg_tbl		= (u32 *)qlcnic_83xx_reg_tbl;
	ahw->ext_reg_tbl	= (u32 *)qlcnic_83xx_ext_reg_tbl;
}
669
670 static u32 qlcnic_sriov_get_bc_paysize(u32 real_pay_size, u8 curr_frag)
671 {
672         u32 pay_size;
673
674         pay_size = real_pay_size / ((curr_frag + 1) * QLC_BC_PAYLOAD_SZ);
675
676         if (pay_size)
677                 pay_size = QLC_BC_PAYLOAD_SZ;
678         else
679                 pay_size = real_pay_size % QLC_BC_PAYLOAD_SZ;
680
681         return pay_size;
682 }
683
684 int qlcnic_sriov_func_to_index(struct qlcnic_adapter *adapter, u8 pci_func)
685 {
686         struct qlcnic_vf_info *vf_info = adapter->ahw->sriov->vf_info;
687         u8 i;
688
689         if (qlcnic_sriov_vf_check(adapter))
690                 return 0;
691
692         for (i = 0; i < adapter->ahw->sriov->num_vfs; i++) {
693                 if (vf_info[i].pci_func == pci_func)
694                         return i;
695         }
696
697         return -EINVAL;
698 }
699
700 static inline int qlcnic_sriov_alloc_bc_trans(struct qlcnic_bc_trans **trans)
701 {
702         *trans = kzalloc(sizeof(struct qlcnic_bc_trans), GFP_ATOMIC);
703         if (!*trans)
704                 return -ENOMEM;
705
706         init_completion(&(*trans)->resp_cmpl);
707         return 0;
708 }
709
710 static inline int qlcnic_sriov_alloc_bc_msg(struct qlcnic_bc_hdr **hdr,
711                                             u32 size)
712 {
713         *hdr = kzalloc(sizeof(struct qlcnic_bc_hdr) * size, GFP_ATOMIC);
714         if (!*hdr)
715                 return -ENOMEM;
716
717         return 0;
718 }
719
/* qlcnic_sriov_alloc_bc_mbx_args() - Build cmd_args for a BC command.
 * @mbx: command argument structure to populate
 * @type: back-channel command opcode (QLCNIC_BC_CMD_*)
 *
 * Looks @type up in qlcnic_sriov_bc_mbx_tbl, allocates zeroed request
 * and response arg arrays of the listed sizes (atomic context), and
 * seeds arg[0] of each with the opcode and word count.  On response
 * allocation failure the request array is freed again.  Returns 0,
 * -ENOMEM, or -EINVAL for an unknown opcode.
 */
static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type)
{
	const struct qlcnic_mailbox_metadata *mbx_tbl;
	int i, size;

	mbx_tbl = qlcnic_sriov_bc_mbx_tbl;
	size = ARRAY_SIZE(qlcnic_sriov_bc_mbx_tbl);

	for (i = 0; i < size; i++) {
		if (type == mbx_tbl[i].cmd) {
			mbx->op_type = QLC_BC_CMD;
			mbx->req.num = mbx_tbl[i].in_args;
			mbx->rsp.num = mbx_tbl[i].out_args;
			mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
					       GFP_ATOMIC);
			if (!mbx->req.arg)
				return -ENOMEM;
			mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32),
					       GFP_ATOMIC);
			if (!mbx->rsp.arg) {
				kfree(mbx->req.arg);
				mbx->req.arg = NULL;
				return -ENOMEM;
			}
			mbx->req.arg[0] = (type | (mbx->req.num << 16) |
					   (3 << 29));
			mbx->rsp.arg[0] = (type & 0xffff) | mbx->rsp.num << 16;
			return 0;
		}
	}
	return -EINVAL;
}
752
/* qlcnic_sriov_prepare_bc_hdr() - Fragment a command into BC headers.
 * @trans: transaction whose header/payload fields are set up
 * @cmd: mailbox args providing (or receiving) the payload pointers
 * @seq: sequence id stamped into every fragment and the transaction
 * @msg_type: QLC_BC_COMMAND (sender side) or QLC_BC_RESPONSE (receiver)
 *
 * For a COMMAND, the cmd arg arrays become the transaction payloads and
 * header arrays are allocated for both directions, sized by payload
 * length in QLC_BC_PAYLOAD_SZ fragments (rounded up); the request
 * headers are then filled in.  For a RESPONSE, the mapping is reversed:
 * the transaction payloads become the cmd arg arrays and the response
 * headers are filled in.  frag_num is 1-based.  Returns 0 or -ENOMEM.
 * NOTE(review): on the second alloc failure the first header array is
 * left for qlcnic_sriov_cleanup_transaction() to free - presumably the
 * caller always cleans up the transaction on error; confirm at call
 * sites.
 */
static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans,
				       struct qlcnic_cmd_args *cmd,
				       u16 seq, u8 msg_type)
{
	struct qlcnic_bc_hdr *hdr;
	int i;
	u32 num_regs, bc_pay_sz;
	u16 remainder;
	u8 cmd_op, num_frags, t_num_frags;

	bc_pay_sz = QLC_BC_PAYLOAD_SZ;
	if (msg_type == QLC_BC_COMMAND) {
		trans->req_pay = (struct qlcnic_bc_payload *)cmd->req.arg;
		trans->rsp_pay = (struct qlcnic_bc_payload *)cmd->rsp.arg;
		num_regs = cmd->req.num;
		trans->req_pay_size = (num_regs * 4);
		num_regs = cmd->rsp.num;
		trans->rsp_pay_size = (num_regs * 4);
		cmd_op = cmd->req.arg[0] & 0xff;
		remainder = (trans->req_pay_size) % (bc_pay_sz);
		num_frags = (trans->req_pay_size) / (bc_pay_sz);
		if (remainder)
			num_frags++;
		t_num_frags = num_frags;
		if (qlcnic_sriov_alloc_bc_msg(&trans->req_hdr, num_frags))
			return -ENOMEM;
		remainder = (trans->rsp_pay_size) % (bc_pay_sz);
		num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
		if (remainder)
			num_frags++;
		if (qlcnic_sriov_alloc_bc_msg(&trans->rsp_hdr, num_frags))
			return -ENOMEM;
		/* fill the request headers below, not the response ones */
		num_frags  = t_num_frags;
		hdr = trans->req_hdr;
	}  else {
		cmd->req.arg = (u32 *)trans->req_pay;
		cmd->rsp.arg = (u32 *)trans->rsp_pay;
		cmd_op = cmd->req.arg[0] & 0xff;
		cmd->cmd_op = cmd_op;
		remainder = (trans->rsp_pay_size) % (bc_pay_sz);
		num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
		if (remainder)
			num_frags++;
		cmd->req.num = trans->req_pay_size / 4;
		cmd->rsp.num = trans->rsp_pay_size / 4;
		hdr = trans->rsp_hdr;
		cmd->op_type = trans->req_hdr->op_type;
	}

	trans->trans_id = seq;
	trans->cmd_id = cmd_op;
	for (i = 0; i < num_frags; i++) {
		hdr[i].version = 2;
		hdr[i].msg_type = msg_type;
		hdr[i].op_type = cmd->op_type;
		hdr[i].num_cmds = 1;
		hdr[i].num_frags = num_frags;
		hdr[i].frag_num = i + 1;
		hdr[i].cmd_op = cmd_op;
		hdr[i].seq_id = seq;
	}
	return 0;
}
816
817 static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *trans)
818 {
819         if (!trans)
820                 return;
821         kfree(trans->req_hdr);
822         kfree(trans->rsp_hdr);
823         kfree(trans);
824 }
825
/* Detach a completed transaction from the VF's bookkeeping.
 * For QLC_BC_RESPONSE, unlink it from the active receive list and
 * return 1 when further transactions remain queued (caller uses this
 * to reschedule processing). For QLC_BC_COMMAND, wait until any
 * in-flight send finishes and drop the outstanding-command pointer.
 */
static int qlcnic_sriov_clear_trans(struct qlcnic_vf_info *vf,
                                    struct qlcnic_bc_trans *trans, u8 type)
{
        struct qlcnic_trans_list *t_list;
        unsigned long flags;
        int ret = 0;

        if (type == QLC_BC_RESPONSE) {
                t_list = &vf->rcv_act;
                spin_lock_irqsave(&t_list->lock, flags);
                t_list->count--;
                list_del(&trans->list);
                if (t_list->count > 0)
                        ret = 1;
                spin_unlock_irqrestore(&t_list->lock, flags);
        }
        if (type == QLC_BC_COMMAND) {
                /* QLC_BC_VF_SEND doubles as a busy flag guarding
                 * vf->send_cmd; spin (politely) until it is free.
                 */
                while (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
                        msleep(100);
                vf->send_cmd = NULL;
                clear_bit(QLC_BC_VF_SEND, &vf->state);
        }
        return ret;
}
850
851 static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov *sriov,
852                                          struct qlcnic_vf_info *vf,
853                                          work_func_t func)
854 {
855         if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
856             vf->adapter->need_fw_reset)
857                 return;
858
859         queue_work(sriov->bc.bc_trans_wq, &vf->trans_work);
860 }
861
862 static inline void qlcnic_sriov_wait_for_resp(struct qlcnic_bc_trans *trans)
863 {
864         struct completion *cmpl = &trans->resp_cmpl;
865
866         if (wait_for_completion_timeout(cmpl, QLC_MBOX_RESP_TIMEOUT))
867                 trans->trans_state = QLC_END;
868         else
869                 trans->trans_state = QLC_ABORT;
870
871         return;
872 }
873
874 static void qlcnic_sriov_handle_multi_frags(struct qlcnic_bc_trans *trans,
875                                             u8 type)
876 {
877         if (type == QLC_BC_RESPONSE) {
878                 trans->curr_rsp_frag++;
879                 if (trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
880                         trans->trans_state = QLC_INIT;
881                 else
882                         trans->trans_state = QLC_END;
883         } else {
884                 trans->curr_req_frag++;
885                 if (trans->curr_req_frag < trans->req_hdr->num_frags)
886                         trans->trans_state = QLC_INIT;
887                 else
888                         trans->trans_state = QLC_WAIT_FOR_RESP;
889         }
890 }
891
892 static void qlcnic_sriov_wait_for_channel_free(struct qlcnic_bc_trans *trans,
893                                                u8 type)
894 {
895         struct qlcnic_vf_info *vf = trans->vf;
896         struct completion *cmpl = &vf->ch_free_cmpl;
897
898         if (!wait_for_completion_timeout(cmpl, QLC_MBOX_CH_FREE_TIMEOUT)) {
899                 trans->trans_state = QLC_ABORT;
900                 return;
901         }
902
903         clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
904         qlcnic_sriov_handle_multi_frags(trans, type);
905 }
906
/* Copy one back-channel message fragment out of the FW mailbox
 * registers: the header words first (starting at register 2), then
 * 'size' bytes of payload, all read as 32-bit words.
 */
static void qlcnic_sriov_pull_bc_msg(struct qlcnic_adapter *adapter,
                                     u32 *hdr, u32 *pay, u32 size)
{
        struct qlcnic_hardware_context *ahw = adapter->ahw;
        u32 fw_mbx;
        u8 i, max = 2, hdr_size, j;

        hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
        max = (size / sizeof(u32)) + hdr_size;

        /* NOTE(review): fw_mbx is read but never used; presumably the
         * readl of mailbox register 0 itself is required by the mailbox
         * protocol — confirm before removing.
         */
        fw_mbx = readl(QLCNIC_MBX_FW(ahw, 0));
        for (i = 2, j = 0; j < hdr_size; i++, j++)
                *(hdr++) = readl(QLCNIC_MBX_FW(ahw, i));
        for (; j < max; i++, j++)
                *(pay++) = readl(QLCNIC_MBX_FW(ahw, i));
}
923
924 static int __qlcnic_sriov_issue_bc_post(struct qlcnic_vf_info *vf)
925 {
926         int ret = -EBUSY;
927         u32 timeout = 10000;
928
929         do {
930                 if (!test_and_set_bit(QLC_BC_VF_CHANNEL, &vf->state)) {
931                         ret = 0;
932                         break;
933                 }
934                 mdelay(1);
935         } while (--timeout);
936
937         return ret;
938 }
939
940 static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans *trans, u8 type)
941 {
942         struct qlcnic_vf_info *vf = trans->vf;
943         u32 pay_size, hdr_size;
944         u32 *hdr, *pay;
945         int ret;
946         u8 pci_func = trans->func_id;
947
948         if (__qlcnic_sriov_issue_bc_post(vf))
949                 return -EBUSY;
950
951         if (type == QLC_BC_COMMAND) {
952                 hdr = (u32 *)(trans->req_hdr + trans->curr_req_frag);
953                 pay = (u32 *)(trans->req_pay + trans->curr_req_frag);
954                 hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
955                 pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
956                                                        trans->curr_req_frag);
957                 pay_size = (pay_size / sizeof(u32));
958         } else {
959                 hdr = (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag);
960                 pay = (u32 *)(trans->rsp_pay + trans->curr_rsp_frag);
961                 hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
962                 pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
963                                                        trans->curr_rsp_frag);
964                 pay_size = (pay_size / sizeof(u32));
965         }
966
967         ret = qlcnic_sriov_post_bc_msg(vf->adapter, hdr, pay,
968                                        pci_func, pay_size);
969         return ret;
970 }
971
/* Drive one direction of a back-channel transaction's state machine to
 * completion. Returns 0 when the transaction reaches QLC_END, -EIO on
 * abort or an unexpected state.
 */
static int __qlcnic_sriov_send_bc_msg(struct qlcnic_bc_trans *trans,
                                      struct qlcnic_vf_info *vf, u8 type)
{
        bool flag = true;
        int err = -EIO;

        while (flag) {
                /* An FLR in progress or pending FW reset cancels the
                 * transaction regardless of its current state.
                 */
                if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
                    vf->adapter->need_fw_reset)
                        trans->trans_state = QLC_ABORT;

                switch (trans->trans_state) {
                case QLC_INIT:
                        /* Post the current fragment, then wait for the
                         * channel-free acknowledgement.
                         */
                        trans->trans_state = QLC_WAIT_FOR_CHANNEL_FREE;
                        if (qlcnic_sriov_issue_bc_post(trans, type))
                                trans->trans_state = QLC_ABORT;
                        break;
                case QLC_WAIT_FOR_CHANNEL_FREE:
                        qlcnic_sriov_wait_for_channel_free(trans, type);
                        break;
                case QLC_WAIT_FOR_RESP:
                        qlcnic_sriov_wait_for_resp(trans);
                        break;
                case QLC_END:
                        err = 0;
                        flag = false;
                        break;
                case QLC_ABORT:
                        err = -EIO;
                        flag = false;
                        /* Release the channel claim taken in QLC_INIT */
                        clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
                        break;
                default:
                        err = -EIO;
                        flag = false;
                }
        }
        return err;
}
1011
/* Send a prepared back-channel command transaction to the function
 * identified by pci_func and wait for its response.
 * Returns 0 on success, -EIO on lookup failure, channel-state
 * violation, or transmission failure.
 */
static int qlcnic_sriov_send_bc_cmd(struct qlcnic_adapter *adapter,
                                    struct qlcnic_bc_trans *trans, int pci_func)
{
        struct qlcnic_vf_info *vf;
        int err, index = qlcnic_sriov_func_to_index(adapter, pci_func);

        if (index < 0)
                return -EIO;

        vf = &adapter->ahw->sriov->vf_info[index];
        trans->vf = vf;
        trans->func_id = pci_func;

        /* Until the channel is established (QLC_BC_VF_STATE set) the
         * only command a VF may send is CHANNEL_INIT; a PF may send
         * nothing at all.
         */
        if (!test_bit(QLC_BC_VF_STATE, &vf->state)) {
                if (qlcnic_sriov_pf_check(adapter))
                        return -EIO;
                if (qlcnic_sriov_vf_check(adapter) &&
                    trans->cmd_id != QLCNIC_BC_CMD_CHANNEL_INIT)
                        return -EIO;
        }

        /* Serialize sends per VF; send_cmd lets the response handler
         * match an incoming reply to this transaction.
         */
        mutex_lock(&vf->send_cmd_lock);
        vf->send_cmd = trans;
        err = __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_COMMAND);
        qlcnic_sriov_clear_trans(vf, trans, QLC_BC_COMMAND);
        mutex_unlock(&vf->send_cmd_lock);
        return err;
}
1040
1041 static void __qlcnic_sriov_process_bc_cmd(struct qlcnic_adapter *adapter,
1042                                           struct qlcnic_bc_trans *trans,
1043                                           struct qlcnic_cmd_args *cmd)
1044 {
1045 #ifdef CONFIG_QLCNIC_SRIOV
1046         if (qlcnic_sriov_pf_check(adapter)) {
1047                 qlcnic_sriov_pf_process_bc_cmd(adapter, trans, cmd);
1048                 return;
1049         }
1050 #endif
1051         cmd->rsp.arg[0] |= (0x9 << 25);
1052         return;
1053 }
1054
/* Work handler: take the oldest transaction off the VF's active
 * receive list, execute the command it carries, and transmit the
 * response back over the channel.
 */
static void qlcnic_sriov_process_bc_cmd(struct work_struct *work)
{
        struct qlcnic_vf_info *vf = container_of(work, struct qlcnic_vf_info,
                                                 trans_work);
        struct qlcnic_bc_trans *trans = NULL;
        struct qlcnic_adapter *adapter  = vf->adapter;
        struct qlcnic_cmd_args cmd;
        u8 req;

        /* Skip processing while a FW reset or VF FLR is pending */
        if (adapter->need_fw_reset)
                return;

        if (test_bit(QLC_BC_VF_FLR, &vf->state))
                return;

        memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
        trans = list_first_entry(&vf->rcv_act.wait_list,
                                 struct qlcnic_bc_trans, list);
        adapter = vf->adapter;

        /* Build the response headers/payload views over the same
         * transaction, reusing the request's sequence id.
         */
        if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, trans->req_hdr->seq_id,
                                        QLC_BC_RESPONSE))
                goto cleanup_trans;

        __qlcnic_sriov_process_bc_cmd(adapter, trans, &cmd);
        trans->trans_state = QLC_INIT;
        __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_RESPONSE);

cleanup_trans:
        qlcnic_free_mbx_args(&cmd);
        /* clear_trans() returns nonzero when more transactions remain
         * on the active list; reschedule ourselves to handle the next.
         */
        req = qlcnic_sriov_clear_trans(vf, trans, QLC_BC_RESPONSE);
        qlcnic_sriov_cleanup_transaction(trans);
        if (req)
                qlcnic_sriov_schedule_bc_cmd(adapter->ahw->sriov, vf,
                                             qlcnic_sriov_process_bc_cmd);
}
1091
/* Handle one response fragment arriving for the VF's outstanding
 * command. Once the final fragment has been pulled from the mailbox,
 * the sender waiting in qlcnic_sriov_wait_for_resp() is completed.
 */
static void qlcnic_sriov_handle_bc_resp(struct qlcnic_bc_hdr *hdr,
                                        struct qlcnic_vf_info *vf)
{
        struct qlcnic_bc_trans *trans;
        u32 pay_size;

        /* QLC_BC_VF_SEND guards vf->send_cmd against the send path;
         * if it is already held, drop this fragment.
         */
        if (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
                return;

        trans = vf->send_cmd;

        if (trans == NULL)
                goto clear_send;

        /* Ignore replies that do not match the outstanding sequence id */
        if (trans->trans_id != hdr->seq_id)
                goto clear_send;

        pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
                                               trans->curr_rsp_frag);
        qlcnic_sriov_pull_bc_msg(vf->adapter,
                                 (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag),
                                 (u32 *)(trans->rsp_pay + trans->curr_rsp_frag),
                                 pay_size);
        if (++trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
                goto clear_send;

        complete(&trans->resp_cmpl);

clear_send:
        clear_bit(QLC_BC_VF_SEND, &vf->state);
}
1123
1124 int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
1125                                 struct qlcnic_vf_info *vf,
1126                                 struct qlcnic_bc_trans *trans)
1127 {
1128         struct qlcnic_trans_list *t_list = &vf->rcv_act;
1129
1130         t_list->count++;
1131         list_add_tail(&trans->list, &t_list->wait_list);
1132         if (t_list->count == 1)
1133                 qlcnic_sriov_schedule_bc_cmd(sriov, vf,
1134                                              qlcnic_sriov_process_bc_cmd);
1135         return 0;
1136 }
1137
1138 static int qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
1139                                      struct qlcnic_vf_info *vf,
1140                                      struct qlcnic_bc_trans *trans)
1141 {
1142         struct qlcnic_trans_list *t_list = &vf->rcv_act;
1143
1144         spin_lock(&t_list->lock);
1145
1146         __qlcnic_sriov_add_act_list(sriov, vf, trans);
1147
1148         spin_unlock(&t_list->lock);
1149         return 0;
1150 }
1151
/* Match an incoming continuation fragment (frag_num > 1) against the
 * pending-transaction list by sequence id. When the final fragment
 * arrives, move the transaction from the pending to the active list.
 */
static void qlcnic_sriov_handle_pending_trans(struct qlcnic_sriov *sriov,
                                              struct qlcnic_vf_info *vf,
                                              struct qlcnic_bc_hdr *hdr)
{
        struct qlcnic_bc_trans *trans = NULL;
        struct list_head *node;
        u32 pay_size, curr_frag;
        u8 found = 0, active = 0;

        spin_lock(&vf->rcv_pend.lock);
        if (vf->rcv_pend.count > 0) {
                list_for_each(node, &vf->rcv_pend.wait_list) {
                        trans = list_entry(node, struct qlcnic_bc_trans, list);
                        if (trans->trans_id == hdr->seq_id) {
                                found = 1;
                                break;
                        }
                }
        }

        if (found) {
                /* Pull this fragment's header/payload into the
                 * transaction's buffers at the current fragment index.
                 */
                curr_frag = trans->curr_req_frag;
                pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
                                                       curr_frag);
                qlcnic_sriov_pull_bc_msg(vf->adapter,
                                         (u32 *)(trans->req_hdr + curr_frag),
                                         (u32 *)(trans->req_pay + curr_frag),
                                         pay_size);
                trans->curr_req_frag++;
                /* Last fragment: unlink from pending, activate below */
                if (trans->curr_req_frag >= hdr->num_frags) {
                        vf->rcv_pend.count--;
                        list_del(&trans->list);
                        active = 1;
                }
        }
        spin_unlock(&vf->rcv_pend.lock);

        /* Activation takes its own lock, so do it outside rcv_pend.lock */
        if (active)
                if (qlcnic_sriov_add_act_list(sriov, vf, trans))
                        qlcnic_sriov_cleanup_transaction(trans);

        return;
}
1195
/* Handle the first fragment of an incoming back-channel command:
 * allocate a transaction and mailbox args, pull the fragment from the
 * FW mailbox, then either activate the transaction (single-fragment
 * command) or park it on the pending list to await more fragments.
 */
static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov,
                                       struct qlcnic_bc_hdr *hdr,
                                       struct qlcnic_vf_info *vf)
{
        struct qlcnic_bc_trans *trans;
        struct qlcnic_adapter *adapter = vf->adapter;
        struct qlcnic_cmd_args cmd;
        u32 pay_size;
        int err;
        u8 cmd_op;

        if (adapter->need_fw_reset)
                return;

        /* Before the channel is up, only a CHANNEL_INIT BC command is
         * acceptable; drop anything else.
         */
        if (!test_bit(QLC_BC_VF_STATE, &vf->state) &&
            hdr->op_type != QLC_BC_CMD &&
            hdr->cmd_op != QLCNIC_BC_CMD_CHANNEL_INIT)
                return;

        /* Continuation fragments are matched to an existing pending
         * transaction rather than starting a new one.
         */
        if (hdr->frag_num > 1) {
                qlcnic_sriov_handle_pending_trans(sriov, vf, hdr);
                return;
        }

        memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
        cmd_op = hdr->cmd_op;
        if (qlcnic_sriov_alloc_bc_trans(&trans))
                return;

        /* BC-protocol commands use the driver's own arg templates;
         * everything else uses the regular 83xx mailbox templates.
         */
        if (hdr->op_type == QLC_BC_CMD)
                err = qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op);
        else
                err = qlcnic_alloc_mbx_args(&cmd, adapter, cmd_op);

        if (err) {
                qlcnic_sriov_cleanup_transaction(trans);
                return;
        }

        cmd.op_type = hdr->op_type;
        if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, hdr->seq_id,
                                        QLC_BC_COMMAND)) {
                qlcnic_free_mbx_args(&cmd);
                qlcnic_sriov_cleanup_transaction(trans);
                return;
        }

        /* Copy the first fragment out of the FW mailbox registers */
        pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
                                         trans->curr_req_frag);
        qlcnic_sriov_pull_bc_msg(vf->adapter,
                                 (u32 *)(trans->req_hdr + trans->curr_req_frag),
                                 (u32 *)(trans->req_pay + trans->curr_req_frag),
                                 pay_size);
        trans->func_id = vf->pci_func;
        trans->vf = vf;
        trans->trans_id = hdr->seq_id;
        trans->curr_req_frag++;

        /* A soft-FLR request is handled (and the transaction consumed)
         * inside this check.
         */
        if (qlcnic_sriov_soft_flr_check(adapter, trans, vf))
                return;

        if (trans->curr_req_frag == trans->req_hdr->num_frags) {
                /* Complete command: queue it for processing */
                if (qlcnic_sriov_add_act_list(sriov, vf, trans)) {
                        qlcnic_free_mbx_args(&cmd);
                        qlcnic_sriov_cleanup_transaction(trans);
                }
        } else {
                /* More fragments to come: park on the pending list */
                spin_lock(&vf->rcv_pend.lock);
                list_add_tail(&trans->list, &vf->rcv_pend.wait_list);
                vf->rcv_pend.count++;
                spin_unlock(&vf->rcv_pend.lock);
        }
}
1269
1270 static void qlcnic_sriov_handle_msg_event(struct qlcnic_sriov *sriov,
1271                                           struct qlcnic_vf_info *vf)
1272 {
1273         struct qlcnic_bc_hdr hdr;
1274         u32 *ptr = (u32 *)&hdr;
1275         u8 msg_type, i;
1276
1277         for (i = 2; i < 6; i++)
1278                 ptr[i - 2] = readl(QLCNIC_MBX_FW(vf->adapter->ahw, i));
1279         msg_type = hdr.msg_type;
1280
1281         switch (msg_type) {
1282         case QLC_BC_COMMAND:
1283                 qlcnic_sriov_handle_bc_cmd(sriov, &hdr, vf);
1284                 break;
1285         case QLC_BC_RESPONSE:
1286                 qlcnic_sriov_handle_bc_resp(&hdr, vf);
1287                 break;
1288         }
1289 }
1290
1291 static void qlcnic_sriov_handle_flr_event(struct qlcnic_sriov *sriov,
1292                                           struct qlcnic_vf_info *vf)
1293 {
1294         struct qlcnic_adapter *adapter = vf->adapter;
1295
1296         if (qlcnic_sriov_pf_check(adapter))
1297                 qlcnic_sriov_pf_handle_flr(sriov, vf);
1298         else
1299                 dev_err(&adapter->pdev->dev,
1300                         "Invalid event to VF. VF should not get FLR event\n");
1301 }
1302
/* Top-level dispatcher for a back-channel mailbox event: resolve the
 * source function to its VF slot, then fan out to channel-free, FLR,
 * and message handling according to the flags in the event word.
 */
void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *adapter, u32 event)
{
        struct qlcnic_vf_info *vf;
        struct qlcnic_sriov *sriov;
        int index;
        u8 pci_func;

        sriov = adapter->ahw->sriov;
        pci_func = qlcnic_sriov_target_func_id(event);
        index = qlcnic_sriov_func_to_index(adapter, pci_func);

        if (index < 0)
                return;

        vf = &sriov->vf_info[index];
        vf->pci_func = pci_func;

        /* Wake anyone waiting for the channel to be released */
        if (qlcnic_sriov_channel_free_check(event))
                complete(&vf->ch_free_cmpl);

        /* FLR supersedes any message payload in the same event */
        if (qlcnic_sriov_flr_check(event)) {
                qlcnic_sriov_handle_flr_event(sriov, vf);
                return;
        }

        if (qlcnic_sriov_bc_msg_check(event))
                qlcnic_sriov_handle_msg_event(sriov, vf);
}
1331
1332 int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *adapter, u8 enable)
1333 {
1334         struct qlcnic_cmd_args cmd;
1335         int err;
1336
1337         if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
1338                 return 0;
1339
1340         if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_BC_EVENT_SETUP))
1341                 return -ENOMEM;
1342
1343         if (enable)
1344                 cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);
1345
1346         err = qlcnic_83xx_issue_cmd(adapter, &cmd);
1347
1348         if (err != QLCNIC_RCODE_SUCCESS) {
1349                 dev_err(&adapter->pdev->dev,
1350                         "Failed to %s bc events, err=%d\n",
1351                         (enable ? "enable" : "disable"), err);
1352         }
1353
1354         qlcnic_free_mbx_args(&cmd);
1355         return err;
1356 }
1357
1358 static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter,
1359                                      struct qlcnic_bc_trans *trans)
1360 {
1361         u8 max = QLC_BC_CMD_MAX_RETRY_CNT;
1362         u32 state;
1363
1364         state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
1365         if (state == QLC_83XX_IDC_DEV_READY) {
1366                 msleep(20);
1367                 clear_bit(QLC_BC_VF_CHANNEL, &trans->vf->state);
1368                 trans->trans_state = QLC_INIT;
1369                 if (++adapter->fw_fail_cnt > max)
1370                         return -EIO;
1371                 else
1372                         return 0;
1373         }
1374
1375         return -EIO;
1376 }
1377
/* Issue a mailbox command from the VF to the PF over the back channel:
 * wrap it in a transaction, send it, and decode the response status.
 * CHANNEL_INIT timeouts are retried (the PF may be slow after a
 * reset); a final timeout marks the adapter for FW reset.
 * For fire-and-forget (QLC_83XX_MBX_CMD_NO_WAIT) commands, the heap
 * allocated cmd is freed here on exit.
 */
static int __qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
                                  struct qlcnic_cmd_args *cmd)
{
        struct qlcnic_hardware_context *ahw = adapter->ahw;
        struct qlcnic_mailbox *mbx = ahw->mailbox;
        struct device *dev = &adapter->pdev->dev;
        struct qlcnic_bc_trans *trans;
        int err;
        u32 rsp_data, opcode, mbx_err_code, rsp;
        u16 seq = ++adapter->ahw->sriov->bc.trans_counter;
        u8 func = ahw->pci_func;

        rsp = qlcnic_sriov_alloc_bc_trans(&trans);
        if (rsp)
                goto free_cmd;

        rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND);
        if (rsp)
                goto cleanup_transaction;

retry:
        if (!test_bit(QLC_83XX_MBX_READY, &mbx->status)) {
                rsp = -EIO;
                QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n",
                      QLCNIC_MBX_RSP(cmd->req.arg[0]), func);
                goto err_out;
        }

        err = qlcnic_sriov_send_bc_cmd(adapter, trans, func);
        if (err) {
                dev_err(dev, "MBX command 0x%x timed out for VF %d\n",
                        (cmd->req.arg[0] & 0xffff), func);
                rsp = QLCNIC_RCODE_TIMEOUT;

                /* After adapter reset PF driver may take some time to
                 * respond to VF's request. Retry request till maximum retries.
                 */
                if ((trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) &&
                    !qlcnic_sriov_retry_bc_cmd(adapter, trans))
                        goto retry;

                goto err_out;
        }

        /* Decode PF's response status from the first response word */
        rsp_data = cmd->rsp.arg[0];
        mbx_err_code = QLCNIC_MBX_STATUS(rsp_data);
        opcode = QLCNIC_MBX_RSP(cmd->req.arg[0]);

        if ((mbx_err_code == QLCNIC_MBX_RSP_OK) ||
            (mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) {
                rsp = QLCNIC_RCODE_SUCCESS;
        } else {
                /* No-wait commands report success regardless; nobody is
                 * waiting on the result.
                 */
                if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
                        rsp = QLCNIC_RCODE_SUCCESS;
                } else {
                        rsp = mbx_err_code;
                        if (!rsp)
                                rsp = 1;

                        dev_err(dev,
                                "MBX command 0x%x failed with err:0x%x for VF %d\n",
                                opcode, mbx_err_code, func);
                }
        }

err_out:
        /* A timeout means the channel is broken: request a FW reset */
        if (rsp == QLCNIC_RCODE_TIMEOUT) {
                ahw->reset_context = 1;
                adapter->need_fw_reset = 1;
                clear_bit(QLC_83XX_MBX_READY, &mbx->status);
        }

cleanup_transaction:
        qlcnic_sriov_cleanup_transaction(trans);

free_cmd:
        if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
                qlcnic_free_mbx_args(cmd);
                kfree(cmd);
        }

        return rsp;
}
1461
1462
1463 static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
1464                                   struct qlcnic_cmd_args *cmd)
1465 {
1466         if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT)
1467                 return qlcnic_sriov_async_issue_cmd(adapter, cmd);
1468         else
1469                 return __qlcnic_sriov_issue_cmd(adapter, cmd);
1470 }
1471
1472 static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
1473 {
1474         struct qlcnic_cmd_args cmd;
1475         struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0];
1476         int ret;
1477
1478         memset(&cmd, 0, sizeof(cmd));
1479         if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op))
1480                 return -ENOMEM;
1481
1482         ret = qlcnic_issue_cmd(adapter, &cmd);
1483         if (ret) {
1484                 dev_err(&adapter->pdev->dev,
1485                         "Failed bc channel %s %d\n", cmd_op ? "term" : "init",
1486                         ret);
1487                 goto out;
1488         }
1489
1490         cmd_op = (cmd.rsp.arg[0] & 0xff);
1491         if (cmd.rsp.arg[0] >> 25 == 2)
1492                 return 2;
1493         if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
1494                 set_bit(QLC_BC_VF_STATE, &vf->state);
1495         else
1496                 clear_bit(QLC_BC_VF_STATE, &vf->state);
1497
1498 out:
1499         qlcnic_free_mbx_args(&cmd);
1500         return ret;
1501 }
1502
/* Program a MAC filter on VF 0: a single untagged entry when no VLANs
 * are configured, otherwise one entry per configured sriov VLAN (with
 * an extra untagged entry on 84xx adapters).
 */
static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac,
                                  enum qlcnic_mac_type mac_type)
{
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        struct qlcnic_sriov *sriov = adapter->ahw->sriov;
        struct qlcnic_vf_info *vf;
        u16 vlan_id;
        int i;

        vf = &adapter->ahw->sriov->vf_info[0];

        if (!qlcnic_sriov_check_any_vlan(vf)) {
                qlcnic_nic_add_mac(adapter, mac, 0, mac_type);
        } else {
                /* Walk the VLAN table under its lock; slot value 0
                 * marks an unused entry.
                 */
                spin_lock(&vf->vlan_list_lock);
                for (i = 0; i < sriov->num_allowed_vlans; i++) {
                        vlan_id = vf->sriov_vlans[i];
                        if (vlan_id)
                                qlcnic_nic_add_mac(adapter, mac, vlan_id,
                                                   mac_type);
                }
                spin_unlock(&vf->vlan_list_lock);
                if (qlcnic_84xx_check(adapter))
                        qlcnic_nic_add_mac(adapter, mac, 0, mac_type);
        }
}
1529
1530 void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
1531 {
1532         struct list_head *head = &bc->async_cmd_list;
1533         struct qlcnic_async_cmd *entry;
1534
1535         flush_workqueue(bc->bc_async_wq);
1536         cancel_work_sync(&bc->vf_async_work);
1537
1538         spin_lock(&bc->queue_lock);
1539         while (!list_empty(head)) {
1540                 entry = list_entry(head->next, struct qlcnic_async_cmd,
1541                                    list);
1542                 list_del(&entry->list);
1543                 kfree(entry->cmd);
1544                 kfree(entry);
1545         }
1546         spin_unlock(&bc->queue_lock);
1547 }
1548
/* VF ndo_set_rx_mode handler: pick a VPORT miss mode (drop / accept
 * multicast / accept all) from the netdev flags and address counts,
 * program broadcast/multicast/unicast MAC filters, and toggle driver
 * MAC learning accordingly.
 */
void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
{
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        struct qlcnic_hardware_context *ahw = adapter->ahw;
        static const u8 bcast_addr[ETH_ALEN] = {
                0xff, 0xff, 0xff, 0xff, 0xff, 0xff
        };
        struct netdev_hw_addr *ha;
        u32 mode = VPORT_MISS_MODE_DROP;

        if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
                return;

        if (netdev->flags & IFF_PROMISC) {
                if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
                        mode = VPORT_MISS_MODE_ACCEPT_ALL;
        } else if ((netdev->flags & IFF_ALLMULTI) ||
                   (netdev_mc_count(netdev) > ahw->max_mc_count)) {
                /* Too many groups to filter individually */
                mode = VPORT_MISS_MODE_ACCEPT_MULTI;
        } else {
                qlcnic_vf_add_mc_list(netdev, bcast_addr, QLCNIC_BROADCAST_MAC);
                if (!netdev_mc_empty(netdev)) {
                        qlcnic_flush_mcast_mac(adapter);
                        netdev_for_each_mc_addr(ha, netdev)
                                qlcnic_vf_add_mc_list(netdev, ha->addr,
                                                      QLCNIC_MULTICAST_MAC);
                }
        }

        /* configure unicast MAC address, if there is not sufficient space
         * to store all the unicast addresses then enable promiscuous mode
         */
        if (netdev_uc_count(netdev) > ahw->max_uc_count) {
                mode = VPORT_MISS_MODE_ACCEPT_ALL;
        } else if (!netdev_uc_empty(netdev)) {
                netdev_for_each_uc_addr(ha, netdev)
                        qlcnic_vf_add_mc_list(netdev, ha->addr,
                                              QLCNIC_UNICAST_MAC);
        }

        if (adapter->pdev->is_virtfn) {
                /* Driver-side MAC learning is only useful in accept-all
                 * mode and when fdb learning is not already in charge.
                 */
                if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
                    !adapter->fdb_mac_learn) {
                        qlcnic_alloc_lb_filters_mem(adapter);
                        adapter->drv_mac_learn = 1;
                        adapter->rx_mac_learn = true;
                } else {
                        adapter->drv_mac_learn = 0;
                        adapter->rx_mac_learn = false;
                }
        }

        qlcnic_nic_set_promisc(adapter, mode);
}
1603
1604 static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work)
1605 {
1606         struct qlcnic_async_cmd *entry, *tmp;
1607         struct qlcnic_back_channel *bc;
1608         struct qlcnic_cmd_args *cmd;
1609         struct list_head *head;
1610         LIST_HEAD(del_list);
1611
1612         bc = container_of(work, struct qlcnic_back_channel, vf_async_work);
1613         head = &bc->async_cmd_list;
1614
1615         spin_lock(&bc->queue_lock);
1616         list_splice_init(head, &del_list);
1617         spin_unlock(&bc->queue_lock);
1618
1619         list_for_each_entry_safe(entry, tmp, &del_list, list) {
1620                 list_del(&entry->list);
1621                 cmd = entry->cmd;
1622                 __qlcnic_sriov_issue_cmd(bc->adapter, cmd);
1623                 kfree(entry);
1624         }
1625
1626         if (!list_empty(head))
1627                 queue_work(bc->bc_async_wq, &bc->vf_async_work);
1628
1629         return;
1630 }
1631
1632 static struct qlcnic_async_cmd *
1633 qlcnic_sriov_alloc_async_cmd(struct qlcnic_back_channel *bc,
1634                              struct qlcnic_cmd_args *cmd)
1635 {
1636         struct qlcnic_async_cmd *entry = NULL;
1637
1638         entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1639         if (!entry)
1640                 return NULL;
1641
1642         entry->cmd = cmd;
1643
1644         spin_lock(&bc->queue_lock);
1645         list_add_tail(&entry->list, &bc->async_cmd_list);
1646         spin_unlock(&bc->queue_lock);
1647
1648         return entry;
1649 }
1650
1651 static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc,
1652                                             struct qlcnic_cmd_args *cmd)
1653 {
1654         struct qlcnic_async_cmd *entry = NULL;
1655
1656         entry = qlcnic_sriov_alloc_async_cmd(bc, cmd);
1657         if (!entry) {
1658                 qlcnic_free_mbx_args(cmd);
1659                 kfree(cmd);
1660                 return;
1661         }
1662
1663         queue_work(bc->bc_async_wq, &bc->vf_async_work);
1664 }
1665
1666 static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
1667                                         struct qlcnic_cmd_args *cmd)
1668 {
1669
1670         struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
1671
1672         if (adapter->need_fw_reset)
1673                 return -EIO;
1674
1675         qlcnic_sriov_schedule_async_cmd(bc, cmd);
1676
1677         return 0;
1678 }
1679
/* Re-initialize the VF driver after a firmware reset.
 *
 * The sequence is order-sensitive: mailbox work and interrupts come
 * back first, then the PF back-channel interrupt is enabled, the
 * back-channel session is (re)established, and finally the driver-side
 * state is rebuilt.  Errors unwind in reverse via the goto labels.
 *
 * Returns 0 on success or a negative errno from the failing step.
 */
static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
{
	int err;

	adapter->need_fw_reset = 0;
	qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
	qlcnic_83xx_enable_mbx_interrupt(adapter);

	/* Enable PF<->VF back-channel interrupt delivery. */
	err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
	if (err)
		return err;

	/* Re-open the command channel to the PF. */
	err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
	if (err)
		goto err_out_cleanup_bc_intr;

	err = qlcnic_sriov_vf_init_driver(adapter);
	if (err)
		goto err_out_term_channel;

	return 0;

err_out_term_channel:
	qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);

err_out_cleanup_bc_intr:
	qlcnic_sriov_cfg_bc_intr(adapter, 0);
	return err;
}
1709
1710 static void qlcnic_sriov_vf_attach(struct qlcnic_adapter *adapter)
1711 {
1712         struct net_device *netdev = adapter->netdev;
1713
1714         if (netif_running(netdev)) {
1715                 if (!qlcnic_up(adapter, netdev))
1716                         qlcnic_restore_indev_addr(netdev, NETDEV_UP);
1717         }
1718
1719         netif_device_attach(netdev);
1720 }
1721
1722 static void qlcnic_sriov_vf_detach(struct qlcnic_adapter *adapter)
1723 {
1724         struct qlcnic_hardware_context *ahw = adapter->ahw;
1725         struct qlcnic_intrpt_config *intr_tbl = ahw->intr_tbl;
1726         struct net_device *netdev = adapter->netdev;
1727         u8 i, max_ints = ahw->num_msix - 1;
1728
1729         netif_device_detach(netdev);
1730         qlcnic_83xx_detach_mailbox_work(adapter);
1731         qlcnic_83xx_disable_mbx_intr(adapter);
1732
1733         if (netif_running(netdev))
1734                 qlcnic_down(adapter, netdev);
1735
1736         for (i = 0; i < max_ints; i++) {
1737                 intr_tbl[i].id = i;
1738                 intr_tbl[i].enabled = 0;
1739                 intr_tbl[i].src = 0;
1740         }
1741         ahw->reset_context = 0;
1742 }
1743
1744 static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter *adapter)
1745 {
1746         struct qlcnic_hardware_context *ahw = adapter->ahw;
1747         struct device *dev = &adapter->pdev->dev;
1748         struct qlc_83xx_idc *idc = &ahw->idc;
1749         u8 func = ahw->pci_func;
1750         u32 state;
1751
1752         if ((idc->prev_state == QLC_83XX_IDC_DEV_NEED_RESET) ||
1753             (idc->prev_state == QLC_83XX_IDC_DEV_INIT)) {
1754                 if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
1755                         qlcnic_sriov_vf_attach(adapter);
1756                         adapter->fw_fail_cnt = 0;
1757                         dev_info(dev,
1758                                  "%s: Reinitialization of VF 0x%x done after FW reset\n",
1759                                  __func__, func);
1760                 } else {
1761                         dev_err(dev,
1762                                 "%s: Reinitialization of VF 0x%x failed after FW reset\n",
1763                                 __func__, func);
1764                         state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
1765                         dev_info(dev, "Current state 0x%x after FW reset\n",
1766                                  state);
1767                 }
1768         }
1769
1770         return 0;
1771 }
1772
/* Handle a driver-requested context reset while the device is READY.
 *
 * Escalation policy:
 *  - first two attempts: don't reset yet, flag need_fw_reset and wait
 *    to see whether firmware transitions to a failed state;
 *  - beyond QLC_83XX_VF_RESET_FAIL_THRESH attempts: give up, detach the
 *    VF and return -EIO (stops further polling);
 *  - otherwise: detach, re-init and re-attach the VF.
 *
 * Returns 0 to keep polling, -EIO when the VF is shut down.
 */
static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_mailbox *mbx = ahw->mailbox;
	struct device *dev = &adapter->pdev->dev;
	struct qlc_83xx_idc *idc = &ahw->idc;
	u8 func = ahw->pci_func;
	u32 state;

	adapter->reset_ctx_cnt++;

	/* Skip the context reset and check if FW is hung */
	if (adapter->reset_ctx_cnt < 3) {
		adapter->need_fw_reset = 1;
		clear_bit(QLC_83XX_MBX_READY, &mbx->status);
		dev_info(dev,
			 "Resetting context, wait here to check if FW is in failed state\n");
		return 0;
	}

	/* Check if number of resets exceed the threshold.
	 * If it exceeds the threshold just fail the VF.
	 */
	if (adapter->reset_ctx_cnt > QLC_83XX_VF_RESET_FAIL_THRESH) {
		clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
		adapter->tx_timeo_cnt = 0;
		adapter->fw_fail_cnt = 0;
		adapter->reset_ctx_cnt = 0;
		qlcnic_sriov_vf_detach(adapter);
		dev_err(dev,
			"Device context resets have exceeded the threshold, device interface will be shutdown\n");
		return -EIO;
	}

	dev_info(dev, "Resetting context of VF 0x%x\n", func);
	dev_info(dev, "%s: Context reset count %d for VF 0x%x\n",
		 __func__, adapter->reset_ctx_cnt, func);
	/* need_fw_reset is held across the detach so async command
	 * submission is rejected while the data path is torn down.
	 */
	set_bit(__QLCNIC_RESETTING, &adapter->state);
	adapter->need_fw_reset = 1;
	clear_bit(QLC_83XX_MBX_READY, &mbx->status);
	qlcnic_sriov_vf_detach(adapter);
	adapter->need_fw_reset = 0;

	if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
		qlcnic_sriov_vf_attach(adapter);
		adapter->tx_timeo_cnt = 0;
		adapter->reset_ctx_cnt = 0;
		adapter->fw_fail_cnt = 0;
		dev_info(dev, "Done resetting context for VF 0x%x\n", func);
	} else {
		dev_err(dev, "%s: Reinitialization of VF 0x%x failed\n",
			__func__, func);
		state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
		dev_info(dev, "%s: Current state 0x%x\n", __func__, state);
	}

	return 0;
}
1831
1832 static int qlcnic_sriov_vf_idc_ready_state(struct qlcnic_adapter *adapter)
1833 {
1834         struct qlcnic_hardware_context *ahw = adapter->ahw;
1835         int ret = 0;
1836
1837         if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_READY)
1838                 ret = qlcnic_sriov_vf_handle_dev_ready(adapter);
1839         else if (ahw->reset_context)
1840                 ret = qlcnic_sriov_vf_handle_context_reset(adapter);
1841
1842         clear_bit(__QLCNIC_RESETTING, &adapter->state);
1843         return ret;
1844 }
1845
1846 static int qlcnic_sriov_vf_idc_failed_state(struct qlcnic_adapter *adapter)
1847 {
1848         struct qlc_83xx_idc *idc = &adapter->ahw->idc;
1849
1850         dev_err(&adapter->pdev->dev, "Device is in failed state\n");
1851         if (idc->prev_state == QLC_83XX_IDC_DEV_READY)
1852                 qlcnic_sriov_vf_detach(adapter);
1853
1854         clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
1855         clear_bit(__QLCNIC_RESETTING, &adapter->state);
1856         return -EIO;
1857 }
1858
1859 static int
1860 qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
1861 {
1862         struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
1863         struct qlc_83xx_idc *idc = &adapter->ahw->idc;
1864
1865         dev_info(&adapter->pdev->dev, "Device is in quiescent state\n");
1866         if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
1867                 set_bit(__QLCNIC_RESETTING, &adapter->state);
1868                 adapter->tx_timeo_cnt = 0;
1869                 adapter->reset_ctx_cnt = 0;
1870                 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
1871                 qlcnic_sriov_vf_detach(adapter);
1872         }
1873
1874         return 0;
1875 }
1876
1877 static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter)
1878 {
1879         struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
1880         struct qlc_83xx_idc *idc = &adapter->ahw->idc;
1881         u8 func = adapter->ahw->pci_func;
1882
1883         if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
1884                 dev_err(&adapter->pdev->dev,
1885                         "Firmware hang detected by VF 0x%x\n", func);
1886                 set_bit(__QLCNIC_RESETTING, &adapter->state);
1887                 adapter->tx_timeo_cnt = 0;
1888                 adapter->reset_ctx_cnt = 0;
1889                 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
1890                 qlcnic_sriov_vf_detach(adapter);
1891         }
1892         return 0;
1893 }
1894
1895 static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter *adapter)
1896 {
1897         dev_err(&adapter->pdev->dev, "%s: Device in unknown state\n", __func__);
1898         return 0;
1899 }
1900
1901 static void qlcnic_sriov_vf_periodic_tasks(struct qlcnic_adapter *adapter)
1902 {
1903         if (adapter->fhash.fnum)
1904                 qlcnic_prune_lb_filters(adapter);
1905 }
1906
1907 static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
1908 {
1909         struct qlcnic_adapter *adapter;
1910         struct qlc_83xx_idc *idc;
1911         int ret = 0;
1912
1913         adapter = container_of(work, struct qlcnic_adapter, fw_work.work);
1914         idc = &adapter->ahw->idc;
1915         idc->curr_state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
1916
1917         switch (idc->curr_state) {
1918         case QLC_83XX_IDC_DEV_READY:
1919                 ret = qlcnic_sriov_vf_idc_ready_state(adapter);
1920                 break;
1921         case QLC_83XX_IDC_DEV_NEED_RESET:
1922         case QLC_83XX_IDC_DEV_INIT:
1923                 ret = qlcnic_sriov_vf_idc_init_reset_state(adapter);
1924                 break;
1925         case QLC_83XX_IDC_DEV_NEED_QUISCENT:
1926                 ret = qlcnic_sriov_vf_idc_need_quiescent_state(adapter);
1927                 break;
1928         case QLC_83XX_IDC_DEV_FAILED:
1929                 ret = qlcnic_sriov_vf_idc_failed_state(adapter);
1930                 break;
1931         case QLC_83XX_IDC_DEV_QUISCENT:
1932                 break;
1933         default:
1934                 ret = qlcnic_sriov_vf_idc_unknown_state(adapter);
1935         }
1936
1937         idc->prev_state = idc->curr_state;
1938         qlcnic_sriov_vf_periodic_tasks(adapter);
1939
1940         if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status))
1941                 qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
1942                                      idc->delay);
1943 }
1944
/* Stop the IDC poll worker.
 *
 * Spins until __QLCNIC_RESETTING can be taken (waiting out any
 * in-flight reset handling), clears MODULE_LOADED so the worker will
 * not reschedule itself, then cancels the delayed work synchronously.
 * The ordering (clear MODULE_LOADED before cancel) is what guarantees
 * the work does not re-queue after cancellation.
 */
static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *adapter)
{
	while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
		msleep(20);

	clear_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	cancel_delayed_work_sync(&adapter->fw_work);
}
1954
1955 static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
1956                                       struct qlcnic_vf_info *vf, u16 vlan_id)
1957 {
1958         int i, err = -EINVAL;
1959
1960         if (!vf->sriov_vlans)
1961                 return err;
1962
1963         spin_lock_bh(&vf->vlan_list_lock);
1964
1965         for (i = 0; i < sriov->num_allowed_vlans; i++) {
1966                 if (vf->sriov_vlans[i] == vlan_id) {
1967                         err = 0;
1968                         break;
1969                 }
1970         }
1971
1972         spin_unlock_bh(&vf->vlan_list_lock);
1973         return err;
1974 }
1975
1976 static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov *sriov,
1977                                            struct qlcnic_vf_info *vf)
1978 {
1979         int err = 0;
1980
1981         spin_lock_bh(&vf->vlan_list_lock);
1982
1983         if (vf->num_vlan >= sriov->num_allowed_vlans)
1984                 err = -EINVAL;
1985
1986         spin_unlock_bh(&vf->vlan_list_lock);
1987         return err;
1988 }
1989
1990 static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_adapter *adapter,
1991                                           u16 vid, u8 enable)
1992 {
1993         struct qlcnic_sriov *sriov = adapter->ahw->sriov;
1994         struct qlcnic_vf_info *vf;
1995         bool vlan_exist;
1996         u8 allowed = 0;
1997         int i;
1998
1999         vf = &adapter->ahw->sriov->vf_info[0];
2000         vlan_exist = qlcnic_sriov_check_any_vlan(vf);
2001         if (sriov->vlan_mode != QLC_GUEST_VLAN_MODE)
2002                 return -EINVAL;
2003
2004         if (enable) {
2005                 if (qlcnic_83xx_vf_check(adapter) && vlan_exist)
2006                         return -EINVAL;
2007
2008                 if (qlcnic_sriov_validate_num_vlans(sriov, vf))
2009                         return -EINVAL;
2010
2011                 if (sriov->any_vlan) {
2012                         for (i = 0; i < sriov->num_allowed_vlans; i++) {
2013                                 if (sriov->allowed_vlans[i] == vid)
2014                                         allowed = 1;
2015                         }
2016
2017                         if (!allowed)
2018                                 return -EINVAL;
2019                 }
2020         } else {
2021                 if (!vlan_exist || qlcnic_sriov_check_vlan_id(sriov, vf, vid))
2022                         return -EINVAL;
2023         }
2024
2025         return 0;
2026 }
2027
2028 static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
2029                                         enum qlcnic_vlan_operations opcode)
2030 {
2031         struct qlcnic_adapter *adapter = vf->adapter;
2032         struct qlcnic_sriov *sriov;
2033
2034         sriov = adapter->ahw->sriov;
2035
2036         if (!vf->sriov_vlans)
2037                 return;
2038
2039         spin_lock_bh(&vf->vlan_list_lock);
2040
2041         switch (opcode) {
2042         case QLC_VLAN_ADD:
2043                 qlcnic_sriov_add_vlan_id(sriov, vf, vlan_id);
2044                 break;
2045         case QLC_VLAN_DELETE:
2046                 qlcnic_sriov_del_vlan_id(sriov, vf, vlan_id);
2047                 break;
2048         default:
2049                 netdev_err(adapter->netdev, "Invalid VLAN operation\n");
2050         }
2051
2052         spin_unlock_bh(&vf->vlan_list_lock);
2053         return;
2054 }
2055
/* Configure (add or delete) a guest VLAN on a VF via a back-channel
 * command to the PF.
 *
 * @vid:    VLAN id (0 is accepted as a no-op and returns 0)
 * @enable: non-zero to add the VLAN, zero to delete it
 *
 * On success the MAC list is flushed and the multicast configuration is
 * reprogrammed so filters are rebuilt against the new VLAN.  Returns 0
 * on success or a negative errno.
 */
int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
				   u16 vid, u8 enable)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_vf_info *vf;
	struct qlcnic_cmd_args cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	if (vid == 0)
		return 0;

	vf = &adapter->ahw->sriov->vf_info[0];
	ret = qlcnic_sriov_validate_vlan_cfg(adapter, vid, enable);
	if (ret)
		return ret;

	ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd,
					     QLCNIC_BC_CMD_CFG_GUEST_VLAN);
	if (ret)
		return ret;

	/* arg[1]: bit 0 = enable flag, bits 31:16 = VLAN id. */
	cmd.req.arg[1] = (enable & 1) | vid << 16;

	/* Drop any pending async commands before issuing synchronously. */
	qlcnic_sriov_cleanup_async_list(&sriov->bc);
	ret = qlcnic_issue_cmd(adapter, &cmd);
	if (ret) {
		dev_err(&adapter->pdev->dev,
			"Failed to configure guest VLAN, err=%d\n", ret);
	} else {
		/* The addr lock is released around the VLAN list update
		 * because qlcnic_sriov_vlan_operation takes its own lock.
		 */
		netif_addr_lock_bh(netdev);
		qlcnic_free_mac_list(adapter);
		netif_addr_unlock_bh(netdev);

		if (enable)
			qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_ADD);
		else
			qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_DELETE);

		netif_addr_lock_bh(netdev);
		qlcnic_set_multi(netdev);
		netif_addr_unlock_bh(netdev);
	}

	qlcnic_free_mbx_args(&cmd);
	return ret;
}
2104
2105 static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *adapter)
2106 {
2107         struct list_head *head = &adapter->mac_list;
2108         struct qlcnic_mac_vlan_list *cur;
2109
2110         while (!list_empty(head)) {
2111                 cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
2112                 qlcnic_sre_macaddr_change(adapter, cur->mac_addr, cur->vlan_id,
2113                                           QLCNIC_MAC_DEL);
2114                 list_del(&cur->list);
2115                 kfree(cur);
2116         }
2117 }
2118
2119
2120 static int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev)
2121 {
2122         struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2123         struct net_device *netdev = adapter->netdev;
2124         int retval;
2125
2126         netif_device_detach(netdev);
2127         qlcnic_cancel_idc_work(adapter);
2128
2129         if (netif_running(netdev))
2130                 qlcnic_down(adapter, netdev);
2131
2132         qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
2133         qlcnic_sriov_cfg_bc_intr(adapter, 0);
2134         qlcnic_83xx_disable_mbx_intr(adapter);
2135         cancel_delayed_work_sync(&adapter->idc_aen_work);
2136
2137         retval = pci_save_state(pdev);
2138         if (retval)
2139                 return retval;
2140
2141         return 0;
2142 }
2143
2144 static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
2145 {
2146         struct qlc_83xx_idc *idc = &adapter->ahw->idc;
2147         struct net_device *netdev = adapter->netdev;
2148         int err;
2149
2150         set_bit(QLC_83XX_MODULE_LOADED, &idc->status);
2151         qlcnic_83xx_enable_mbx_interrupt(adapter);
2152         err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
2153         if (err)
2154                 return err;
2155
2156         err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
2157         if (!err) {
2158                 if (netif_running(netdev)) {
2159                         err = qlcnic_up(adapter, netdev);
2160                         if (!err)
2161                                 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
2162                 }
2163         }
2164
2165         netif_device_attach(netdev);
2166         qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
2167                              idc->delay);
2168         return err;
2169 }
2170
2171 int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
2172 {
2173         struct qlcnic_sriov *sriov = adapter->ahw->sriov;
2174         struct qlcnic_vf_info *vf;
2175         int i;
2176
2177         for (i = 0; i < sriov->num_vfs; i++) {
2178                 vf = &sriov->vf_info[i];
2179                 vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
2180                                           sizeof(*vf->sriov_vlans), GFP_KERNEL);
2181                 if (!vf->sriov_vlans)
2182                         return -ENOMEM;
2183         }
2184
2185         return 0;
2186 }
2187
2188 void qlcnic_sriov_free_vlans(struct qlcnic_adapter *adapter)
2189 {
2190         struct qlcnic_sriov *sriov = adapter->ahw->sriov;
2191         struct qlcnic_vf_info *vf;
2192         int i;
2193
2194         for (i = 0; i < sriov->num_vfs; i++) {
2195                 vf = &sriov->vf_info[i];
2196                 kfree(vf->sriov_vlans);
2197                 vf->sriov_vlans = NULL;
2198         }
2199 }
2200
2201 void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov *sriov,
2202                               struct qlcnic_vf_info *vf, u16 vlan_id)
2203 {
2204         int i;
2205
2206         for (i = 0; i < sriov->num_allowed_vlans; i++) {
2207                 if (!vf->sriov_vlans[i]) {
2208                         vf->sriov_vlans[i] = vlan_id;
2209                         vf->num_vlan++;
2210                         return;
2211                 }
2212         }
2213 }
2214
2215 void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *sriov,
2216                               struct qlcnic_vf_info *vf, u16 vlan_id)
2217 {
2218         int i;
2219
2220         for (i = 0; i < sriov->num_allowed_vlans; i++) {
2221                 if (vf->sriov_vlans[i] == vlan_id) {
2222                         vf->sriov_vlans[i] = 0;
2223                         vf->num_vlan--;
2224                         return;
2225                 }
2226         }
2227 }
2228
2229 bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *vf)
2230 {
2231         bool err = false;
2232
2233         spin_lock_bh(&vf->vlan_list_lock);
2234
2235         if (vf->num_vlan)
2236                 err = true;
2237
2238         spin_unlock_bh(&vf->vlan_list_lock);
2239         return err;
2240 }