1 /*
2  * This file is part of the Emulex Linux Device Driver for Enterprise iSCSI
3  * Host Bus Adapters. Refer to the README file included with this package
4  * for driver version and adapter compatibility.
5  *
6  * Copyright (c) 2018 Broadcom. All Rights Reserved.
7  * The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
8  *
9  * This program is free software; you can redistribute it and/or modify it
10  * under the terms of version 2 of the GNU General Public License as published
11  * by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful. ALL EXPRESS
14  * OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY
15  * IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
16  * OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH
17  * DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
18  * See the GNU General Public License for more details, a copy of which
19  * can be found in the file COPYING included with this package.
20  *
21  * Contact Information:
22  * linux-drivers@broadcom.com
23  *
24  */
25
26 #include <linux/reboot.h>
27 #include <linux/delay.h>
28 #include <linux/slab.h>
29 #include <linux/interrupt.h>
30 #include <linux/blkdev.h>
31 #include <linux/pci.h>
32 #include <linux/string.h>
33 #include <linux/kernel.h>
34 #include <linux/semaphore.h>
35 #include <linux/iscsi_boot_sysfs.h>
36 #include <linux/module.h>
37 #include <linux/bsg-lib.h>
38 #include <linux/irq_poll.h>
39
40 #include <scsi/libiscsi.h>
41 #include <scsi/scsi_bsg_iscsi.h>
42 #include <scsi/scsi_netlink.h>
43 #include <scsi/scsi_transport_iscsi.h>
44 #include <scsi/scsi_transport.h>
45 #include <scsi/scsi_cmnd.h>
46 #include <scsi/scsi_device.h>
47 #include <scsi/scsi_host.h>
48 #include <scsi/scsi.h>
49 #include "be_main.h"
50 #include "be_iscsi.h"
51 #include "be_mgmt.h"
52 #include "be_cmds.h"
53
54 static unsigned int be_iopoll_budget = 10;
55 static unsigned int be_max_phys_size = 64;
56 static unsigned int enable_msix = 1;
57
58 MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
59 MODULE_VERSION(BUILD_STR);
60 MODULE_AUTHOR("Emulex Corporation");
61 MODULE_LICENSE("GPL");
62 module_param(be_iopoll_budget, int, 0);
63 module_param(enable_msix, int, 0);
64 module_param(be_max_phys_size, uint, S_IRUGO);
65 MODULE_PARM_DESC(be_max_phys_size,
66                 "Maximum Size (In Kilobytes) of physically contiguous "
67                 "memory that can be allocated. Range is 16 - 128");
68
69 #define beiscsi_disp_param(_name)\
70 static ssize_t  \
71 beiscsi_##_name##_disp(struct device *dev,\
72                         struct device_attribute *attrib, char *buf)     \
73 {       \
74         struct Scsi_Host *shost = class_to_shost(dev);\
75         struct beiscsi_hba *phba = iscsi_host_priv(shost); \
76         return snprintf(buf, PAGE_SIZE, "%d\n",\
77                         phba->attr_##_name);\
78 }
79
80 #define beiscsi_change_param(_name, _minval, _maxval, _defaval)\
81 static int \
82 beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\
83 {\
84         if (val >= _minval && val <= _maxval) {\
85                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
86                             "BA_%d : beiscsi_"#_name" updated "\
87                             "from 0x%x ==> 0x%x\n",\
88                             phba->attr_##_name, val); \
89                 phba->attr_##_name = val;\
90                 return 0;\
91         } \
92         beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, \
93                     "BA_%d beiscsi_"#_name" attribute "\
94                     "cannot be updated to 0x%x, "\
95                     "range allowed is ["#_minval" - "#_maxval"]\n", val);\
96         return -EINVAL;\
97 }
98
99 #define beiscsi_store_param(_name)  \
100 static ssize_t \
101 beiscsi_##_name##_store(struct device *dev,\
102                          struct device_attribute *attr, const char *buf,\
103                          size_t count) \
104 { \
105         struct Scsi_Host  *shost = class_to_shost(dev);\
106         struct beiscsi_hba *phba = iscsi_host_priv(shost);\
107         uint32_t param_val = 0;\
108         if (!isdigit(buf[0]))\
109                 return -EINVAL;\
110         if (sscanf(buf, "%i", &param_val) != 1)\
111                 return -EINVAL;\
112         if (beiscsi_##_name##_change(phba, param_val) == 0) \
113                 return strlen(buf);\
114         else \
115                 return -EINVAL;\
116 }
117
118 #define beiscsi_init_param(_name, _minval, _maxval, _defval) \
119 static int \
120 beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \
121 { \
122         if (val >= _minval && val <= _maxval) {\
123                 phba->attr_##_name = val;\
124                 return 0;\
125         } \
126         beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
127                     "BA_%d beiscsi_"#_name" attribute " \
128                     "cannot be updated to 0x%x, "\
129                     "range allowed is ["#_minval" - "#_maxval"]\n", val);\
130         phba->attr_##_name = _defval;\
131         return -EINVAL;\
132 }
133
134 #define BEISCSI_RW_ATTR(_name, _minval, _maxval, _defval, _descp) \
135 static uint beiscsi_##_name = _defval;\
136 module_param(beiscsi_##_name, uint, S_IRUGO);\
137 MODULE_PARM_DESC(beiscsi_##_name, _descp);\
138 beiscsi_disp_param(_name)\
139 beiscsi_change_param(_name, _minval, _maxval, _defval)\
140 beiscsi_store_param(_name)\
141 beiscsi_init_param(_name, _minval, _maxval, _defval)\
142 DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\
143               beiscsi_##_name##_disp, beiscsi_##_name##_store)
144
145 /*
146  * When new log level added update MAX allowed value for log_enable
147  */
148 BEISCSI_RW_ATTR(log_enable, 0x00,
149                 0xFF, 0x00, "Enable logging Bit Mask\n"
150                 "\t\t\t\tInitialization Events  : 0x01\n"
151                 "\t\t\t\tMailbox Events         : 0x02\n"
152                 "\t\t\t\tMiscellaneous Events   : 0x04\n"
153                 "\t\t\t\tError Handling         : 0x08\n"
154                 "\t\t\t\tIO Path Events         : 0x10\n"
155                 "\t\t\t\tConfiguration Path     : 0x20\n"
156                 "\t\t\t\tiSCSI Protocol         : 0x40\n");
157
158 DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
159 DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL);
160 DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL);
161 DEVICE_ATTR(beiscsi_phys_port, S_IRUGO, beiscsi_phys_port_disp, NULL);
162 DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO,
163              beiscsi_active_session_disp, NULL);
164 DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO,
165              beiscsi_free_session_disp, NULL);
166
167 static struct attribute *beiscsi_attrs[] = {
168         &dev_attr_beiscsi_log_enable.attr,
169         &dev_attr_beiscsi_drvr_ver.attr,
170         &dev_attr_beiscsi_adapter_family.attr,
171         &dev_attr_beiscsi_fw_ver.attr,
172         &dev_attr_beiscsi_active_session_count.attr,
173         &dev_attr_beiscsi_free_session_count.attr,
174         &dev_attr_beiscsi_phys_port.attr,
175         NULL,
176 };
177
178 ATTRIBUTE_GROUPS(beiscsi);
179
180 static char const *cqe_desc[] = {
181         "RESERVED_DESC",
182         "SOL_CMD_COMPLETE",
183         "SOL_CMD_KILLED_DATA_DIGEST_ERR",
184         "CXN_KILLED_PDU_SIZE_EXCEEDS_DSL",
185         "CXN_KILLED_BURST_LEN_MISMATCH",
186         "CXN_KILLED_AHS_RCVD",
187         "CXN_KILLED_HDR_DIGEST_ERR",
188         "CXN_KILLED_UNKNOWN_HDR",
189         "CXN_KILLED_STALE_ITT_TTT_RCVD",
190         "CXN_KILLED_INVALID_ITT_TTT_RCVD",
191         "CXN_KILLED_RST_RCVD",
192         "CXN_KILLED_TIMED_OUT",
193         "CXN_KILLED_RST_SENT",
194         "CXN_KILLED_FIN_RCVD",
195         "CXN_KILLED_BAD_UNSOL_PDU_RCVD",
196         "CXN_KILLED_BAD_WRB_INDEX_ERROR",
197         "CXN_KILLED_OVER_RUN_RESIDUAL",
198         "CXN_KILLED_UNDER_RUN_RESIDUAL",
199         "CMD_KILLED_INVALID_STATSN_RCVD",
200         "CMD_KILLED_INVALID_R2T_RCVD",
201         "CMD_CXN_KILLED_LUN_INVALID",
202         "CMD_CXN_KILLED_ICD_INVALID",
203         "CMD_CXN_KILLED_ITT_INVALID",
204         "CMD_CXN_KILLED_SEQ_OUTOFORDER",
205         "CMD_CXN_KILLED_INVALID_DATASN_RCVD",
206         "CXN_INVALIDATE_NOTIFY",
207         "CXN_INVALIDATE_INDEX_NOTIFY",
208         "CMD_INVALIDATED_NOTIFY",
209         "UNSOL_HDR_NOTIFY",
210         "UNSOL_DATA_NOTIFY",
211         "UNSOL_DATA_DIGEST_ERROR_NOTIFY",
212         "DRIVERMSG_NOTIFY",
213         "CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN",
214         "SOL_CMD_KILLED_DIF_ERR",
215         "CXN_KILLED_SYN_RCVD",
216         "CXN_KILLED_IMM_DATA_RCVD"
217 };
218
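/**
 * beiscsi_eh_abort - SCSI EH abort handler.
 * @sc: command to be aborted
 *
 * Marks the command's WRB invalid, asks the FW to invalidate the ICD it
 * is using, then hands the abort off to iscsi_eh_abort().
 */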
219 static int beiscsi_eh_abort(struct scsi_cmnd *sc)
220 {
221         struct iscsi_task *abrt_task = iscsi_cmd(sc)->task;
222         struct iscsi_cls_session *cls_session;
223         struct beiscsi_io_task *abrt_io_task;
224         struct beiscsi_conn *beiscsi_conn;
225         struct iscsi_session *session;
226         struct invldt_cmd_tbl inv_tbl;
227         struct beiscsi_hba *phba;
228         struct iscsi_conn *conn;
229         int rc;
230
231         cls_session = starget_to_session(scsi_target(sc->device));
232         session = cls_session->dd_data;
233
234         /* check if we raced, task just got cleaned up under us */
235         spin_lock_bh(&session->back_lock);
236         if (!abrt_task || !abrt_task->sc) {
237                 spin_unlock_bh(&session->back_lock);
238                 return SUCCESS;
239         }
240         /* get a task ref till FW processes the req for the ICD used */
241         __iscsi_get_task(abrt_task);
242         abrt_io_task = abrt_task->dd_data;
243         conn = abrt_task->conn;
244         beiscsi_conn = conn->dd_data;
245         phba = beiscsi_conn->phba;
246         /* mark WRB invalid which have been not processed by FW yet */
247         if (is_chip_be2_be3r(phba)) {
248                 AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
249                               abrt_io_task->pwrb_handle->pwrb, 1);
250         } else {
251                 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, invld,
252                               abrt_io_task->pwrb_handle->pwrb, 1);
253         }
254         inv_tbl.cid = beiscsi_conn->beiscsi_conn_cid;
255         inv_tbl.icd = abrt_io_task->psgl_handle->sgl_index;
256         spin_unlock_bh(&session->back_lock);
257
258         rc = beiscsi_mgmt_invalidate_icds(phba, &inv_tbl, 1);
259         iscsi_put_task(abrt_task);
260         if (rc) {
261                 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
262                             "BM_%d : sc %p invalidation failed %d\n",
263                             sc, rc);
264                 return FAILED;
265         }
266
267         return iscsi_eh_abort(sc);
268 }
269
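/**
 * beiscsi_eh_device_reset - SCSI EH LUN reset handler.
 * @sc: command that triggered the reset
 *
 * Collects the outstanding tasks queued to the same LUN, marks their WRBs
 * invalid, asks the FW to invalidate the ICDs in use, then completes the
 * reset through iscsi_eh_device_reset().
 */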
270 static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
271 {
272         struct beiscsi_invldt_cmd_tbl {
273                 struct invldt_cmd_tbl tbl[BE_INVLDT_CMD_TBL_SZ];
274                 struct iscsi_task *task[BE_INVLDT_CMD_TBL_SZ];
275         } *inv_tbl;
276         struct iscsi_cls_session *cls_session;
277         struct beiscsi_conn *beiscsi_conn;
278         struct beiscsi_io_task *io_task;
279         struct iscsi_session *session;
280         struct beiscsi_hba *phba;
281         struct iscsi_conn *conn;
282         struct iscsi_task *task;
283         unsigned int i, nents;
284         int rc, more = 0;
285
286         cls_session = starget_to_session(scsi_target(sc->device));
287         session = cls_session->dd_data;
288
289         spin_lock_bh(&session->frwd_lock);
290         if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
291                 spin_unlock_bh(&session->frwd_lock);
292                 return FAILED;
293         }
294
295         conn = session->leadconn;
296         beiscsi_conn = conn->dd_data;
297         phba = beiscsi_conn->phba;
298
299         inv_tbl = kzalloc(sizeof(*inv_tbl), GFP_ATOMIC);
300         if (!inv_tbl) {
301                 spin_unlock_bh(&session->frwd_lock);
302                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
303                             "BM_%d : invldt_cmd_tbl alloc failed\n");
304                 return FAILED;
305         }
306         nents = 0;
307         /* take back_lock to prevent task from getting cleaned up under us */
308         spin_lock(&session->back_lock);
309         for (i = 0; i < conn->session->cmds_max; i++) {
310                 task = conn->session->cmds[i];
311                 if (!task->sc)
312                         continue;
313
314                 if (sc->device->lun != task->sc->device->lun)
315                         continue;
316                 /*
317                  * Can't fit in more cmds? Normally this won't happen because
318                  * BEISCSI_CMD_PER_LUN is the same as BE_INVLDT_CMD_TBL_SZ.
319                  */
320                 if (nents == BE_INVLDT_CMD_TBL_SZ) {
321                         more = 1;
322                         break;
323                 }
324
325                 /* get a task ref till FW processes the req for the ICD used */
326                 __iscsi_get_task(task);
327                 io_task = task->dd_data;
328                 /* mark WRB invalid which have been not processed by FW yet */
329                 if (is_chip_be2_be3r(phba)) {
330                         AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
331                                       io_task->pwrb_handle->pwrb, 1);
332                 } else {
333                         AMAP_SET_BITS(struct amap_iscsi_wrb_v2, invld,
334                                       io_task->pwrb_handle->pwrb, 1);
335                 }
336
337                 inv_tbl->tbl[nents].cid = beiscsi_conn->beiscsi_conn_cid;
338                 inv_tbl->tbl[nents].icd = io_task->psgl_handle->sgl_index;
339                 inv_tbl->task[nents] = task;
340                 nents++;
341         }
342         spin_unlock(&session->back_lock);
343         spin_unlock_bh(&session->frwd_lock);
344
345         rc = SUCCESS;
346         if (!nents)
347                 goto end_reset;
348
349         if (more) {
350                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
351                             "BM_%d : number of cmds exceeds size of invalidation table\n");
352                 rc = FAILED;
353                 goto end_reset;
354         }
355
356         if (beiscsi_mgmt_invalidate_icds(phba, &inv_tbl->tbl[0], nents)) {
357                 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
358                             "BM_%d : cid %u scmds invalidation failed\n",
359                             beiscsi_conn->beiscsi_conn_cid);
360                 rc = FAILED;
361         }
362
363 end_reset:
364         for (i = 0; i < nents; i++)
365                 iscsi_put_task(inv_tbl->task[i]);
366         kfree(inv_tbl);
367
368         if (rc == SUCCESS)
369                 rc = iscsi_eh_device_reset(sc);
370         return rc;
371 }
372
373 /*------------------- PCI Driver operations and data ----------------- */
374 static const struct pci_device_id beiscsi_pci_id_table[] = {
375         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
376         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
377         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
378         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
379         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
380         { PCI_DEVICE(ELX_VENDOR_ID, OC_SKH_ID1) },
381         { 0 }
382 };
383 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
384
385
386 static struct scsi_host_template beiscsi_sht = {
387         .module = THIS_MODULE,
388         .name = "Emulex 10Gbe open-iscsi Initiator Driver",
389         .proc_name = DRV_NAME,
390         .queuecommand = iscsi_queuecommand,
391         .change_queue_depth = scsi_change_queue_depth,
392         .target_alloc = iscsi_target_alloc,
393         .eh_timed_out = iscsi_eh_cmd_timed_out,
394         .eh_abort_handler = beiscsi_eh_abort,
395         .eh_device_reset_handler = beiscsi_eh_device_reset,
396         .eh_target_reset_handler = iscsi_eh_session_reset,
397         .shost_groups = beiscsi_groups,
398         .sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
399         .can_queue = BE2_IO_DEPTH,
400         .this_id = -1,
401         .max_sectors = BEISCSI_MAX_SECTORS,
402         .max_segment_size = 65536,
403         .cmd_per_lun = BEISCSI_CMD_PER_LUN,
404         .vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,
405         .track_queue_depth = 1,
406         .cmd_size = sizeof(struct iscsi_cmd),
407 };
408
409 static struct scsi_transport_template *beiscsi_scsi_transport;
410
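/**
 * beiscsi_hba_alloc - allocate the Scsi_Host and driver private data.
 * @pcidev: PCI device being probed
 *
 * Sets the host limits, takes a reference on the PCI device and links the
 * beiscsi_hba back to it through drvdata.
 */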
411 static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
412 {
413         struct beiscsi_hba *phba;
414         struct Scsi_Host *shost;
415
416         shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
417         if (!shost) {
418                 dev_err(&pcidev->dev,
419                         "beiscsi_hba_alloc - iscsi_host_alloc failed\n");
420                 return NULL;
421         }
422         shost->max_id = BE2_MAX_SESSIONS - 1;
423         shost->max_channel = 0;
424         shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
425         shost->max_lun = BEISCSI_NUM_MAX_LUN;
426         shost->transportt = beiscsi_scsi_transport;
427         phba = iscsi_host_priv(shost);
428         memset(phba, 0, sizeof(*phba));
429         phba->shost = shost;
430         phba->pcidev = pci_dev_get(pcidev);
431         pci_set_drvdata(pcidev, phba);
432         phba->interface_handle = 0xFFFFFFFF;
433
434         return phba;
435 }
436
437 static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
438 {
439         if (phba->csr_va) {
440                 iounmap(phba->csr_va);
441                 phba->csr_va = NULL;
442         }
443         if (phba->db_va) {
444                 iounmap(phba->db_va);
445                 phba->db_va = NULL;
446         }
447         if (phba->pci_va) {
448                 iounmap(phba->pci_va);
449                 phba->pci_va = NULL;
450         }
451 }
452
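/**
 * beiscsi_map_pci_bars - ioremap the PCI BARs used by the driver.
 * @phba: driver private structure
 * @pcidev: PCI device
 *
 * Maps BAR 2 for the CSR registers, 128K of BAR 4 for the doorbells and
 * the PCI config BAR (BAR 1 on BE_GEN2 adapters, BAR 0 otherwise). All
 * mappings are undone on failure.
 */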
453 static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
454                                 struct pci_dev *pcidev)
455 {
456         u8 __iomem *addr;
457         int pcicfg_reg;
458
459         addr = ioremap(pci_resource_start(pcidev, 2),
460                                pci_resource_len(pcidev, 2));
461         if (addr == NULL)
462                 return -ENOMEM;
463         phba->ctrl.csr = addr;
464         phba->csr_va = addr;
465
466         addr = ioremap(pci_resource_start(pcidev, 4), 128 * 1024);
467         if (addr == NULL)
468                 goto pci_map_err;
469         phba->ctrl.db = addr;
470         phba->db_va = addr;
471
472         if (phba->generation == BE_GEN2)
473                 pcicfg_reg = 1;
474         else
475                 pcicfg_reg = 0;
476
477         addr = ioremap(pci_resource_start(pcidev, pcicfg_reg),
478                                pci_resource_len(pcidev, pcicfg_reg));
479
480         if (addr == NULL)
481                 goto pci_map_err;
482         phba->ctrl.pcicfg = addr;
483         phba->pci_va = addr;
484         return 0;
485
486 pci_map_err:
487         beiscsi_unmap_pci_function(phba);
488         return -ENOMEM;
489 }
490
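/**
 * beiscsi_enable_pci - bring up the PCI function.
 * @pcidev: PCI device
 *
 * Enables the device, requests its regions, enables bus mastering and sets
 * a 64-bit DMA mask with a 32-bit fallback.
 */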
491 static int beiscsi_enable_pci(struct pci_dev *pcidev)
492 {
493         int ret;
494
495         ret = pci_enable_device(pcidev);
496         if (ret) {
497                 dev_err(&pcidev->dev,
498                         "beiscsi_enable_pci - enable device failed\n");
499                 return ret;
500         }
501
502         ret = pci_request_regions(pcidev, DRV_NAME);
503         if (ret) {
504                 dev_err(&pcidev->dev,
505                                 "beiscsi_enable_pci - request region failed\n");
506                 goto pci_dev_disable;
507         }
508
509         pci_set_master(pcidev);
510         ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64));
511         if (ret) {
512                 ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32));
513                 if (ret) {
514                         dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
515                         goto pci_region_release;
516                 }
517         }
518         return 0;
519
520 pci_region_release:
521         pci_release_regions(pcidev);
522 pci_dev_disable:
523         pci_disable_device(pcidev);
524
525         return ret;
526 }
527
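/**
 * be_ctrl_init - initialize the adapter control structure.
 * @phba: driver private structure
 * @pdev: PCI device
 *
 * Maps the PCI BARs and allocates the DMA-coherent mailbox memory used for
 * MBOX/MCC commands, aligning the mailbox itself to a 16-byte boundary.
 */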
528 static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
529 {
530         struct be_ctrl_info *ctrl = &phba->ctrl;
531         struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
532         struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
533         int status = 0;
534
535         ctrl->pdev = pdev;
536         status = beiscsi_map_pci_bars(phba, pdev);
537         if (status)
538                 return status;
539         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
540         mbox_mem_alloc->va = dma_alloc_coherent(&pdev->dev,
541                         mbox_mem_alloc->size, &mbox_mem_alloc->dma, GFP_KERNEL);
542         if (!mbox_mem_alloc->va) {
543                 beiscsi_unmap_pci_function(phba);
544                 return -ENOMEM;
545         }
546
547         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
548         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
549         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
550         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
551         mutex_init(&ctrl->mbox_lock);
552         spin_lock_init(&phba->ctrl.mcc_lock);
553
554         return status;
555 }
556
557 /**
558  * beiscsi_get_params() - Set the config parameters
559  * @phba: pointer to the driver private structure
560  **/
561 static void beiscsi_get_params(struct beiscsi_hba *phba)
562 {
563         uint32_t total_cid_count = 0;
564         uint32_t total_icd_count = 0;
565         uint8_t ulp_num = 0;
566
567         total_cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
568                           BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);
569
570         for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
571                 uint32_t align_mask = 0;
572                 uint32_t icd_post_per_page = 0;
573                 uint32_t icd_count_unavailable = 0;
574                 uint32_t icd_start = 0, icd_count = 0;
575                 uint32_t icd_start_align = 0, icd_count_align = 0;
576
577                 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
578                         icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
579                         icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
580
581                         /* Get ICD count that can be posted on each page */
582                         icd_post_per_page = (PAGE_SIZE / (BE2_SGE *
583                                              sizeof(struct iscsi_sge)));
584                         align_mask = (icd_post_per_page - 1);
585
586                         /* Check if icd_start is aligned to ICD-per-page posting */
587                         if (icd_start % icd_post_per_page) {
588                                 icd_start_align = ((icd_start +
589                                                     icd_post_per_page) &
590                                                     ~(align_mask));
591                                 phba->fw_config.
592                                         iscsi_icd_start[ulp_num] =
593                                         icd_start_align;
594                         }
595
596                         icd_count_align = (icd_count & ~align_mask);
597
598                         /* ICD discarded in the process of alignment */
599                         if (icd_start_align)
600                                 icd_count_unavailable = ((icd_start_align -
601                                                           icd_start) +
602                                                          (icd_count -
603                                                           icd_count_align));
604
605                         /* Updated ICD count available */
606                         phba->fw_config.iscsi_icd_count[ulp_num] = (icd_count -
607                                         icd_count_unavailable);
608
609                         beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
610                                         "BM_%d : Aligned ICD values\n"
611                                         "\t ICD Start : %d\n"
612                                         "\t ICD Count : %d\n"
613                                         "\t ICD Discarded : %d\n",
614                                         phba->fw_config.
615                                         iscsi_icd_start[ulp_num],
616                                         phba->fw_config.
617                                         iscsi_icd_count[ulp_num],
618                                         icd_count_unavailable);
619                         break;
620                 }
621         }
622
623         total_icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
624         phba->params.ios_per_ctrl = (total_icd_count -
625                                     (total_cid_count +
626                                      BE2_TMFS + BE2_NOPOUT_REQ));
627         phba->params.cxns_per_ctrl = total_cid_count;
628         phba->params.icds_per_ctrl = total_icd_count;
629         phba->params.num_sge_per_io = BE2_SGE;
630         phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
631         phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
632         phba->params.num_eq_entries = 1024;
633         phba->params.num_cq_entries = 1024;
634         phba->params.wrbs_per_cxn = 256;
635 }
636
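/**
 * hwi_ring_eq_db - ring an event queue doorbell.
 * @phba: driver private structure
 * @id: EQ id
 * @clr_interrupt: clear the interrupt if set
 * @num_processed: number of EQEs popped from the queue
 * @rearm: rearm the EQ for further events if set
 * @event: set the event bit
 */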
637 static void hwi_ring_eq_db(struct beiscsi_hba *phba,
638                            unsigned int id, unsigned int clr_interrupt,
639                            unsigned int num_processed,
640                            unsigned char rearm, unsigned char event)
641 {
642         u32 val = 0;
643
644         if (rearm)
645                 val |= 1 << DB_EQ_REARM_SHIFT;
646         if (clr_interrupt)
647                 val |= 1 << DB_EQ_CLR_SHIFT;
648         if (event)
649                 val |= 1 << DB_EQ_EVNT_SHIFT;
650
651         val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
652         /* Setting lower order EQ_ID Bits */
653         val |= (id & DB_EQ_RING_ID_LOW_MASK);
654
655         /* Setting Higher order EQ_ID Bits */
656         val |= (((id >> DB_EQ_HIGH_FEILD_SHIFT) &
657                   DB_EQ_RING_ID_HIGH_MASK)
658                   << DB_EQ_HIGH_SET_SHIFT);
659
660         iowrite32(val, phba->db_va + DB_EQ_OFFSET);
661 }
662
663 /**
664  * be_isr_mcc - ISR for the event queue carrying MCC (management) completions.
665  * @irq: Not used
666  * @dev_id: Pointer to the be_eq_obj for this vector
667  */
668 static irqreturn_t be_isr_mcc(int irq, void *dev_id)
669 {
670         struct beiscsi_hba *phba;
671         struct be_eq_entry *eqe;
672         struct be_queue_info *eq;
673         struct be_queue_info *mcc;
674         unsigned int mcc_events;
675         struct be_eq_obj *pbe_eq;
676
677         pbe_eq = dev_id;
678         eq = &pbe_eq->q;
679         phba =  pbe_eq->phba;
680         mcc = &phba->ctrl.mcc_obj.cq;
681         eqe = queue_tail_node(eq);
682
683         mcc_events = 0;
684         while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
685                                 & EQE_VALID_MASK) {
686                 if (((eqe->dw[offsetof(struct amap_eq_entry,
687                      resource_id) / 32] &
688                      EQE_RESID_MASK) >> 16) == mcc->id) {
689                         mcc_events++;
690                 }
691                 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
692                 queue_tail_inc(eq);
693                 eqe = queue_tail_node(eq);
694         }
695
696         if (mcc_events) {
697                 queue_work(phba->wq, &pbe_eq->mcc_work);
698                 hwi_ring_eq_db(phba, eq->id, 1, mcc_events, 1, 1);
699         }
700         return IRQ_HANDLED;
701 }
702
703 /**
704  * be_isr_msix - Per-vector MSI-X ISR for the I/O event queues.
705  * @irq: Not used
706  * @dev_id: Pointer to the be_eq_obj for this vector
707  */
708 static irqreturn_t be_isr_msix(int irq, void *dev_id)
709 {
710         struct beiscsi_hba *phba;
711         struct be_queue_info *eq;
712         struct be_eq_obj *pbe_eq;
713
714         pbe_eq = dev_id;
715         eq = &pbe_eq->q;
716
717         phba = pbe_eq->phba;
718         /* disable interrupt till iopoll completes */
719         hwi_ring_eq_db(phba, eq->id, 1, 0, 0, 1);
720         irq_poll_sched(&pbe_eq->iopoll);
721
722         return IRQ_HANDLED;
723 }
724
725 /**
726  * be_isr - Legacy INTx ISR handling both I/O and MCC events.
727  * @irq: Not used
728  * @dev_id: Pointer to host adapter structure
729  */
730 static irqreturn_t be_isr(int irq, void *dev_id)
731 {
732         struct beiscsi_hba *phba;
733         struct hwi_controller *phwi_ctrlr;
734         struct hwi_context_memory *phwi_context;
735         struct be_eq_entry *eqe;
736         struct be_queue_info *eq;
737         struct be_queue_info *mcc;
738         unsigned int mcc_events, io_events;
739         struct be_ctrl_info *ctrl;
740         struct be_eq_obj *pbe_eq;
741         int isr, rearm;
742
743         phba = dev_id;
744         ctrl = &phba->ctrl;
745         isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
746                        (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
747         if (!isr)
748                 return IRQ_NONE;
749
750         phwi_ctrlr = phba->phwi_ctrlr;
751         phwi_context = phwi_ctrlr->phwi_ctxt;
752         pbe_eq = &phwi_context->be_eq[0];
753
754         eq = &phwi_context->be_eq[0].q;
755         mcc = &phba->ctrl.mcc_obj.cq;
756         eqe = queue_tail_node(eq);
757
758         io_events = 0;
759         mcc_events = 0;
760         while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
761                                 & EQE_VALID_MASK) {
762                 if (((eqe->dw[offsetof(struct amap_eq_entry,
763                       resource_id) / 32] & EQE_RESID_MASK) >> 16) == mcc->id)
764                         mcc_events++;
765                 else
766                         io_events++;
767                 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
768                 queue_tail_inc(eq);
769                 eqe = queue_tail_node(eq);
770         }
771         if (!io_events && !mcc_events)
772                 return IRQ_NONE;
773
774         /* no need to rearm if interrupt is only for IOs */
775         rearm = 0;
776         if (mcc_events) {
777                 queue_work(phba->wq, &pbe_eq->mcc_work);
778                 /* rearm for MCCQ */
779                 rearm = 1;
780         }
781         if (io_events)
782                 irq_poll_sched(&pbe_eq->iopoll);
783         hwi_ring_eq_db(phba, eq->id, 0, (io_events + mcc_events), rearm, 1);
784         return IRQ_HANDLED;
785 }
786
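/*
 * Free the registered IRQs: either the per-EQ MSI-X vectors (including the
 * extra MCC vector at index num_cpus) or the single legacy INTx line.
 */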
787 static void beiscsi_free_irqs(struct beiscsi_hba *phba)
788 {
789         struct hwi_context_memory *phwi_context;
790         int i;
791
792         if (!phba->pcidev->msix_enabled) {
793                 if (phba->pcidev->irq)
794                         free_irq(phba->pcidev->irq, phba);
795                 return;
796         }
797
798         phwi_context = phba->phwi_ctrlr->phwi_ctxt;
799         for (i = 0; i <= phba->num_cpus; i++) {
800                 free_irq(pci_irq_vector(phba->pcidev, i),
801                          &phwi_context->be_eq[i]);
802                 kfree(phba->msi_name[i]);
803         }
804 }
805
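/**
 * beiscsi_init_irqs - register the interrupt handlers.
 * @phba: driver private structure
 *
 * With MSI-X, registers be_isr_msix on one vector per CPU for the I/O EQs
 * and be_isr_mcc on the last vector for MCC events; otherwise registers
 * the shared legacy handler be_isr.
 */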
806 static int beiscsi_init_irqs(struct beiscsi_hba *phba)
807 {
808         struct pci_dev *pcidev = phba->pcidev;
809         struct hwi_controller *phwi_ctrlr;
810         struct hwi_context_memory *phwi_context;
811         int ret, i, j;
812
813         phwi_ctrlr = phba->phwi_ctrlr;
814         phwi_context = phwi_ctrlr->phwi_ctxt;
815
816         if (pcidev->msix_enabled) {
817                 for (i = 0; i < phba->num_cpus; i++) {
818                         phba->msi_name[i] = kasprintf(GFP_KERNEL,
819                                                       "beiscsi_%02x_%02x",
820                                                       phba->shost->host_no, i);
821                         if (!phba->msi_name[i]) {
822                                 ret = -ENOMEM;
823                                 goto free_msix_irqs;
824                         }
825
826                         ret = request_irq(pci_irq_vector(pcidev, i),
827                                           be_isr_msix, 0, phba->msi_name[i],
828                                           &phwi_context->be_eq[i]);
829                         if (ret) {
830                                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
831                                             "BM_%d : %s-Failed to register msix for i = %d\n",
832                                             __func__, i);
833                                 kfree(phba->msi_name[i]);
834                                 goto free_msix_irqs;
835                         }
836                 }
837                 phba->msi_name[i] = kasprintf(GFP_KERNEL, "beiscsi_mcc_%02x",
838                                               phba->shost->host_no);
839                 if (!phba->msi_name[i]) {
840                         ret = -ENOMEM;
841                         goto free_msix_irqs;
842                 }
843                 ret = request_irq(pci_irq_vector(pcidev, i), be_isr_mcc, 0,
844                                   phba->msi_name[i], &phwi_context->be_eq[i]);
845                 if (ret) {
846                         beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
847                                     "BM_%d : %s-Failed to register beiscsi_msix_mcc\n",
848                                     __func__);
849                         kfree(phba->msi_name[i]);
850                         goto free_msix_irqs;
851                 }
852
853         } else {
854                 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
855                                   "beiscsi", phba);
856                 if (ret) {
857                         beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
858                                     "BM_%d : %s-Failed to register irq\n",
859                                     __func__);
860                         return ret;
861                 }
862         }
863         return 0;
864 free_msix_irqs:
865         for (j = i - 1; j >= 0; j--) {
866                 free_irq(pci_irq_vector(pcidev, j), &phwi_context->be_eq[j]);
867                 kfree(phba->msi_name[j]);
868         }
869         return ret;
870 }
871
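/**
 * hwi_ring_cq_db - ring a completion queue doorbell.
 * @phba: driver private structure
 * @id: CQ id
 * @num_processed: number of CQEs popped from the queue
 * @rearm: rearm the CQ for further completions if set
 */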
872 void hwi_ring_cq_db(struct beiscsi_hba *phba,
873                            unsigned int id, unsigned int num_processed,
874                            unsigned char rearm)
875 {
876         u32 val = 0;
877
878         if (rearm)
879                 val |= 1 << DB_CQ_REARM_SHIFT;
880
881         val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
882
883         /* Setting lower order CQ_ID Bits */
884         val |= (id & DB_CQ_RING_ID_LOW_MASK);
885
886         /* Setting Higher order CQ_ID Bits */
887         val |= (((id >> DB_CQ_HIGH_FEILD_SHIFT) &
888                   DB_CQ_RING_ID_HIGH_MASK)
889                   << DB_CQ_HIGH_SET_SHIFT);
890
891         iowrite32(val, phba->db_va + DB_CQ_OFFSET);
892 }
893
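/*
 * Pop the next free I/O SGL handle from the pool, or return NULL when none
 * is available. The pool indices wrap at ios_per_ctrl and are protected by
 * io_sgl_lock.
 */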
894 static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
895 {
896         struct sgl_handle *psgl_handle;
897         unsigned long flags;
898
899         spin_lock_irqsave(&phba->io_sgl_lock, flags);
900         if (phba->io_sgl_hndl_avbl) {
901                 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
902                             "BM_%d : In alloc_io_sgl_handle,"
903                             " io_sgl_alloc_index=%d\n",
904                             phba->io_sgl_alloc_index);
905
906                 psgl_handle = phba->io_sgl_hndl_base[phba->
907                                                 io_sgl_alloc_index];
908                 phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
909                 phba->io_sgl_hndl_avbl--;
910                 if (phba->io_sgl_alloc_index == (phba->params.
911                                                  ios_per_ctrl - 1))
912                         phba->io_sgl_alloc_index = 0;
913                 else
914                         phba->io_sgl_alloc_index++;
915         } else
916                 psgl_handle = NULL;
917         spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
918         return psgl_handle;
919 }
920
921 static void
922 free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
923 {
924         unsigned long flags;
925
926         spin_lock_irqsave(&phba->io_sgl_lock, flags);
927         beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
928                     "BM_%d : In free_io_sgl_handle, io_sgl_free_index=%d\n",
929                     phba->io_sgl_free_index);
930
931         if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
932                 /*
933                  * this can happen if clean_task is called on a task that
934                  * failed in xmit_task or alloc_pdu.
935                  */
936                 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
937                             "BM_%d : Double Free in IO SGL io_sgl_free_index=%d, value there=%p\n",
938                             phba->io_sgl_free_index,
939                             phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
940                 spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
941                 return;
942         }
943         phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
944         phba->io_sgl_hndl_avbl++;
945         if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
946                 phba->io_sgl_free_index = 0;
947         else
948                 phba->io_sgl_free_index++;
949         spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
950 }
951
952 static inline struct wrb_handle *
953 beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context,
954                        unsigned int wrbs_per_cxn)
955 {
956         struct wrb_handle *pwrb_handle;
957         unsigned long flags;
958
959         spin_lock_irqsave(&pwrb_context->wrb_lock, flags);
960         if (!pwrb_context->wrb_handles_available) {
961                 spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
962                 return NULL;
963         }
964         pwrb_handle = pwrb_context->pwrb_handle_base[pwrb_context->alloc_index];
965         pwrb_context->wrb_handles_available--;
966         if (pwrb_context->alloc_index == (wrbs_per_cxn - 1))
967                 pwrb_context->alloc_index = 0;
968         else
969                 pwrb_context->alloc_index++;
970         spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
971
972         if (pwrb_handle)
973                 memset(pwrb_handle->pwrb, 0, sizeof(*pwrb_handle->pwrb));
974
975         return pwrb_handle;
976 }
977
978 /**
979  * alloc_wrb_handle - To allocate a wrb handle
980  * @phba: The hba pointer
981  * @cid: The cid to use for allocation
982  * @pcontext: ptr to ptr to wrb context
983  *
984  * This happens under session_lock until submission to chip
985  */
986 struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
987                                     struct hwi_wrb_context **pcontext)
988 {
989         struct hwi_wrb_context *pwrb_context;
990         struct hwi_controller *phwi_ctrlr;
991         uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
992
993         phwi_ctrlr = phba->phwi_ctrlr;
994         pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
995         /* return the context address */
996         *pcontext = pwrb_context;
997         return beiscsi_get_wrb_handle(pwrb_context, phba->params.wrbs_per_cxn);
998 }
999
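/*
 * Return a WRB handle to the per-connection pool at free_index, wrapping at
 * wrbs_per_cxn, and clear its cached iSCSI task pointer.
 */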
1000 static inline void
1001 beiscsi_put_wrb_handle(struct hwi_wrb_context *pwrb_context,
1002                        struct wrb_handle *pwrb_handle,
1003                        unsigned int wrbs_per_cxn)
1004 {
1005         unsigned long flags;
1006
1007         spin_lock_irqsave(&pwrb_context->wrb_lock, flags);
1008         pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
1009         pwrb_context->wrb_handles_available++;
1010         if (pwrb_context->free_index == (wrbs_per_cxn - 1))
1011                 pwrb_context->free_index = 0;
1012         else
1013                 pwrb_context->free_index++;
1014         pwrb_handle->pio_handle = NULL;
1015         spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
1016 }
1017
1018 /**
1019  * free_wrb_handle - To free the wrb handle back to pool
1020  * @phba: The hba pointer
1021  * @pwrb_context: The context to free from
1022  * @pwrb_handle: The wrb_handle to free
1023  *
1024  * This happens under session_lock until submission to chip
1025  */
1026 static void
1027 free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
1028                 struct wrb_handle *pwrb_handle)
1029 {
1030         beiscsi_put_wrb_handle(pwrb_context,
1031                                pwrb_handle,
1032                                phba->params.wrbs_per_cxn);
1033         beiscsi_log(phba, KERN_INFO,
1034                     BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1035                     "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x "
1036                     "wrb_handles_available=%d\n",
1037                     pwrb_handle, pwrb_context->free_index,
1038                     pwrb_context->wrb_handles_available);
1039 }
1040
1041 static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
1042 {
1043         struct sgl_handle *psgl_handle;
1044         unsigned long flags;
1045
1046         spin_lock_irqsave(&phba->mgmt_sgl_lock, flags);
1047         if (phba->eh_sgl_hndl_avbl) {
1048                 psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
1049                 phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
1050                 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
1051                             "BM_%d : mgmt_sgl_alloc_index=%d=0x%x\n",
1052                             phba->eh_sgl_alloc_index,
1053                             phba->eh_sgl_alloc_index);
1054
1055                 phba->eh_sgl_hndl_avbl--;
1056                 if (phba->eh_sgl_alloc_index ==
1057                     (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
1058                      1))
1059                         phba->eh_sgl_alloc_index = 0;
1060                 else
1061                         phba->eh_sgl_alloc_index++;
1062         } else
1063                 psgl_handle = NULL;
1064         spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
1065         return psgl_handle;
1066 }
1067
1068 void
1069 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
1070 {
1071         unsigned long flags;
1072
1073         spin_lock_irqsave(&phba->mgmt_sgl_lock, flags);
1074         beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
1075                     "BM_%d : In free_mgmt_sgl_handle, "
1076                     "eh_sgl_free_index=%d\n",
1077                     phba->eh_sgl_free_index);
1078
1079         if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
1080                 /*
1081                  * this can happen if clean_task is called on a task that
1082                  * failed in xmit_task or alloc_pdu.
1083                  */
1084                 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
1085                             "BM_%d : Double Free in eh SGL, "
1086                             "eh_sgl_free_index=%d\n",
1087                             phba->eh_sgl_free_index);
1088                 spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
1089                 return;
1090         }
1091         phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
1092         phba->eh_sgl_hndl_avbl++;
1093         if (phba->eh_sgl_free_index ==
1094             (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
1095                 phba->eh_sgl_free_index = 0;
1096         else
1097                 phba->eh_sgl_free_index++;
1098         spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
1099 }
1100
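/*
 * be_complete_io - complete a solicited SCSI command CQE: translate the
 * firmware status into the scsi_cmnd result, residual and sense data,
 * unmap the DMA buffers and finish the task via iscsi_complete_scsi_task().
 */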
1101 static void
1102 be_complete_io(struct beiscsi_conn *beiscsi_conn,
1103                 struct iscsi_task *task,
1104                 struct common_sol_cqe *csol_cqe)
1105 {
1106         struct beiscsi_io_task *io_task = task->dd_data;
1107         struct be_status_bhs *sts_bhs =
1108                                 (struct be_status_bhs *)io_task->cmd_bhs;
1109         struct iscsi_conn *conn = beiscsi_conn->conn;
1110         unsigned char *sense;
1111         u32 resid = 0, exp_cmdsn, max_cmdsn;
1112         u8 rsp, status, flags;
1113
1114         exp_cmdsn = csol_cqe->exp_cmdsn;
1115         max_cmdsn = (csol_cqe->exp_cmdsn +
1116                      csol_cqe->cmd_wnd - 1);
1117         rsp = csol_cqe->i_resp;
1118         status = csol_cqe->i_sts;
1119         flags = csol_cqe->i_flags;
1120         resid = csol_cqe->res_cnt;
1121
1122         if (!task->sc) {
1123                 if (io_task->scsi_cmnd) {
1124                         scsi_dma_unmap(io_task->scsi_cmnd);
1125                         io_task->scsi_cmnd = NULL;
1126                 }
1127
1128                 return;
1129         }
1130         task->sc->result = (DID_OK << 16) | status;
1131         if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
1132                 task->sc->result = DID_ERROR << 16;
1133                 goto unmap;
1134         }
1135
1136         /* bidi not initially supported */
1137         if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
1138                 if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
1139                         task->sc->result = DID_ERROR << 16;
1140
1141                 if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
1142                         scsi_set_resid(task->sc, resid);
1143                         if (!status && (scsi_bufflen(task->sc) - resid <
1144                             task->sc->underflow))
1145                                 task->sc->result = DID_ERROR << 16;
1146                 }
1147         }
1148
1149         if (status == SAM_STAT_CHECK_CONDITION) {
1150                 u16 sense_len;
1151                 unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
1152
1153                 sense = sts_bhs->sense_info + sizeof(unsigned short);
1154                 sense_len = be16_to_cpu(*slen);
1155                 memcpy(task->sc->sense_buffer, sense,
1156                        min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
1157         }
1158
1159         if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ)
1160                 conn->rxdata_octets += resid;
1161 unmap:
1162         if (io_task->scsi_cmnd) {
1163                 scsi_dma_unmap(io_task->scsi_cmnd);
1164                 io_task->scsi_cmnd = NULL;
1165         }
1166         iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
1167 }
1168
1169 static void
1170 be_complete_logout(struct beiscsi_conn *beiscsi_conn,
1171                     struct iscsi_task *task,
1172                     struct common_sol_cqe *csol_cqe)
1173 {
1174         struct iscsi_logout_rsp *hdr;
1175         struct beiscsi_io_task *io_task = task->dd_data;
1176         struct iscsi_conn *conn = beiscsi_conn->conn;
1177
1178         hdr = (struct iscsi_logout_rsp *)task->hdr;
1179         hdr->opcode = ISCSI_OP_LOGOUT_RSP;
1180         hdr->t2wait = 5;
1181         hdr->t2retain = 0;
1182         hdr->flags = csol_cqe->i_flags;
1183         hdr->response = csol_cqe->i_resp;
1184         hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
1185         hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
1186                                      csol_cqe->cmd_wnd - 1);
1187
1188         hdr->dlength[0] = 0;
1189         hdr->dlength[1] = 0;
1190         hdr->dlength[2] = 0;
1191         hdr->hlength = 0;
1192         hdr->itt = io_task->libiscsi_itt;
1193         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1194 }
1195
1196 static void
1197 be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
1198                  struct iscsi_task *task,
1199                  struct common_sol_cqe *csol_cqe)
1200 {
1201         struct iscsi_tm_rsp *hdr;
1202         struct iscsi_conn *conn = beiscsi_conn->conn;
1203         struct beiscsi_io_task *io_task = task->dd_data;
1204
1205         hdr = (struct iscsi_tm_rsp *)task->hdr;
1206         hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
1207         hdr->flags = csol_cqe->i_flags;
1208         hdr->response = csol_cqe->i_resp;
1209         hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
1210         hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
1211                                      csol_cqe->cmd_wnd - 1);
1212
1213         hdr->itt = io_task->libiscsi_itt;
1214         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1215 }
1216
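/*
 * Completion path for driver-message WRBs: look up the WRB handle from the
 * CQE's cid/wrb_idx and drop the task reference taken when it was posted.
 */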
1217 static void
1218 hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
1219                        struct beiscsi_hba *phba, struct sol_cqe *psol)
1220 {
1221         struct hwi_wrb_context *pwrb_context;
1222         uint16_t wrb_index, cid, cri_index;
1223         struct hwi_controller *phwi_ctrlr;
1224         struct wrb_handle *pwrb_handle;
1225         struct iscsi_session *session;
1226         struct iscsi_task *task;
1227
1228         phwi_ctrlr = phba->phwi_ctrlr;
1229         if (is_chip_be2_be3r(phba)) {
1230                 wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
1231                                           wrb_idx, psol);
1232                 cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
1233                                     cid, psol);
1234         } else {
1235                 wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
1236                                           wrb_idx, psol);
1237                 cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
1238                                     cid, psol);
1239         }
1240
1241         cri_index = BE_GET_CRI_FROM_CID(cid);
1242         pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
1243         pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index];
1244         session = beiscsi_conn->conn->session;
1245         spin_lock_bh(&session->back_lock);
1246         task = pwrb_handle->pio_handle;
1247         if (task)
1248                 __iscsi_put_task(task);
1249         spin_unlock_bh(&session->back_lock);
1250 }
1251
1252 static void
1253 be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
1254                         struct iscsi_task *task,
1255                         struct common_sol_cqe *csol_cqe)
1256 {
1257         struct iscsi_nopin *hdr;
1258         struct iscsi_conn *conn = beiscsi_conn->conn;
1259         struct beiscsi_io_task *io_task = task->dd_data;
1260
1261         hdr = (struct iscsi_nopin *)task->hdr;
1262         hdr->flags = csol_cqe->i_flags;
1263         hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
1264         hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
1265                                      csol_cqe->cmd_wnd - 1);
1266
1267         hdr->opcode = ISCSI_OP_NOOP_IN;
1268         hdr->itt = io_task->libiscsi_itt;
1269         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1270 }
1271
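/*
 * Normalize a solicited CQE into struct common_sol_cqe, hiding the layout
 * differences between the BE2/BE3 and the v2 (later adapters) CQE formats.
 */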
1272 static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
1273                 struct sol_cqe *psol,
1274                 struct common_sol_cqe *csol_cqe)
1275 {
1276         if (is_chip_be2_be3r(phba)) {
1277                 csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe,
1278                                                     i_exp_cmd_sn, psol);
1279                 csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe,
1280                                                   i_res_cnt, psol);
1281                 csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
1282                                                   i_cmd_wnd, psol);
1283                 csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe,
1284                                                     wrb_index, psol);
1285                 csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe,
1286                                               cid, psol);
1287                 csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe,
1288                                                  hw_sts, psol);
1289                 csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe,
1290                                                  i_resp, psol);
1291                 csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe,
1292                                                 i_sts, psol);
1293                 csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe,
1294                                                   i_flags, psol);
1295         } else {
1296                 csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1297                                                     i_exp_cmd_sn, psol);
1298                 csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1299                                                   i_res_cnt, psol);
1300                 csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1301                                                     wrb_index, psol);
1302                 csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1303                                               cid, psol);
1304                 csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1305                                                  hw_sts, psol);
1306                 csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1307                                                   i_cmd_wnd, psol);
1308                 if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
1309                                   cmd_cmpl, psol))
1310                         csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1311                                                         i_sts, psol);
1312                 else
1313                         csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1314                                                          i_sts, psol);
1315                 if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
1316                                   u, psol))
1317                         csol_cqe->i_flags = ISCSI_FLAG_CMD_UNDERFLOW;
1318
1319                 if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
1320                                   o, psol))
1321                         csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW;
1322         }
1323 }
1324
1325
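/*
 * hwi_complete_cmd - dispatch a solicited completion: look up the task
 * behind the completed WRB under the session back_lock and route the CQE
 * to the I/O, logout, TMF or NOP-In completion handler based on WRB type.
 */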
1326 static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
1327                              struct beiscsi_hba *phba, struct sol_cqe *psol)
1328 {
1329         struct iscsi_conn *conn = beiscsi_conn->conn;
1330         struct iscsi_session *session = conn->session;
1331         struct common_sol_cqe csol_cqe = {0};
1332         struct hwi_wrb_context *pwrb_context;
1333         struct hwi_controller *phwi_ctrlr;
1334         struct wrb_handle *pwrb_handle;
1335         struct iscsi_task *task;
1336         uint16_t cri_index = 0;
1337         uint8_t type;
1338
1339         phwi_ctrlr = phba->phwi_ctrlr;
1340
1341         /* Copy the elements to a common structure */
1342         adapter_get_sol_cqe(phba, psol, &csol_cqe);
1343
1344         cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid);
1345         pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
1346
1347         pwrb_handle = pwrb_context->pwrb_handle_basestd[
1348                       csol_cqe.wrb_index];
1349
1350         spin_lock_bh(&session->back_lock);
1351         task = pwrb_handle->pio_handle;
1352         if (!task) {
1353                 spin_unlock_bh(&session->back_lock);
1354                 return;
1355         }
1356         type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type;
1357
1358         switch (type) {
1359         case HWH_TYPE_IO:
1360         case HWH_TYPE_IO_RD:
1361                 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
1362                      ISCSI_OP_NOOP_OUT)
1363                         be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
1364                 else
1365                         be_complete_io(beiscsi_conn, task, &csol_cqe);
1366                 break;
1367
1368         case HWH_TYPE_LOGOUT:
1369                 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
1370                         be_complete_logout(beiscsi_conn, task, &csol_cqe);
1371                 else
1372                         be_complete_tmf(beiscsi_conn, task, &csol_cqe);
1373                 break;
1374
1375         case HWH_TYPE_LOGIN:
1376                 beiscsi_log(phba, KERN_ERR,
1377                             BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1378                             "BM_%d : No HWH_TYPE_LOGIN expected in %s - solicited path\n",
1379                             __func__);
1380                 break;
1381
1382         case HWH_TYPE_NOP:
1383                 be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
1384                 break;
1385
1386         default:
1387                 beiscsi_log(phba, KERN_WARNING,
1388                             BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1389                             "BM_%d : In %s, unknown type = %d "
1390                             "wrb_index 0x%x CID 0x%x\n", __func__, type,
1391                             csol_cqe.wrb_index,
1392                             csol_cqe.cid);
1393                 break;
1394         }
1395
1396         spin_unlock_bh(&session->back_lock);
1397 }
1398
1399 /*
1400  * ASYNC PDUs include
1401  * a. Unsolicited NOP-In (target initiated NOP-In)
1402  * b. ASYNC Messages
1403  * c. Reject PDU
1404  * d. Login response
1405  * These headers arrive unprocessed by the EP firmware.
1406  * The iSCSI layer processes them.
1407  */
1408 static unsigned int
1409 beiscsi_complete_pdu(struct beiscsi_conn *beiscsi_conn,
1410                 struct pdu_base *phdr, void *pdata, unsigned int dlen)
1411 {
1412         struct beiscsi_hba *phba = beiscsi_conn->phba;
1413         struct iscsi_conn *conn = beiscsi_conn->conn;
1414         struct beiscsi_io_task *io_task;
1415         struct iscsi_hdr *login_hdr;
1416         struct iscsi_task *task;
1417         u8 code;
1418
1419         code = AMAP_GET_BITS(struct amap_pdu_base, opcode, phdr);
1420         switch (code) {
1421         case ISCSI_OP_NOOP_IN:
1422                 pdata = NULL;
1423                 dlen = 0;
1424                 break;
1425         case ISCSI_OP_ASYNC_EVENT:
1426                 break;
1427         case ISCSI_OP_REJECT:
1428                 WARN_ON(!pdata);
1429                 WARN_ON(dlen != 48);
1430                 beiscsi_log(phba, KERN_ERR,
1431                             BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1432                             "BM_%d : In ISCSI_OP_REJECT\n");
1433                 break;
1434         case ISCSI_OP_LOGIN_RSP:
1435         case ISCSI_OP_TEXT_RSP:
1436                 task = conn->login_task;
1437                 io_task = task->dd_data;
1438                 login_hdr = (struct iscsi_hdr *)phdr;
1439                 login_hdr->itt = io_task->libiscsi_itt;
1440                 break;
1441         default:
1442                 beiscsi_log(phba, KERN_WARNING,
1443                             BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1444                             "BM_%d : unrecognized async PDU opcode 0x%x\n",
1445                             code);
1446                 return 1;
1447         }
1448         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)phdr, pdata, dlen);
1449         return 0;
1450 }
1451
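/* Reset an async handle and unlink it from its CRI wait queue. */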
1452 static inline void
1453 beiscsi_hdl_put_handle(struct hd_async_context *pasync_ctx,
1454                          struct hd_async_handle *pasync_handle)
1455 {
1456         pasync_handle->is_final = 0;
1457         pasync_handle->buffer_len = 0;
1458         pasync_handle->in_use = 0;
1459         list_del_init(&pasync_handle->link);
1460 }
1461
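/*
 * Drop every async handle queued for this CRI and reset the per-CRI
 * wait queue accounting (hdr_len, bytes_received, bytes_needed).
 */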
1462 static void
1463 beiscsi_hdl_purge_handles(struct beiscsi_hba *phba,
1464                           struct hd_async_context *pasync_ctx,
1465                           u16 cri)
1466 {
1467         struct hd_async_handle *pasync_handle, *tmp_handle;
1468         struct list_head *plist;
1469
1470         plist  = &pasync_ctx->async_entry[cri].wq.list;
1471         list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link)
1472                 beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
1473
1474         INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wq.list);
1475         pasync_ctx->async_entry[cri].wq.hdr_len = 0;
1476         pasync_ctx->async_entry[cri].wq.bytes_received = 0;
1477         pasync_ctx->async_entry[cri].wq.bytes_needed = 0;
1478 }
1479
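/*
 * Map a DEF PDU CQ entry to the async handle (header or data) it refers
 * to and mark it in use; on an address/index mismatch or a digest error
 * the buffer is dropped and NULL is returned.
 */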
1480 static struct hd_async_handle *
1481 beiscsi_hdl_get_handle(struct beiscsi_conn *beiscsi_conn,
1482                        struct hd_async_context *pasync_ctx,
1483                        struct i_t_dpdu_cqe *pdpdu_cqe,
1484                        u8 *header)
1485 {
1486         struct beiscsi_hba *phba = beiscsi_conn->phba;
1487         struct hd_async_handle *pasync_handle;
1488         struct be_bus_address phys_addr;
1489         u16 cid, code, ci, cri;
1490         u8 final, error = 0;
1491         u32 dpl;
1492
1493         cid = beiscsi_conn->beiscsi_conn_cid;
1494         cri = BE_GET_ASYNC_CRI_FROM_CID(cid);
1495         /**
1496          * This function is invoked to get the right async_handle structure
1497          * from a given DEF PDU CQ entry.
1498          *
1499          * - index in CQ entry gives the vertical index
1500          * - address in CQ entry is the offset where the DMA last ended
1501          * - final - no more notifications for this PDU
1502          */
1503         if (is_chip_be2_be3r(phba)) {
1504                 dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
1505                                     dpl, pdpdu_cqe);
1506                 ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
1507                                       index, pdpdu_cqe);
1508                 final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
1509                                       final, pdpdu_cqe);
1510         } else {
1511                 dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
1512                                     dpl, pdpdu_cqe);
1513                 ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
1514                                       index, pdpdu_cqe);
1515                 final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
1516                                       final, pdpdu_cqe);
1517         }
1518
1519         /**
1520          * DB addr Hi/Lo is same for BE and SKH.
1521          * Subtract the data placement length (dpl) to get to the base.
1522          */
1523         phys_addr.u.a32.address_lo = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
1524                                                    db_addr_lo, pdpdu_cqe);
1525         phys_addr.u.a32.address_lo -= dpl;
1526         phys_addr.u.a32.address_hi = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
1527                                                    db_addr_hi, pdpdu_cqe);
1528
1529         code = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, code, pdpdu_cqe);
1530         switch (code) {
1531         case UNSOL_HDR_NOTIFY:
1532                 pasync_handle = pasync_ctx->async_entry[ci].header;
1533                 *header = 1;
1534                 break;
1535         case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
1536                 error = 1;
1537                 fallthrough;
1538         case UNSOL_DATA_NOTIFY:
1539                 pasync_handle = pasync_ctx->async_entry[ci].data;
1540                 break;
1541         /* called only for above codes */
1542         default:
1543                 return NULL;
1544         }
1545
1546         if (pasync_handle->pa.u.a64.address != phys_addr.u.a64.address ||
1547             pasync_handle->index != ci) {
1548                 /* driver bug - if ci does not match async handle index */
1549                 error = 1;
1550                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
1551                             "BM_%d : cid %u async PDU handle mismatch - addr in %cQE %llx at %u:addr in CQE %llx ci %u\n",
1552                             cid, pasync_handle->is_header ? 'H' : 'D',
1553                             pasync_handle->pa.u.a64.address,
1554                             pasync_handle->index,
1555                             phys_addr.u.a64.address, ci);
1556                 /* FW has stale address - attempt continuing by dropping */
1557         }
1558
1559         /**
1560          * DEF PDU header and data buffers with errors should be simply
1561          * dropped as there are no consumers for it.
1562          */
1563         if (error) {
1564                 beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
1565                 return NULL;
1566         }
1567
1568         if (pasync_handle->in_use || !list_empty(&pasync_handle->link)) {
1569                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
1570                             "BM_%d : cid %d async PDU handle in use - code %d ci %d addr %llx\n",
1571                             cid, code, ci, phys_addr.u.a64.address);
1572                 beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
1573         }
1574
1575         list_del_init(&pasync_handle->link);
1576         /**
1577          * Each CID is associated with a unique CRI.
1578          * The ASYNC_CRI_FROM_CID and CRI_FROM_CID mappings are totally different.
1579          **/
1580         pasync_handle->cri = cri;
1581         pasync_handle->is_final = final;
1582         pasync_handle->buffer_len = dpl;
1583         pasync_handle->in_use = 1;
1584
1585         return pasync_handle;
1586 }
1587
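/*
 * Assemble the handles queued for this CRI into one PDU: the first entry
 * supplies the header, the remaining data buffers are concatenated into
 * the first data buffer, and the result is handed to
 * beiscsi_complete_pdu() under the session back_lock.
 */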
1588 static unsigned int
1589 beiscsi_hdl_fwd_pdu(struct beiscsi_conn *beiscsi_conn,
1590                     struct hd_async_context *pasync_ctx,
1591                     u16 cri)
1592 {
1593         struct iscsi_session *session = beiscsi_conn->conn->session;
1594         struct hd_async_handle *pasync_handle, *plast_handle;
1595         struct beiscsi_hba *phba = beiscsi_conn->phba;
1596         void *phdr = NULL, *pdata = NULL;
1597         u32 dlen = 0, status = 0;
1598         struct list_head *plist;
1599
1600         plist = &pasync_ctx->async_entry[cri].wq.list;
1601         plast_handle = NULL;
1602         list_for_each_entry(pasync_handle, plist, link) {
1603                 plast_handle = pasync_handle;
1604                 /* get the header, the first entry */
1605                 if (!phdr) {
1606                         phdr = pasync_handle->pbuffer;
1607                         continue;
1608                 }
1609                 /* use first buffer to collect all the data */
1610                 if (!pdata) {
1611                         pdata = pasync_handle->pbuffer;
1612                         dlen = pasync_handle->buffer_len;
1613                         continue;
1614                 }
1615                 if (!pasync_handle->buffer_len ||
1616                     (dlen + pasync_handle->buffer_len) >
1617                     pasync_ctx->async_data.buffer_size)
1618                         break;
1619                 memcpy(pdata + dlen, pasync_handle->pbuffer,
1620                        pasync_handle->buffer_len);
1621                 dlen += pasync_handle->buffer_len;
1622         }
1623
1624         if (!plast_handle->is_final) {
1625                 /* last handle should have final PDU notification from FW */
1626                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
1627                             "BM_%d : cid %u %p fwd async PDU opcode %x with last handle missing - HL%u:DN%u:DR%u\n",
1628                             beiscsi_conn->beiscsi_conn_cid, plast_handle,
1629                             AMAP_GET_BITS(struct amap_pdu_base, opcode, phdr),
1630                             pasync_ctx->async_entry[cri].wq.hdr_len,
1631                             pasync_ctx->async_entry[cri].wq.bytes_needed,
1632                             pasync_ctx->async_entry[cri].wq.bytes_received);
1633         }
1634         spin_lock_bh(&session->back_lock);
1635         status = beiscsi_complete_pdu(beiscsi_conn, phdr, pdata, dlen);
1636         spin_unlock_bh(&session->back_lock);
1637         beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
1638         return status;
1639 }
1640
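/*
 * Queue a received header or data handle on its CRI wait queue and, once
 * all the bytes announced in the header have arrived, forward the
 * complete PDU. Malformed sequences (data without a header, a header
 * while the previous PDU is pending, overflow) are logged and dropped.
 */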
1641 static unsigned int
1642 beiscsi_hdl_gather_pdu(struct beiscsi_conn *beiscsi_conn,
1643                        struct hd_async_context *pasync_ctx,
1644                        struct hd_async_handle *pasync_handle)
1645 {
1646         unsigned int bytes_needed = 0, status = 0;
1647         u16 cri = pasync_handle->cri;
1648         struct cri_wait_queue *wq;
1649         struct beiscsi_hba *phba;
1650         struct pdu_base *ppdu;
1651         char *err = "";
1652
1653         phba = beiscsi_conn->phba;
1654         wq = &pasync_ctx->async_entry[cri].wq;
1655         if (pasync_handle->is_header) {
1656                 /* drop if a new PDU hdr arrives while the previous one is still incomplete */
1657                 if (wq->hdr_len) {
1658                         err = "incomplete";
1659                         goto drop_pdu;
1660                 }
1661                 ppdu = pasync_handle->pbuffer;
1662                 bytes_needed = AMAP_GET_BITS(struct amap_pdu_base,
1663                                              data_len_hi, ppdu);
1664                 bytes_needed <<= 16;
1665                 bytes_needed |= be16_to_cpu(AMAP_GET_BITS(struct amap_pdu_base,
1666                                                           data_len_lo, ppdu));
1667                 wq->hdr_len = pasync_handle->buffer_len;
1668                 wq->bytes_received = 0;
1669                 wq->bytes_needed = bytes_needed;
1670                 list_add_tail(&pasync_handle->link, &wq->list);
1671                 if (!bytes_needed)
1672                         status = beiscsi_hdl_fwd_pdu(beiscsi_conn,
1673                                                      pasync_ctx, cri);
1674         } else {
1675                 /* data without a preceding header (or with none expected) is dropped */
1676                 if (!wq->hdr_len || !wq->bytes_needed) {
1677                         err = "header less";
1678                         goto drop_pdu;
1679                 }
1680                 wq->bytes_received += pasync_handle->buffer_len;
1681                 /* Something got overwritten? Better catch it here. */
1682                 if (wq->bytes_received > wq->bytes_needed) {
1683                         err = "overflow";
1684                         goto drop_pdu;
1685                 }
1686                 list_add_tail(&pasync_handle->link, &wq->list);
1687                 if (wq->bytes_received == wq->bytes_needed)
1688                         status = beiscsi_hdl_fwd_pdu(beiscsi_conn,
1689                                                      pasync_ctx, cri);
1690         }
1691         return status;
1692
1693 drop_pdu:
1694         beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
1695                     "BM_%d : cid %u async PDU %s - def-%c:HL%u:DN%u:DR%u\n",
1696                     beiscsi_conn->beiscsi_conn_cid, err,
1697                     pasync_handle->is_header ? 'H' : 'D',
1698                     wq->hdr_len, wq->bytes_needed,
1699                     pasync_handle->buffer_len);
1700         /* discard this handle */
1701         beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
1702         /* free all the other handles in cri_wait_queue */
1703         beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
1704         /* try continuing */
1705         return status;
1706 }
1707
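/*
 * Re-post nbuf buffers to the default PDU header or data ring of the
 * given ULP and ring its doorbell; the ring SGEs themselves are only
 * written on the initial full posting.
 */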
1708 static void
1709 beiscsi_hdq_post_handles(struct beiscsi_hba *phba,
1710                          u8 header, u8 ulp_num, u16 nbuf)
1711 {
1712         struct hd_async_handle *pasync_handle;
1713         struct hd_async_context *pasync_ctx;
1714         struct hwi_controller *phwi_ctrlr;
1715         struct phys_addr *pasync_sge;
1716         u32 ring_id, doorbell = 0;
1717         u32 doorbell_offset;
1718         u16 prod, pi;
1719
1720         phwi_ctrlr = phba->phwi_ctrlr;
1721         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
1722         if (header) {
1723                 pasync_sge = pasync_ctx->async_header.ring_base;
1724                 pi = pasync_ctx->async_header.pi;
1725                 ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id;
1726                 doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num].
1727                                         doorbell_offset;
1728         } else {
1729                 pasync_sge = pasync_ctx->async_data.ring_base;
1730                 pi = pasync_ctx->async_data.pi;
1731                 ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id;
1732                 doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num].
1733                                         doorbell_offset;
1734         }
1735
1736         for (prod = 0; prod < nbuf; prod++) {
1737                 if (header)
1738                         pasync_handle = pasync_ctx->async_entry[pi].header;
1739                 else
1740                         pasync_handle = pasync_ctx->async_entry[pi].data;
1741                 WARN_ON(pasync_handle->is_header != header);
1742                 WARN_ON(pasync_handle->index != pi);
1743                 /* setup the ring only once */
1744                 if (nbuf == pasync_ctx->num_entries) {
1745                         /* note: the ring SGE's hi field takes the low address dword and vice versa */
1746                         pasync_sge[pi].hi = pasync_handle->pa.u.a32.address_lo;
1747                         pasync_sge[pi].lo = pasync_handle->pa.u.a32.address_hi;
1748                 }
1749                 if (++pi == pasync_ctx->num_entries)
1750                         pi = 0;
1751         }
1752
1753         if (header)
1754                 pasync_ctx->async_header.pi = pi;
1755         else
1756                 pasync_ctx->async_data.pi = pi;
1757
1758         doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
1759         doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
1760         doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
1761         doorbell |= (prod & DB_DEF_PDU_CQPROC_MASK) << DB_DEF_PDU_CQPROC_SHIFT;
1762         iowrite32(doorbell, phba->db_va + doorbell_offset);
1763 }
1764
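/*
 * Handle a default PDU CQ entry: gather the async handle it refers to
 * into the PDU being assembled for that connection and re-post the RQEs
 * the firmware reports as consumed.
 */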
1765 static void
1766 beiscsi_hdq_process_compl(struct beiscsi_conn *beiscsi_conn,
1767                           struct i_t_dpdu_cqe *pdpdu_cqe)
1768 {
1769         struct beiscsi_hba *phba = beiscsi_conn->phba;
1770         struct hd_async_handle *pasync_handle = NULL;
1771         struct hd_async_context *pasync_ctx;
1772         struct hwi_controller *phwi_ctrlr;
1773         u8 ulp_num, consumed, header = 0;
1774         u16 cid_cri;
1775
1776         phwi_ctrlr = phba->phwi_ctrlr;
1777         cid_cri = BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid);
1778         ulp_num = BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cid_cri);
1779         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
1780         pasync_handle = beiscsi_hdl_get_handle(beiscsi_conn, pasync_ctx,
1781                                                pdpdu_cqe, &header);
1782         if (is_chip_be2_be3r(phba))
1783                 consumed = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
1784                                          num_cons, pdpdu_cqe);
1785         else
1786                 consumed = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
1787                                          num_cons, pdpdu_cqe);
1788         if (pasync_handle)
1789                 beiscsi_hdl_gather_pdu(beiscsi_conn, pasync_ctx, pasync_handle);
1790         /* num_cons indicates the number of groups of 8 RQEs consumed */
1791         if (consumed)
1792                 beiscsi_hdq_post_handles(phba, header, ulp_num, 8 * consumed);
1793 }
1794
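/**
 * beiscsi_process_mcc_cq()- Drain the MCC completion queue
 * @phba: ptr to HBA struct
 *
 * Walks the valid MCC CQEs, routing async events and command completions
 * to their handlers; the CQ doorbell is rung every 32 entries and once
 * more, with rearm, when the queue is drained.
 **/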
1795 void beiscsi_process_mcc_cq(struct beiscsi_hba *phba)
1796 {
1797         struct be_queue_info *mcc_cq;
1798         struct  be_mcc_compl *mcc_compl;
1799         unsigned int num_processed = 0;
1800
1801         mcc_cq = &phba->ctrl.mcc_obj.cq;
1802         mcc_compl = queue_tail_node(mcc_cq);
1803         mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1804         while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
1805                 if (beiscsi_hba_in_error(phba))
1806                         return;
1807
1808                 if (num_processed >= 32) {
1809                         hwi_ring_cq_db(phba, mcc_cq->id,
1810                                         num_processed, 0);
1811                         num_processed = 0;
1812                 }
1813                 if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
1814                         beiscsi_process_async_event(phba, mcc_compl);
1815                 } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
1816                         beiscsi_process_mcc_compl(&phba->ctrl, mcc_compl);
1817                 }
1818
1819                 mcc_compl->flags = 0;
1820                 queue_tail_inc(mcc_cq);
1821                 mcc_compl = queue_tail_node(mcc_cq);
1822                 mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1823                 num_processed++;
1824         }
1825
1826         if (num_processed > 0)
1827                 hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1);
1828 }
1829
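/* Work item that drains the MCC CQ and then rearms its EQ. */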
1830 static void beiscsi_mcc_work(struct work_struct *work)
1831 {
1832         struct be_eq_obj *pbe_eq;
1833         struct beiscsi_hba *phba;
1834
1835         pbe_eq = container_of(work, struct be_eq_obj, mcc_work);
1836         phba = pbe_eq->phba;
1837         beiscsi_process_mcc_cq(phba);
1838         /* rearm EQ for further interrupts */
1839         if (!beiscsi_hba_in_error(phba))
1840                 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
1841 }
1842
1843 /**
1844  * beiscsi_process_cq()- Process the Completion Queue
1845  * @pbe_eq: Event Q on which the Completion has come
1846  * @budget: Max number of events to be processed
1847  *
1848  * Return:
1849  *     Number of Completion Entries processed.
1850  **/
1851 unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
1852 {
1853         struct be_queue_info *cq;
1854         struct sol_cqe *sol;
1855         unsigned int total = 0;
1856         unsigned int num_processed = 0;
1857         unsigned short code = 0, cid = 0;
1858         uint16_t cri_index = 0;
1859         struct beiscsi_conn *beiscsi_conn;
1860         struct beiscsi_endpoint *beiscsi_ep;
1861         struct iscsi_endpoint *ep;
1862         struct beiscsi_hba *phba;
1863
1864         cq = pbe_eq->cq;
1865         sol = queue_tail_node(cq);
1866         phba = pbe_eq->phba;
1867
1868         while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
1869                CQE_VALID_MASK) {
1870                 if (beiscsi_hba_in_error(phba))
1871                         return 0;
1872
1873                 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
1874
1875                 code = (sol->dw[offsetof(struct amap_sol_cqe, code) / 32] &
1876                                 CQE_CODE_MASK);
1877
1878                  /* Get the CID */
1879                 if (is_chip_be2_be3r(phba)) {
1880                         cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol);
1881                 } else {
1882                         if ((code == DRIVERMSG_NOTIFY) ||
1883                             (code == UNSOL_HDR_NOTIFY) ||
1884                             (code == UNSOL_DATA_NOTIFY))
1885                                 cid = AMAP_GET_BITS(
1886                                                     struct amap_i_t_dpdu_cqe_v2,
1887                                                     cid, sol);
1888                         else
1889                                 cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1890                                                     cid, sol);
1891                 }
1892
1893                 cri_index = BE_GET_CRI_FROM_CID(cid);
1894                 ep = phba->ep_array[cri_index];
1895
1896                 if (ep == NULL) {
1897                         /* connection has already been freed
1898                          * just move on to next one
1899                          */
1900                         beiscsi_log(phba, KERN_WARNING,
1901                                     BEISCSI_LOG_INIT,
1902                                     "BM_%d : proc cqe of disconn ep: cid %d\n",
1903                                     cid);
1904                         goto proc_next_cqe;
1905                 }
1906
1907                 beiscsi_ep = ep->dd_data;
1908                 beiscsi_conn = beiscsi_ep->conn;
1909
1910                 /* replenish cq */
1911                 if (num_processed == 32) {
1912                         hwi_ring_cq_db(phba, cq->id, 32, 0);
1913                         num_processed = 0;
1914                 }
1915                 total++;
1916
1917                 switch (code) {
1918                 case SOL_CMD_COMPLETE:
1919                         hwi_complete_cmd(beiscsi_conn, phba, sol);
1920                         break;
1921                 case DRIVERMSG_NOTIFY:
1922                         beiscsi_log(phba, KERN_INFO,
1923                                     BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1924                                     "BM_%d : Received %s[%d] on CID : %d\n",
1925                                     cqe_desc[code], code, cid);
1926
1927                         hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
1928                         break;
1929                 case UNSOL_HDR_NOTIFY:
1930                         beiscsi_log(phba, KERN_INFO,
1931                                     BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1932                                     "BM_%d : Received %s[%d] on CID : %d\n",
1933                                     cqe_desc[code], code, cid);
1934
1935                         spin_lock_bh(&phba->async_pdu_lock);
1936                         beiscsi_hdq_process_compl(beiscsi_conn,
1937                                                   (struct i_t_dpdu_cqe *)sol);
1938                         spin_unlock_bh(&phba->async_pdu_lock);
1939                         break;
1940                 case UNSOL_DATA_NOTIFY:
1941                         beiscsi_log(phba, KERN_INFO,
1942                                     BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1943                                     "BM_%d : Received %s[%d] on CID : %d\n",
1944                                     cqe_desc[code], code, cid);
1945
1946                         spin_lock_bh(&phba->async_pdu_lock);
1947                         beiscsi_hdq_process_compl(beiscsi_conn,
1948                                                   (struct i_t_dpdu_cqe *)sol);
1949                         spin_unlock_bh(&phba->async_pdu_lock);
1950                         break;
1951                 case CXN_INVALIDATE_INDEX_NOTIFY:
1952                 case CMD_INVALIDATED_NOTIFY:
1953                 case CXN_INVALIDATE_NOTIFY:
1954                         beiscsi_log(phba, KERN_ERR,
1955                                     BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1956                                     "BM_%d : Ignoring %s[%d] on CID : %d\n",
1957                                     cqe_desc[code], code, cid);
1958                         break;
1959                 case CXN_KILLED_HDR_DIGEST_ERR:
1960                 case SOL_CMD_KILLED_DATA_DIGEST_ERR:
1961                         beiscsi_log(phba, KERN_ERR,
1962                                     BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1963                                     "BM_%d : Cmd Notification %s[%d] on CID : %d\n",
1964                                     cqe_desc[code], code,  cid);
1965                         break;
1966                 case CMD_KILLED_INVALID_STATSN_RCVD:
1967                 case CMD_KILLED_INVALID_R2T_RCVD:
1968                 case CMD_CXN_KILLED_LUN_INVALID:
1969                 case CMD_CXN_KILLED_ICD_INVALID:
1970                 case CMD_CXN_KILLED_ITT_INVALID:
1971                 case CMD_CXN_KILLED_SEQ_OUTOFORDER:
1972                 case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
1973                         beiscsi_log(phba, KERN_ERR,
1974                                     BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1975                                     "BM_%d : Cmd Notification %s[%d] on CID : %d\n",
1976                                     cqe_desc[code], code,  cid);
1977                         break;
1978                 case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
1979                         beiscsi_log(phba, KERN_ERR,
1980                                     BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1981                                     "BM_%d :  Dropping %s[%d] on DPDU ring on CID : %d\n",
1982                                     cqe_desc[code], code, cid);
1983                         spin_lock_bh(&phba->async_pdu_lock);
1984                         /* driver consumes the entry and drops the contents */
1985                         beiscsi_hdq_process_compl(beiscsi_conn,
1986                                                   (struct i_t_dpdu_cqe *)sol);
1987                         spin_unlock_bh(&phba->async_pdu_lock);
1988                         break;
1989                 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
1990                 case CXN_KILLED_BURST_LEN_MISMATCH:
1991                 case CXN_KILLED_AHS_RCVD:
1992                 case CXN_KILLED_UNKNOWN_HDR:
1993                 case CXN_KILLED_STALE_ITT_TTT_RCVD:
1994                 case CXN_KILLED_INVALID_ITT_TTT_RCVD:
1995                 case CXN_KILLED_TIMED_OUT:
1996                 case CXN_KILLED_FIN_RCVD:
1997                 case CXN_KILLED_RST_SENT:
1998                 case CXN_KILLED_RST_RCVD:
1999                 case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
2000                 case CXN_KILLED_BAD_WRB_INDEX_ERROR:
2001                 case CXN_KILLED_OVER_RUN_RESIDUAL:
2002                 case CXN_KILLED_UNDER_RUN_RESIDUAL:
2003                 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
2004                         beiscsi_log(phba, KERN_ERR,
2005                                     BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
2006                                     "BM_%d : Event %s[%d] received on CID : %d\n",
2007                                     cqe_desc[code], code, cid);
2008                         if (beiscsi_conn)
2009                                 iscsi_conn_failure(beiscsi_conn->conn,
2010                                                    ISCSI_ERR_CONN_FAILED);
2011                         break;
2012                 default:
2013                         beiscsi_log(phba, KERN_ERR,
2014                                     BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
2015                                     "BM_%d : Invalid CQE Event Received Code : %d CID 0x%x...\n",
2016                                     code, cid);
2017                         break;
2018                 }
2019
2020 proc_next_cqe:
2021                 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
2022                 queue_tail_inc(cq);
2023                 sol = queue_tail_node(cq);
2024                 num_processed++;
2025                 if (total == budget)
2026                         break;
2027         }
2028
2029         hwi_ring_cq_db(phba, cq->id, num_processed, 1);
2030         return total;
2031 }
2032
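/*
 * irq_poll handler: consume the valid EQ entries, process up to budget
 * CQ entries and, if the budget was not exhausted, complete the poll and
 * rearm the EQ.
 */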
2033 static int be_iopoll(struct irq_poll *iop, int budget)
2034 {
2035         unsigned int ret, io_events;
2036         struct beiscsi_hba *phba;
2037         struct be_eq_obj *pbe_eq;
2038         struct be_eq_entry *eqe = NULL;
2039         struct be_queue_info *eq;
2040
2041         pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
2042         phba = pbe_eq->phba;
2043         if (beiscsi_hba_in_error(phba)) {
2044                 irq_poll_complete(iop);
2045                 return 0;
2046         }
2047
2048         io_events = 0;
2049         eq = &pbe_eq->q;
2050         eqe = queue_tail_node(eq);
2051         while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] &
2052                         EQE_VALID_MASK) {
2053                 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
2054                 queue_tail_inc(eq);
2055                 eqe = queue_tail_node(eq);
2056                 io_events++;
2057         }
2058         hwi_ring_eq_db(phba, eq->id, 1, io_events, 0, 1);
2059
2060         ret = beiscsi_process_cq(pbe_eq, budget);
2061         pbe_eq->cq_count += ret;
2062         if (ret < budget) {
2063                 irq_poll_complete(iop);
2064                 beiscsi_log(phba, KERN_INFO,
2065                             BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
2066                             "BM_%d : rearm pbe_eq->q.id =%d ret %d\n",
2067                             pbe_eq->q.id, ret);
2068                 if (!beiscsi_hba_in_error(phba))
2069                         hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
2070         }
2071         return ret;
2072 }
2073
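/*
 * Populate a v2 (SKH) WRB and its SGL from the DMA-mapped scatterlist:
 * the first two fragments are written inline into the WRB, the full list
 * is written into the task's SGL fragment page.
 */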
2074 static void
2075 hwi_write_sgl_v2(struct iscsi_wrb *pwrb, struct scatterlist *sg,
2076                   unsigned int num_sg, struct beiscsi_io_task *io_task)
2077 {
2078         struct iscsi_sge *psgl;
2079         unsigned int sg_len, index;
2080         unsigned int sge_len = 0;
2081         unsigned long long addr;
2082         struct scatterlist *l_sg;
2083         unsigned int offset;
2084
2085         AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_lo, pwrb,
2086                       io_task->bhs_pa.u.a32.address_lo);
2087         AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_hi, pwrb,
2088                       io_task->bhs_pa.u.a32.address_hi);
2089
2090         l_sg = sg;
2091         for (index = 0; (index < num_sg) && (index < 2); index++,
2092                         sg = sg_next(sg)) {
2093                 if (index == 0) {
2094                         sg_len = sg_dma_len(sg);
2095                         addr = (u64) sg_dma_address(sg);
2096                         AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2097                                       sge0_addr_lo, pwrb,
2098                                       lower_32_bits(addr));
2099                         AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2100                                       sge0_addr_hi, pwrb,
2101                                       upper_32_bits(addr));
2102                         AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2103                                       sge0_len, pwrb,
2104                                       sg_len);
2105                         sge_len = sg_len;
2106                 } else {
2107                         AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_r2t_offset,
2108                                       pwrb, sge_len);
2109                         sg_len = sg_dma_len(sg);
2110                         addr = (u64) sg_dma_address(sg);
2111                         AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2112                                       sge1_addr_lo, pwrb,
2113                                       lower_32_bits(addr));
2114                         AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2115                                       sge1_addr_hi, pwrb,
2116                                       upper_32_bits(addr));
2117                         AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2118                                       sge1_len, pwrb,
2119                                       sg_len);
2120                 }
2121         }
2122         psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2123         memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
2124
2125         AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
2126
2127         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2128                       io_task->bhs_pa.u.a32.address_hi);
2129         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2130                       io_task->bhs_pa.u.a32.address_lo);
2131
2132         if (num_sg == 1) {
2133                 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
2134                               1);
2135                 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
2136                               0);
2137         } else if (num_sg == 2) {
2138                 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
2139                               0);
2140                 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
2141                               1);
2142         } else {
2143                 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
2144                               0);
2145                 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
2146                               0);
2147         }
2148
2149         sg = l_sg;
2150         psgl++;
2151         psgl++;
2152         offset = 0;
2153         for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
2154                 sg_len = sg_dma_len(sg);
2155                 addr = (u64) sg_dma_address(sg);
2156                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2157                               lower_32_bits(addr));
2158                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2159                               upper_32_bits(addr));
2160                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
2161                 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
2162                 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2163                 offset += sg_len;
2164         }
2165         psgl--;
2166         AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2167 }
2168
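/* BE2/BE3 counterpart of hwi_write_sgl_v2() using the v1 WRB layout. */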
2169 static void
2170 hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
2171               unsigned int num_sg, struct beiscsi_io_task *io_task)
2172 {
2173         struct iscsi_sge *psgl;
2174         unsigned int sg_len, index;
2175         unsigned int sge_len = 0;
2176         unsigned long long addr;
2177         struct scatterlist *l_sg;
2178         unsigned int offset;
2179
2180         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
2181                                       io_task->bhs_pa.u.a32.address_lo);
2182         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
2183                                       io_task->bhs_pa.u.a32.address_hi);
2184
2185         l_sg = sg;
2186         for (index = 0; (index < num_sg) && (index < 2); index++,
2187                                                          sg = sg_next(sg)) {
2188                 if (index == 0) {
2189                         sg_len = sg_dma_len(sg);
2190                         addr = (u64) sg_dma_address(sg);
2191                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
2192                                                 ((u32)(addr & 0xFFFFFFFF)));
2193                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
2194                                                         ((u32)(addr >> 32)));
2195                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
2196                                                         sg_len);
2197                         sge_len = sg_len;
2198                 } else {
2199                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
2200                                                         pwrb, sge_len);
2201                         sg_len = sg_dma_len(sg);
2202                         addr = (u64) sg_dma_address(sg);
2203                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
2204                                                 ((u32)(addr & 0xFFFFFFFF)));
2205                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
2206                                                         ((u32)(addr >> 32)));
2207                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
2208                                                         sg_len);
2209                 }
2210         }
2211         psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2212         memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
2213
2214         AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
2215
2216         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2217                         io_task->bhs_pa.u.a32.address_hi);
2218         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2219                         io_task->bhs_pa.u.a32.address_lo);
2220
2221         if (num_sg == 1) {
2222                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2223                                                                 1);
2224                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2225                                                                 0);
2226         } else if (num_sg == 2) {
2227                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2228                                                                 0);
2229                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2230                                                                 1);
2231         } else {
2232                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2233                                                                 0);
2234                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2235                                                                 0);
2236         }
2237         sg = l_sg;
2238         psgl++;
2239         psgl++;
2240         offset = 0;
2241         for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
2242                 sg_len = sg_dma_len(sg);
2243                 addr = (u64) sg_dma_address(sg);
2244                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2245                                                 (addr & 0xFFFFFFFF));
2246                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2247                                                 (addr >> 32));
2248                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
2249                 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
2250                 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2251                 offset += sg_len;
2252         }
2253         psgl--;
2254         AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2255 }
2256
2257 /**
2258  * hwi_write_buffer()- Populate the WRB with task info
2259  * @pwrb: ptr to the WRB entry
2260  * @task: iscsi task which is to be executed
2261  **/
2262 static int hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
2263 {
2264         struct iscsi_sge *psgl;
2265         struct beiscsi_io_task *io_task = task->dd_data;
2266         struct beiscsi_conn *beiscsi_conn = io_task->conn;
2267         struct beiscsi_hba *phba = beiscsi_conn->phba;
2268         uint8_t dsp_value = 0;
2269
2270         io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
2271         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
2272                                 io_task->bhs_pa.u.a32.address_lo);
2273         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
2274                                 io_task->bhs_pa.u.a32.address_hi);
2275
2276         if (task->data) {
2277
2278                 /* DSP bit is set only when there is data to send */
2279                 dsp_value = (task->data_count) ? 1 : 0;
2280
2281                 if (is_chip_be2_be3r(phba))
2282                         AMAP_SET_BITS(struct amap_iscsi_wrb, dsp,
2283                                       pwrb, dsp_value);
2284                 else
2285                         AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp,
2286                                       pwrb, dsp_value);
2287
2288                 /* Map addr only if there is data_count */
2289                 if (dsp_value) {
2290                         io_task->mtask_addr = dma_map_single(&phba->pcidev->dev,
2291                                                              task->data,
2292                                                              task->data_count,
2293                                                              DMA_TO_DEVICE);
2294                         if (dma_mapping_error(&phba->pcidev->dev,
2295                                                   io_task->mtask_addr))
2296                                 return -ENOMEM;
2297                         io_task->mtask_data_count = task->data_count;
2298                 } else
2299                         io_task->mtask_addr = 0;
2300
2301                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
2302                               lower_32_bits(io_task->mtask_addr));
2303                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
2304                               upper_32_bits(io_task->mtask_addr));
2305                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
2306                                                 task->data_count);
2307
2308                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
2309         } else {
2310                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
2311                 io_task->mtask_addr = 0;
2312         }
2313
2314         psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2315
2316         AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
2317
2318         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2319                       io_task->bhs_pa.u.a32.address_hi);
2320         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2321                       io_task->bhs_pa.u.a32.address_lo);
2322         if (task->data) {
2323                 psgl++;
2324                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
2325                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
2326                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
2327                 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
2328                 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
2329                 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2330
2331                 psgl++;
2332                 if (task->data) {
2333                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2334                                       lower_32_bits(io_task->mtask_addr));
2335                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2336                                       upper_32_bits(io_task->mtask_addr));
2337                 }
2338                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
2339         }
2340         AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2341         return 0;
2342 }
2343
2344 /**
2345  * beiscsi_find_mem_req()- Find mem needed
2346  * beiscsi_find_mem_req()- Compute the memory needed for each region
2347  **/
2348 static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
2349 {
2350         uint8_t mem_descr_index, ulp_num;
2351         unsigned int num_async_pdu_buf_pages;
2352         unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
2353         unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
2354
2355         phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
2356
2357         phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
2358                                                  BE_ISCSI_PDU_HEADER_SIZE;
2359         phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
2360                                             sizeof(struct hwi_context_memory);
2361
2362
2363         phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
2364             * (phba->params.wrbs_per_cxn)
2365             * phba->params.cxns_per_ctrl;
2366         wrb_sz_per_cxn =  sizeof(struct wrb_handle) *
2367                                  (phba->params.wrbs_per_cxn);
2368         phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
2369                                 phba->params.cxns_per_ctrl);
2370
2371         phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
2372                 phba->params.icds_per_ctrl;
2373         phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
2374                 phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
2375         for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
2376                 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
2377
2378                         num_async_pdu_buf_sgl_pages =
2379                                 PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE(
2380                                                phba, ulp_num) *
2381                                                sizeof(struct phys_addr));
2382
2383                         num_async_pdu_buf_pages =
2384                                 PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE(
2385                                                phba, ulp_num) *
2386                                                phba->params.defpdu_hdr_sz);
2387
2388                         num_async_pdu_data_pages =
2389                                 PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE(
2390                                                phba, ulp_num) *
2391                                                phba->params.defpdu_data_sz);
2392
2393                         num_async_pdu_data_sgl_pages =
2394                                 PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE(
2395                                                phba, ulp_num) *
2396                                                sizeof(struct phys_addr));
2397
2398                         mem_descr_index = (HWI_MEM_TEMPLATE_HDR_ULP0 +
2399                                           (ulp_num * MEM_DESCR_OFFSET));
2400                         phba->mem_req[mem_descr_index] =
2401                                         BEISCSI_GET_CID_COUNT(phba, ulp_num) *
2402                                         BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE;
2403
2404                         mem_descr_index = (HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
2405                                           (ulp_num * MEM_DESCR_OFFSET));
2406                         phba->mem_req[mem_descr_index] =
2407                                           num_async_pdu_buf_pages *
2408                                           PAGE_SIZE;
2409
2410                         mem_descr_index = (HWI_MEM_ASYNC_DATA_BUF_ULP0 +
2411                                           (ulp_num * MEM_DESCR_OFFSET));
2412                         phba->mem_req[mem_descr_index] =
2413                                           num_async_pdu_data_pages *
2414                                           PAGE_SIZE;
2415
2416                         mem_descr_index = (HWI_MEM_ASYNC_HEADER_RING_ULP0 +
2417                                           (ulp_num * MEM_DESCR_OFFSET));
2418                         phba->mem_req[mem_descr_index] =
2419                                           num_async_pdu_buf_sgl_pages *
2420                                           PAGE_SIZE;
2421
2422                         mem_descr_index = (HWI_MEM_ASYNC_DATA_RING_ULP0 +
2423                                           (ulp_num * MEM_DESCR_OFFSET));
2424                         phba->mem_req[mem_descr_index] =
2425                                           num_async_pdu_data_sgl_pages *
2426                                           PAGE_SIZE;
2427
2428                         mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
2429                                           (ulp_num * MEM_DESCR_OFFSET));
2430                         phba->mem_req[mem_descr_index] =
2431                                 BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) *
2432                                 sizeof(struct hd_async_handle);
2433
2434                         mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
2435                                           (ulp_num * MEM_DESCR_OFFSET));
2436                         phba->mem_req[mem_descr_index] =
2437                                 BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) *
2438                                 sizeof(struct hd_async_handle);
2439
2440                         mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
2441                                           (ulp_num * MEM_DESCR_OFFSET));
2442                         phba->mem_req[mem_descr_index] =
2443                                 sizeof(struct hd_async_context) +
2444                                 (BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) *
2445                                  sizeof(struct hd_async_entry));
2446                 }
2447         }
2448 }
2449
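/*
 * Allocate the DMA-coherent regions sized by beiscsi_find_mem_req().
 * Each region may be built from several fragments, each capped at
 * be_max_phys_size KB and shrunk further when an allocation fails;
 * everything allocated so far is released on error.
 */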
2450 static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
2451 {
2452         dma_addr_t bus_add;
2453         struct hwi_controller *phwi_ctrlr;
2454         struct be_mem_descriptor *mem_descr;
2455         struct mem_array *mem_arr, *mem_arr_orig;
2456         unsigned int i, j, alloc_size, curr_alloc_size;
2457
2458         phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
2459         if (!phba->phwi_ctrlr)
2460                 return -ENOMEM;
2461
2462         /* Allocate memory for wrb_context */
2463         phwi_ctrlr = phba->phwi_ctrlr;
2464         phwi_ctrlr->wrb_context = kcalloc(phba->params.cxns_per_ctrl,
2465                                           sizeof(struct hwi_wrb_context),
2466                                           GFP_KERNEL);
2467         if (!phwi_ctrlr->wrb_context) {
2468                 kfree(phba->phwi_ctrlr);
2469                 return -ENOMEM;
2470         }
2471
2472         phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
2473                                  GFP_KERNEL);
2474         if (!phba->init_mem) {
2475                 kfree(phwi_ctrlr->wrb_context);
2476                 kfree(phba->phwi_ctrlr);
2477                 return -ENOMEM;
2478         }
2479
2480         mem_arr_orig = kmalloc_array(BEISCSI_MAX_FRAGS_INIT,
2481                                      sizeof(*mem_arr_orig),
2482                                      GFP_KERNEL);
2483         if (!mem_arr_orig) {
2484                 kfree(phba->init_mem);
2485                 kfree(phwi_ctrlr->wrb_context);
2486                 kfree(phba->phwi_ctrlr);
2487                 return -ENOMEM;
2488         }
2489
2490         mem_descr = phba->init_mem;
2491         for (i = 0; i < SE_MEM_MAX; i++) {
2492                 if (!phba->mem_req[i]) {
2493                         mem_descr->mem_array = NULL;
2494                         mem_descr++;
2495                         continue;
2496                 }
2497
2498                 j = 0;
2499                 mem_arr = mem_arr_orig;
2500                 alloc_size = phba->mem_req[i];
2501                 memset(mem_arr, 0, sizeof(struct mem_array) *
2502                        BEISCSI_MAX_FRAGS_INIT);
2503                 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
2504                 do {
2505                         mem_arr->virtual_address =
2506                                 dma_alloc_coherent(&phba->pcidev->dev,
2507                                         curr_alloc_size, &bus_add, GFP_KERNEL);
2508                         if (!mem_arr->virtual_address) {
2509                                 if (curr_alloc_size <= BE_MIN_MEM_SIZE)
2510                                         goto free_mem;
2511                                 if (curr_alloc_size -
2512                                         rounddown_pow_of_two(curr_alloc_size))
2513                                         curr_alloc_size = rounddown_pow_of_two
2514                                                              (curr_alloc_size);
2515                                 else
2516                                         curr_alloc_size = curr_alloc_size / 2;
2517                         } else {
2518                                 mem_arr->bus_address.u.
2519                                     a64.address = (__u64) bus_add;
2520                                 mem_arr->size = curr_alloc_size;
2521                                 alloc_size -= curr_alloc_size;
2522                                 curr_alloc_size = min(be_max_phys_size *
2523                                                       1024, alloc_size);
2524                                 j++;
2525                                 mem_arr++;
2526                         }
2527                 } while (alloc_size);
2528                 mem_descr->num_elements = j;
2529                 mem_descr->size_in_bytes = phba->mem_req[i];
2530                 mem_descr->mem_array = kmalloc_array(j, sizeof(*mem_arr),
2531                                                      GFP_KERNEL);
2532                 if (!mem_descr->mem_array)
2533                         goto free_mem;
2534
2535                 memcpy(mem_descr->mem_array, mem_arr_orig,
2536                        sizeof(struct mem_array) * j);
2537                 mem_descr++;
2538         }
2539         kfree(mem_arr_orig);
2540         return 0;
2541 free_mem:
2542         mem_descr->num_elements = j;
2543         while ((i) || (j)) {
2544                 for (j = mem_descr->num_elements; j > 0; j--) {
2545                         dma_free_coherent(&phba->pcidev->dev,
2546                                             mem_descr->mem_array[j - 1].size,
2547                                             mem_descr->mem_array[j - 1].
2548                                             virtual_address,
2549                                             (unsigned long)mem_descr->
2550                                             mem_array[j - 1].
2551                                             bus_address.u.a64.address);
2552                 }
2553                 if (i) {
2554                         i--;
2555                         kfree(mem_descr->mem_array);
2556                         mem_descr--;
2557                 }
2558         }
2559         kfree(mem_arr_orig);
2560         kfree(phba->init_mem);
2561         kfree(phba->phwi_ctrlr->wrb_context);
2562         kfree(phba->phwi_ctrlr);
2563         return -ENOMEM;
2564 }
2565
2566 static int beiscsi_get_memory(struct beiscsi_hba *phba)
2567 {
2568         beiscsi_find_mem_req(phba);
2569         return beiscsi_alloc_mem(phba);
2570 }
2571
2572 static void iscsi_init_global_templates(struct beiscsi_hba *phba)
2573 {
2574         struct pdu_data_out *pdata_out;
2575         struct pdu_nop_out *pnop_out;
2576         struct be_mem_descriptor *mem_descr;
2577
2578         mem_descr = phba->init_mem;
2579         mem_descr += ISCSI_MEM_GLOBAL_HEADER;
2580         pdata_out =
2581             (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
2582         memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2583
2584         AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
2585                       IIOC_SCSI_DATA);
2586
2587         pnop_out =
2588             (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
2589                                    virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
2590
2591         memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2592         AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
2593         AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
2594         AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
2595 }
2596
2597 static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
2598 {
2599         struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
2600         struct hwi_context_memory *phwi_ctxt;
2601         struct wrb_handle *pwrb_handle = NULL;
2602         struct hwi_controller *phwi_ctrlr;
2603         struct hwi_wrb_context *pwrb_context;
2604         struct iscsi_wrb *pwrb = NULL;
2605         unsigned int num_cxn_wrbh = 0;
2606         unsigned int num_cxn_wrb = 0, j, idx = 0, index;
2607
2608         mem_descr_wrbh = phba->init_mem;
2609         mem_descr_wrbh += HWI_MEM_WRBH;
2610
2611         mem_descr_wrb = phba->init_mem;
2612         mem_descr_wrb += HWI_MEM_WRB;
2613         phwi_ctrlr = phba->phwi_ctrlr;
2614
2615         /* Allocate memory for WRBQ */
2616         phwi_ctxt = phwi_ctrlr->phwi_ctxt;
2617         phwi_ctxt->be_wrbq = kcalloc(phba->params.cxns_per_ctrl,
2618                                      sizeof(struct be_queue_info),
2619                                      GFP_KERNEL);
2620         if (!phwi_ctxt->be_wrbq) {
2621                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2622                             "BM_%d : WRBQ Mem Alloc Failed\n");
2623                 return -ENOMEM;
2624         }
2625
2626         for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
2627                 pwrb_context = &phwi_ctrlr->wrb_context[index];
2628                 pwrb_context->pwrb_handle_base =
2629                                 kcalloc(phba->params.wrbs_per_cxn,
2630                                         sizeof(struct wrb_handle *),
2631                                         GFP_KERNEL);
2632                 if (!pwrb_context->pwrb_handle_base) {
2633                         beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2634                                     "BM_%d : Mem Alloc Failed. Failing to load\n");
2635                         goto init_wrb_hndl_failed;
2636                 }
2637                 pwrb_context->pwrb_handle_basestd =
2638                                 kcalloc(phba->params.wrbs_per_cxn,
2639                                         sizeof(struct wrb_handle *),
2640                                         GFP_KERNEL);
2641                 if (!pwrb_context->pwrb_handle_basestd) {
2642                         beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2643                                     "BM_%d : Mem Alloc Failed. Failing to load\n");
2644                         goto init_wrb_hndl_failed;
2645                 }
2646                 if (!num_cxn_wrbh) {
2647                         pwrb_handle =
2648                                 mem_descr_wrbh->mem_array[idx].virtual_address;
2649                         num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
2650                                         ((sizeof(struct wrb_handle)) *
2651                                          phba->params.wrbs_per_cxn));
2652                         idx++;
2653                 }
2654                 pwrb_context->alloc_index = 0;
2655                 pwrb_context->wrb_handles_available = 0;
2656                 pwrb_context->free_index = 0;
2657
2658                 if (num_cxn_wrbh) {
2659                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2660                                 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2661                                 pwrb_context->pwrb_handle_basestd[j] =
2662                                                                 pwrb_handle;
2663                                 pwrb_context->wrb_handles_available++;
2664                                 pwrb_handle->wrb_index = j;
2665                                 pwrb_handle++;
2666                         }
2667                         num_cxn_wrbh--;
2668                 }
2669                 spin_lock_init(&pwrb_context->wrb_lock);
2670         }
2671         idx = 0;
2672         for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
2673                 pwrb_context = &phwi_ctrlr->wrb_context[index];
2674                 if (!num_cxn_wrb) {
2675                         pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2676                         num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
2677                                 ((sizeof(struct iscsi_wrb) *
2678                                   phba->params.wrbs_per_cxn));
2679                         idx++;
2680                 }
2681
2682                 if (num_cxn_wrb) {
2683                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2684                                 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2685                                 pwrb_handle->pwrb = pwrb;
2686                                 pwrb++;
2687                         }
2688                         num_cxn_wrb--;
2689                 }
2690         }
2691         return 0;
2692 init_wrb_hndl_failed:
2693         for (j = index; j > 0; j--) {
2694                 pwrb_context = &phwi_ctrlr->wrb_context[j];
2695                 kfree(pwrb_context->pwrb_handle_base);
2696                 kfree(pwrb_context->pwrb_handle_basestd);
2697         }
2698         return -ENOMEM;
2699 }
2700
2701 static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2702 {
2703         uint8_t ulp_num;
2704         struct hwi_controller *phwi_ctrlr;
2705         struct hba_parameters *p = &phba->params;
2706         struct hd_async_context *pasync_ctx;
2707         struct hd_async_handle *pasync_header_h, *pasync_data_h;
2708         unsigned int index, idx, num_per_mem, num_async_data;
2709         struct be_mem_descriptor *mem_descr;
2710
2711         for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
2712                 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
2713                         /* get async_ctx for each ULP */
2714                         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2715                         mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
2716                                      (ulp_num * MEM_DESCR_OFFSET));
2717
2718                         phwi_ctrlr = phba->phwi_ctrlr;
2719                         phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] =
2720                                 (struct hd_async_context *)
2721                                  mem_descr->mem_array[0].virtual_address;
2722
2723                         pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
2724                         memset(pasync_ctx, 0, sizeof(*pasync_ctx));
2725
2726                         pasync_ctx->async_entry =
2727                                         (struct hd_async_entry *)
2728                                         ((long unsigned int)pasync_ctx +
2729                                         sizeof(struct hd_async_context));
2730
2731                         pasync_ctx->num_entries = BEISCSI_ASYNC_HDQ_SIZE(phba,
2732                                                   ulp_num);
2733                         /* setup header buffers */
2734                         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2735                         mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
2736                                 (ulp_num * MEM_DESCR_OFFSET);
2737                         if (mem_descr->mem_array[0].virtual_address) {
2738                                 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2739                                             "BM_%d : hwi_init_async_pdu_ctx"
2740                                             " HWI_MEM_ASYNC_HEADER_BUF_ULP%d va=%p\n",
2741                                             ulp_num,
2742                                             mem_descr->mem_array[0].
2743                                             virtual_address);
2744                         } else
2745                                 beiscsi_log(phba, KERN_WARNING,
2746                                             BEISCSI_LOG_INIT,
2747                                             "BM_%d : No Virtual address for ULP : %d\n",
2748                                             ulp_num);
2749
2750                         pasync_ctx->async_header.pi = 0;
2751                         pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
2752                         pasync_ctx->async_header.va_base =
2753                                 mem_descr->mem_array[0].virtual_address;
2754
2755                         pasync_ctx->async_header.pa_base.u.a64.address =
2756                                 mem_descr->mem_array[0].
2757                                 bus_address.u.a64.address;
2758
2759                         /* setup header buffer sgls */
2760                         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2761                         mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
2762                                      (ulp_num * MEM_DESCR_OFFSET);
2763                         if (mem_descr->mem_array[0].virtual_address) {
2764                                 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2765                                             "BM_%d : hwi_init_async_pdu_ctx"
2766                                             " HWI_MEM_ASYNC_HEADER_RING_ULP%d va=%p\n",
2767                                             ulp_num,
2768                                             mem_descr->mem_array[0].
2769                                             virtual_address);
2770                         } else
2771                                 beiscsi_log(phba, KERN_WARNING,
2772                                             BEISCSI_LOG_INIT,
2773                                             "BM_%d : No Virtual address for ULP : %d\n",
2774                                             ulp_num);
2775
2776                         pasync_ctx->async_header.ring_base =
2777                                 mem_descr->mem_array[0].virtual_address;
2778
2779                         /* setup header buffer handles */
2780                         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2781                         mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
2782                                      (ulp_num * MEM_DESCR_OFFSET);
2783                         if (mem_descr->mem_array[0].virtual_address) {
2784                                 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2785                                             "BM_%d : hwi_init_async_pdu_ctx"
2786                                             " HWI_MEM_ASYNC_HEADER_HANDLE_ULP%d va=%p\n",
2787                                             ulp_num,
2788                                             mem_descr->mem_array[0].
2789                                             virtual_address);
2790                         } else
2791                                 beiscsi_log(phba, KERN_WARNING,
2792                                             BEISCSI_LOG_INIT,
2793                                             "BM_%d : No Virtual address for ULP : %d\n",
2794                                             ulp_num);
2795
2796                         pasync_ctx->async_header.handle_base =
2797                                 mem_descr->mem_array[0].virtual_address;
2798
2799                         /* setup data buffer sgls */
2800                         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2801                         mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
2802                                      (ulp_num * MEM_DESCR_OFFSET);
2803                         if (mem_descr->mem_array[0].virtual_address) {
2804                                 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2805                                             "BM_%d : hwi_init_async_pdu_ctx"
2806                                             " HWI_MEM_ASYNC_DATA_RING_ULP%d va=%p\n",
2807                                             ulp_num,
2808                                             mem_descr->mem_array[0].
2809                                             virtual_address);
2810                         } else
2811                                 beiscsi_log(phba, KERN_WARNING,
2812                                             BEISCSI_LOG_INIT,
2813                                             "BM_%d : No Virtual address for ULP : %d\n",
2814                                             ulp_num);
2815
2816                         pasync_ctx->async_data.ring_base =
2817                                 mem_descr->mem_array[0].virtual_address;
2818
2819                         /* setup data buffer handles */
2820                         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2821                         mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
2822                                      (ulp_num * MEM_DESCR_OFFSET);
2823                         if (!mem_descr->mem_array[0].virtual_address)
2824                                 beiscsi_log(phba, KERN_WARNING,
2825                                             BEISCSI_LOG_INIT,
2826                                             "BM_%d : No Virtual address for ULP : %d\n",
2827                                             ulp_num);
2828
2829                         pasync_ctx->async_data.handle_base =
2830                                 mem_descr->mem_array[0].virtual_address;
2831
2832                         pasync_header_h =
2833                                 (struct hd_async_handle *)
2834                                 pasync_ctx->async_header.handle_base;
2835                         pasync_data_h =
2836                                 (struct hd_async_handle *)
2837                                 pasync_ctx->async_data.handle_base;
2838
2839                         /* setup data buffers */
2840                         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2841                         mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 +
2842                                      (ulp_num * MEM_DESCR_OFFSET);
2843                         if (mem_descr->mem_array[0].virtual_address) {
2844                                 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2845                                             "BM_%d : hwi_init_async_pdu_ctx"
2846                                             " HWI_MEM_ASYNC_DATA_BUF_ULP%d va=%p\n",
2847                                             ulp_num,
2848                                             mem_descr->mem_array[0].
2849                                             virtual_address);
2850                         } else
2851                                 beiscsi_log(phba, KERN_WARNING,
2852                                             BEISCSI_LOG_INIT,
2853                                             "BM_%d : No Virtual address for ULP : %d\n",
2854                                             ulp_num);
2855
2856                         idx = 0;
2857                         pasync_ctx->async_data.pi = 0;
2858                         pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
2859                         pasync_ctx->async_data.va_base =
2860                                 mem_descr->mem_array[idx].virtual_address;
2861                         pasync_ctx->async_data.pa_base.u.a64.address =
2862                                 mem_descr->mem_array[idx].
2863                                 bus_address.u.a64.address;
2864
2865                         num_async_data = ((mem_descr->mem_array[idx].size) /
2866                                         phba->params.defpdu_data_sz);
2867                         num_per_mem = 0;
2868
2869                         for (index = 0; index < BEISCSI_ASYNC_HDQ_SIZE
2870                                         (phba, ulp_num); index++) {
2871                                 pasync_header_h->cri = -1;
2872                                 pasync_header_h->is_header = 1;
2873                                 pasync_header_h->index = index;
2874                                 INIT_LIST_HEAD(&pasync_header_h->link);
2875                                 pasync_header_h->pbuffer =
2876                                         (void *)((unsigned long)
2877                                                  (pasync_ctx->
2878                                                   async_header.va_base) +
2879                                                  (p->defpdu_hdr_sz * index));
2880
2881                                 pasync_header_h->pa.u.a64.address =
2882                                         pasync_ctx->async_header.pa_base.u.a64.
2883                                         address + (p->defpdu_hdr_sz * index);
2884
2885                                 pasync_ctx->async_entry[index].header =
2886                                         pasync_header_h;
2887                                 pasync_header_h++;
2888                                 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
2889                                                 wq.list);
2890
2891                                 pasync_data_h->cri = -1;
2892                                 pasync_data_h->is_header = 0;
2893                                 pasync_data_h->index = index;
2894                                 INIT_LIST_HEAD(&pasync_data_h->link);
2895
2896                                 if (!num_async_data) {
2897                                         num_per_mem = 0;
2898                                         idx++;
2899                                         pasync_ctx->async_data.va_base =
2900                                                 mem_descr->mem_array[idx].
2901                                                 virtual_address;
2902                                         pasync_ctx->async_data.pa_base.u.
2903                                                 a64.address =
2904                                                 mem_descr->mem_array[idx].
2905                                                 bus_address.u.a64.address;
2906                                         num_async_data =
2907                                                 ((mem_descr->mem_array[idx].
2908                                                   size) /
2909                                                  phba->params.defpdu_data_sz);
2910                                 }
2911                                 pasync_data_h->pbuffer =
2912                                         (void *)((unsigned long)
2913                                         (pasync_ctx->async_data.va_base) +
2914                                         (p->defpdu_data_sz * num_per_mem));
2915
2916                                 pasync_data_h->pa.u.a64.address =
2917                                         pasync_ctx->async_data.pa_base.u.a64.
2918                                         address + (p->defpdu_data_sz *
2919                                         num_per_mem);
2920                                 num_per_mem++;
2921                                 num_async_data--;
2922
2923                                 pasync_ctx->async_entry[index].data =
2924                                         pasync_data_h;
2925                                 pasync_data_h++;
2926                         }
2927                 }
2928         }
2929
2930         return 0;
2931 }
2932
2933 static int
2934 be_sgl_create_contiguous(void *virtual_address,
2935                          u64 physical_address, u32 length,
2936                          struct be_dma_mem *sgl)
2937 {
2938         WARN_ON(!virtual_address);
2939         WARN_ON(!physical_address);
2940         WARN_ON(!length);
2941         WARN_ON(!sgl);
2942
2943         sgl->va = virtual_address;
2944         sgl->dma = (unsigned long)physical_address;
2945         sgl->size = length;
2946
2947         return 0;
2948 }
2949
2950 static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
2951 {
2952         memset(sgl, 0, sizeof(*sgl));
2953 }
2954
2955 static void
2956 hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
2957                      struct mem_array *pmem, struct be_dma_mem *sgl)
2958 {
2959         if (sgl->va)
2960                 be_sgl_destroy_contiguous(sgl);
2961
2962         be_sgl_create_contiguous(pmem->virtual_address,
2963                                  pmem->bus_address.u.a64.address,
2964                                  pmem->size, sgl);
2965 }
2966
2967 static void
2968 hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
2969                            struct mem_array *pmem, struct be_dma_mem *sgl)
2970 {
2971         if (sgl->va)
2972                 be_sgl_destroy_contiguous(sgl);
2973
2974         be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
2975                                  pmem->bus_address.u.a64.address,
2976                                  pmem->size, sgl);
2977 }
2978
2979 static int be_fill_queue(struct be_queue_info *q,
2980                 u16 len, u16 entry_size, void *vaddress)
2981 {
2982         struct be_dma_mem *mem = &q->dma_mem;
2983
2984         memset(q, 0, sizeof(*q));
2985         q->len = len;
2986         q->entry_size = entry_size;
2987         mem->size = len * entry_size;
2988         mem->va = vaddress;
2989         if (!mem->va)
2990                 return -ENOMEM;
2991         memset(mem->va, 0, mem->size);
2992         return 0;
2993 }
2994
2995 static int beiscsi_create_eqs(struct beiscsi_hba *phba,
2996                              struct hwi_context_memory *phwi_context)
2997 {
2998         int ret = -ENOMEM, eq_for_mcc;
2999         unsigned int i, num_eq_pages;
3000         struct be_queue_info *eq;
3001         struct be_dma_mem *mem;
3002         void *eq_vaddress;
3003         dma_addr_t paddr;
3004
3005         num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries *
3006                                       sizeof(struct be_eq_entry));
3007
3008         if (phba->pcidev->msix_enabled)
3009                 eq_for_mcc = 1;
3010         else
3011                 eq_for_mcc = 0;
3012         for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
3013                 eq = &phwi_context->be_eq[i].q;
3014                 mem = &eq->dma_mem;
3015                 phwi_context->be_eq[i].phba = phba;
3016                 eq_vaddress = dma_alloc_coherent(&phba->pcidev->dev,
3017                                                    num_eq_pages * PAGE_SIZE,
3018                                                    &paddr, GFP_KERNEL);
3019                 if (!eq_vaddress) {
3020                         ret = -ENOMEM;
3021                         goto create_eq_error;
3022                 }
3023
3024                 mem->va = eq_vaddress;
3025                 ret = be_fill_queue(eq, phba->params.num_eq_entries,
3026                                     sizeof(struct be_eq_entry), eq_vaddress);
3027                 if (ret) {
3028                         beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3029                                     "BM_%d : be_fill_queue Failed for EQ\n");
3030                         goto create_eq_error;
3031                 }
3032
3033                 mem->dma = paddr;
3034                 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
3035                                             BEISCSI_EQ_DELAY_DEF);
3036                 if (ret) {
3037                         beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3038                                     "BM_%d : beiscsi_cmd_eq_create Failed for EQ\n");
3039                         goto create_eq_error;
3040                 }
3041
3042                 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3043                             "BM_%d : eqid = %d\n",
3044                             phwi_context->be_eq[i].q.id);
3045         }
3046         return 0;
3047
3048 create_eq_error:
3049         for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
3050                 eq = &phwi_context->be_eq[i].q;
3051                 mem = &eq->dma_mem;
3052                 if (mem->va)
3053                         dma_free_coherent(&phba->pcidev->dev, num_eq_pages
3054                                             * PAGE_SIZE,
3055                                             mem->va, mem->dma);
3056         }
3057         return ret;
3058 }
3059
3060 static int beiscsi_create_cqs(struct beiscsi_hba *phba,
3061                              struct hwi_context_memory *phwi_context)
3062 {
3063         unsigned int i, num_cq_pages;
3064         struct be_queue_info *cq, *eq;
3065         struct be_dma_mem *mem;
3066         struct be_eq_obj *pbe_eq;
3067         void *cq_vaddress;
3068         int ret = -ENOMEM;
3069         dma_addr_t paddr;
3070
3071         num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries *
3072                                       sizeof(struct sol_cqe));
3073
3074         for (i = 0; i < phba->num_cpus; i++) {
3075                 cq = &phwi_context->be_cq[i];
3076                 eq = &phwi_context->be_eq[i].q;
3077                 pbe_eq = &phwi_context->be_eq[i];
3078                 pbe_eq->cq = cq;
3079                 pbe_eq->phba = phba;
3080                 mem = &cq->dma_mem;
3081                 cq_vaddress = dma_alloc_coherent(&phba->pcidev->dev,
3082                                                    num_cq_pages * PAGE_SIZE,
3083                                                    &paddr, GFP_KERNEL);
3084                 if (!cq_vaddress) {
3085                         ret = -ENOMEM;
3086                         goto create_cq_error;
3087                 }
3088
3089                 ret = be_fill_queue(cq, phba->params.num_cq_entries,
3090                                     sizeof(struct sol_cqe), cq_vaddress);
3091                 if (ret) {
3092                         beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3093                                     "BM_%d : be_fill_queue Failed for ISCSI CQ\n");
3094                         goto create_cq_error;
3095                 }
3096
3097                 mem->dma = paddr;
3098                 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
3099                                             false, 0);
3100                 if (ret) {
3101                         beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3102                                     "BM_%d : beiscsi_cmd_eq_create Failed for ISCSI CQ\n");
3103                         goto create_cq_error;
3104                 }
3105                 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3106                             "BM_%d : iscsi cq_id is %d for eq_id %d\n"
3107                             "iSCSI CQ CREATED\n", cq->id, eq->id);
3108         }
3109         return 0;
3110
3111 create_cq_error:
3112         for (i = 0; i < phba->num_cpus; i++) {
3113                 cq = &phwi_context->be_cq[i];
3114                 mem = &cq->dma_mem;
3115                 if (mem->va)
3116                         dma_free_coherent(&phba->pcidev->dev, num_cq_pages
3117                                             * PAGE_SIZE,
3118                                             mem->va, mem->dma);
3119         }
3120         return ret;
3121 }
3122
3123 static int
3124 beiscsi_create_def_hdr(struct beiscsi_hba *phba,
3125                        struct hwi_context_memory *phwi_context,
3126                        struct hwi_controller *phwi_ctrlr,
3127                        unsigned int def_pdu_ring_sz, uint8_t ulp_num)
3128 {
3129         unsigned int idx;
3130         int ret;
3131         struct be_queue_info *dq, *cq;
3132         struct be_dma_mem *mem;
3133         struct be_mem_descriptor *mem_descr;
3134         void *dq_vaddress;
3135
3136         idx = 0;
3137         dq = &phwi_context->be_def_hdrq[ulp_num];
3138         cq = &phwi_context->be_cq[0];
3139         mem = &dq->dma_mem;
3140         mem_descr = phba->init_mem;
3141         mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
3142                     (ulp_num * MEM_DESCR_OFFSET);
3143         dq_vaddress = mem_descr->mem_array[idx].virtual_address;
3144         ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
3145                             sizeof(struct phys_addr),
3146                             sizeof(struct phys_addr), dq_vaddress);
3147         if (ret) {
3148                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3149                             "BM_%d : be_fill_queue Failed for DEF PDU HDR on ULP : %d\n",
3150                             ulp_num);
3151
3152                 return ret;
3153         }
3154         mem->dma = (unsigned long)mem_descr->mem_array[idx].
3155                                   bus_address.u.a64.address;
3156         ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
3157                                               def_pdu_ring_sz,
3158                                               phba->params.defpdu_hdr_sz,
3159                                               BEISCSI_DEFQ_HDR, ulp_num);
3160         if (ret) {
3161                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3162                             "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR on ULP : %d\n",
3163                             ulp_num);
3164
3165                 return ret;
3166         }
3167
3168         beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3169                     "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n",
3170                     ulp_num,
3171                     phwi_context->be_def_hdrq[ulp_num].id);
3172         return 0;
3173 }
3174
3175 static int
3176 beiscsi_create_def_data(struct beiscsi_hba *phba,
3177                         struct hwi_context_memory *phwi_context,
3178                         struct hwi_controller *phwi_ctrlr,
3179                         unsigned int def_pdu_ring_sz, uint8_t ulp_num)
3180 {
3181         unsigned int idx;
3182         int ret;
3183         struct be_queue_info *dataq, *cq;
3184         struct be_dma_mem *mem;
3185         struct be_mem_descriptor *mem_descr;
3186         void *dq_vaddress;
3187
3188         idx = 0;
3189         dataq = &phwi_context->be_def_dataq[ulp_num];
3190         cq = &phwi_context->be_cq[0];
3191         mem = &dataq->dma_mem;
3192         mem_descr = phba->init_mem;
3193         mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
3194                     (ulp_num * MEM_DESCR_OFFSET);
3195         dq_vaddress = mem_descr->mem_array[idx].virtual_address;
3196         ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
3197                             sizeof(struct phys_addr),
3198                             sizeof(struct phys_addr), dq_vaddress);
3199         if (ret) {
3200                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3201                             "BM_%d : be_fill_queue Failed for DEF PDU "
3202                             "DATA on ULP : %d\n",
3203                             ulp_num);
3204
3205                 return ret;
3206         }
3207         mem->dma = (unsigned long)mem_descr->mem_array[idx].
3208                                   bus_address.u.a64.address;
3209         ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
3210                                               def_pdu_ring_sz,
3211                                               phba->params.defpdu_data_sz,
3212                                               BEISCSI_DEFQ_DATA, ulp_num);
3213         if (ret) {
3214                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3215                             "BM_%d be_cmd_create_default_pdu_queue"
3216                             " Failed for DEF PDU DATA on ULP : %d\n",
3217                             ulp_num);
3218                 return ret;
3219         }
3220
3221         beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3222                     "BM_%d : iscsi def data id on ULP : %d is  %d\n",
3223                     ulp_num,
3224                     phwi_context->be_def_dataq[ulp_num].id);
3225
3226         beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3227                     "BM_%d : DEFAULT PDU DATA RING CREATED on ULP : %d\n",
3228                     ulp_num);
3229         return 0;
3230 }
3231
3232
3233 static int
3234 beiscsi_post_template_hdr(struct beiscsi_hba *phba)
3235 {
3236         struct be_mem_descriptor *mem_descr;
3237         struct mem_array *pm_arr;
3238         struct be_dma_mem sgl;
3239         int status, ulp_num;
3240
3241         for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3242                 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3243                         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
3244                         mem_descr += HWI_MEM_TEMPLATE_HDR_ULP0 +
3245                                     (ulp_num * MEM_DESCR_OFFSET);
3246                         pm_arr = mem_descr->mem_array;
3247
3248                         hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
3249                         status = be_cmd_iscsi_post_template_hdr(
3250                                  &phba->ctrl, &sgl);
3251
3252                         if (status != 0) {
3253                                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3254                                             "BM_%d : Post Template HDR Failed for "
3255                                             "ULP_%d\n", ulp_num);
3256                                 return status;
3257                         }
3258
3259                         beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3260                                     "BM_%d : Template HDR Pages Posted for "
3261                                     "ULP_%d\n", ulp_num);
3262                 }
3263         }
3264         return 0;
3265 }
3266
3267 static int
3268 beiscsi_post_pages(struct beiscsi_hba *phba)
3269 {
3270         struct be_mem_descriptor *mem_descr;
3271         struct mem_array *pm_arr;
3272         unsigned int page_offset, i;
3273         struct be_dma_mem sgl;
3274         int status, ulp_num = 0;
3275
3276         mem_descr = phba->init_mem;
3277         mem_descr += HWI_MEM_SGE;
3278         pm_arr = mem_descr->mem_array;
3279
3280         for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
3281                 if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
3282                         break;
3283
3284         page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
3285                         phba->fw_config.iscsi_icd_start[ulp_num]) / PAGE_SIZE;
3286         for (i = 0; i < mem_descr->num_elements; i++) {
3287                 hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
3288                 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
3289                                                 page_offset,
3290                                                 (pm_arr->size / PAGE_SIZE));
3291                 page_offset += pm_arr->size / PAGE_SIZE;
3292                 if (status != 0) {
3293                         beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3294                                     "BM_%d : post sgl failed.\n");
3295                         return status;
3296                 }
3297                 pm_arr++;
3298         }
3299         beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3300                     "BM_%d : POSTED PAGES\n");
3301         return 0;
3302 }
3303
3304 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
3305 {
3306         struct be_dma_mem *mem = &q->dma_mem;
3307         if (mem->va) {
3308                 dma_free_coherent(&phba->pcidev->dev, mem->size,
3309                         mem->va, mem->dma);
3310                 mem->va = NULL;
3311         }
3312 }
3313
3314 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
3315                 u16 len, u16 entry_size)
3316 {
3317         struct be_dma_mem *mem = &q->dma_mem;
3318
3319         memset(q, 0, sizeof(*q));
3320         q->len = len;
3321         q->entry_size = entry_size;
3322         mem->size = len * entry_size;
3323         mem->va = dma_alloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma,
3324                                      GFP_KERNEL);
3325         if (!mem->va)
3326                 return -ENOMEM;
3327         return 0;
3328 }
3329
3330 static int
3331 beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
3332                          struct hwi_context_memory *phwi_context,
3333                          struct hwi_controller *phwi_ctrlr)
3334 {
3335         unsigned int num_wrb_rings;
3336         u64 pa_addr_lo;
3337         unsigned int idx, num, i, ulp_num;
3338         struct mem_array *pwrb_arr;
3339         void *wrb_vaddr;
3340         struct be_dma_mem sgl;
3341         struct be_mem_descriptor *mem_descr;
3342         struct hwi_wrb_context *pwrb_context;
3343         int status;
3344         uint8_t ulp_count = 0, ulp_base_num = 0;
3345         uint16_t cid_count_ulp[BEISCSI_ULP_COUNT] = { 0 };
3346
3347         idx = 0;
3348         mem_descr = phba->init_mem;
3349         mem_descr += HWI_MEM_WRB;
3350         pwrb_arr = kmalloc_array(phba->params.cxns_per_ctrl,
3351                                  sizeof(*pwrb_arr),
3352                                  GFP_KERNEL);
3353         if (!pwrb_arr) {
3354                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3355                             "BM_%d : Memory alloc failed in create wrb ring.\n");
3356                 return -ENOMEM;
3357         }
3358         wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
3359         pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
3360         num_wrb_rings = mem_descr->mem_array[idx].size /
3361                 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
3362
3363         for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
3364                 if (num_wrb_rings) {
3365                         pwrb_arr[num].virtual_address = wrb_vaddr;
3366                         pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
3367                         pwrb_arr[num].size = phba->params.wrbs_per_cxn *
3368                                             sizeof(struct iscsi_wrb);
3369                         wrb_vaddr += pwrb_arr[num].size;
3370                         pa_addr_lo += pwrb_arr[num].size;
3371                         num_wrb_rings--;
3372                 } else {
3373                         idx++;
3374                         wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
3375                         pa_addr_lo = mem_descr->mem_array[idx].
3376                                         bus_address.u.a64.address;
3377                         num_wrb_rings = mem_descr->mem_array[idx].size /
3378                                         (phba->params.wrbs_per_cxn *
3379                                         sizeof(struct iscsi_wrb));
3380                         pwrb_arr[num].virtual_address = wrb_vaddr;
3381                         pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
3382                         pwrb_arr[num].size = phba->params.wrbs_per_cxn *
3383                                                  sizeof(struct iscsi_wrb);
3384                         wrb_vaddr += pwrb_arr[num].size;
3385                         pa_addr_lo += pwrb_arr[num].size;
3386                         num_wrb_rings--;
3387                 }
3388         }
3389
3390         /* Get the ULP Count */
3391         for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
3392                 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3393                         ulp_count++;
3394                         ulp_base_num = ulp_num;
3395                         cid_count_ulp[ulp_num] =
3396                                 BEISCSI_GET_CID_COUNT(phba, ulp_num);
3397                 }
3398
3399         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3400                 if (ulp_count > 1) {
3401                         ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT;
3402
3403                         if (!cid_count_ulp[ulp_base_num])
3404                                 ulp_base_num = (ulp_base_num + 1) %
3405                                                 BEISCSI_ULP_COUNT;
3406
3407                         cid_count_ulp[ulp_base_num]--;
3408                 }
3409
3410
3411                 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
3412                 status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
3413                                             &phwi_context->be_wrbq[i],
3414                                             &phwi_ctrlr->wrb_context[i],
3415                                             ulp_base_num);
3416                 if (status != 0) {
3417                         beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3418                                     "BM_%d : wrbq create failed.");
3419                         kfree(pwrb_arr);
3420                         return status;
3421                 }
3422                 pwrb_context = &phwi_ctrlr->wrb_context[i];
3423                 BE_SET_CID_TO_CRI(i, pwrb_context->cid);
3424         }
3425         kfree(pwrb_arr);
3426         return 0;
3427 }
3428
3429 static void free_wrb_handles(struct beiscsi_hba *phba)
3430 {
3431         unsigned int index;
3432         struct hwi_controller *phwi_ctrlr;
3433         struct hwi_wrb_context *pwrb_context;
3434
3435         phwi_ctrlr = phba->phwi_ctrlr;
3436         for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
3437                 pwrb_context = &phwi_ctrlr->wrb_context[index];
3438                 kfree(pwrb_context->pwrb_handle_base);
3439                 kfree(pwrb_context->pwrb_handle_basestd);
3440         }
3441 }
3442
3443 static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
3444 {
3445         struct be_ctrl_info *ctrl = &phba->ctrl;
3446         struct be_dma_mem *ptag_mem;
3447         struct be_queue_info *q;
3448         int i, tag;
3449
3450         q = &phba->ctrl.mcc_obj.q;
3451         for (i = 0; i < MAX_MCC_CMD; i++) {
3452                 tag = i + 1;
3453                 if (!test_bit(MCC_TAG_STATE_RUNNING,
3454                               &ctrl->ptag_state[tag].tag_state))
3455                         continue;
3456
3457                 if (test_bit(MCC_TAG_STATE_TIMEOUT,
3458                              &ctrl->ptag_state[tag].tag_state)) {
3459                         ptag_mem = &ctrl->ptag_state[tag].tag_mem_state;
3460                         if (ptag_mem->size) {
3461                                 dma_free_coherent(&ctrl->pdev->dev,
3462                                                     ptag_mem->size,
3463                                                     ptag_mem->va,
3464                                                     ptag_mem->dma);
3465                                 ptag_mem->size = 0;
3466                         }
3467                         continue;
3468                 }
3469                 /**
3470                  * If MCC is still active and waiting then wake up the process.
3471                  * We are here only because port is going offline. The process
3472                  * sees that (BEISCSI_HBA_ONLINE is cleared) and EIO error is
3473                  * returned for the operation and allocated memory cleaned up.
3474                  */
3475                 if (waitqueue_active(&ctrl->mcc_wait[tag])) {
3476                         ctrl->mcc_tag_status[tag] = MCC_STATUS_FAILED;
3477                         ctrl->mcc_tag_status[tag] |= CQE_VALID_MASK;
3478                         wake_up_interruptible(&ctrl->mcc_wait[tag]);
3479                         /*
3480                          * Control tag info gets reinitialized in enable
3481                          * so wait for the process to clear running state.
3482                          */
3483                         while (test_bit(MCC_TAG_STATE_RUNNING,
3484                                         &ctrl->ptag_state[tag].tag_state))
3485                                 schedule_timeout_uninterruptible(HZ);
3486                 }
3487                 /**
3488                  * For MCC with tag_states MCC_TAG_STATE_ASYNC and
3489                  * MCC_TAG_STATE_IGNORE nothing needs to done.
3490                  */
3491         }
3492         if (q->created) {
3493                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
3494                 be_queue_free(phba, q);
3495         }
3496
3497         q = &phba->ctrl.mcc_obj.cq;
3498         if (q->created) {
3499                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3500                 be_queue_free(phba, q);
3501         }
3502 }
3503
3504 static int be_mcc_queues_create(struct beiscsi_hba *phba,
3505                                 struct hwi_context_memory *phwi_context)
3506 {
3507         struct be_queue_info *q, *cq;
3508         struct be_ctrl_info *ctrl = &phba->ctrl;
3509
3510         /* Alloc MCC compl queue */
3511         cq = &phba->ctrl.mcc_obj.cq;
3512         if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
3513                         sizeof(struct be_mcc_compl)))
3514                 goto err;
3515         /* Ask BE to create MCC compl queue; */
3516         if (phba->pcidev->msix_enabled) {
3517                 if (beiscsi_cmd_cq_create(ctrl, cq,
3518                                         &phwi_context->be_eq[phba->num_cpus].q,
3519                                         false, true, 0))
3520                         goto mcc_cq_free;
3521         } else {
3522                 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
3523                                           false, true, 0))
3524                         goto mcc_cq_free;
3525         }
3526
3527         /* Alloc MCC queue */
3528         q = &phba->ctrl.mcc_obj.q;
3529         if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
3530                 goto mcc_cq_destroy;
3531
3532         /* Ask BE to create MCC queue */
3533         if (beiscsi_cmd_mccq_create(phba, q, cq))
3534                 goto mcc_q_free;
3535
3536         return 0;
3537
3538 mcc_q_free:
3539         be_queue_free(phba, q);
3540 mcc_cq_destroy:
3541         beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
3542 mcc_cq_free:
3543         be_queue_free(phba, cq);
3544 err:
3545         return -ENOMEM;
3546 }
3547
3548 static void be2iscsi_enable_msix(struct beiscsi_hba *phba)
3549 {
3550         int nvec = 1;
3551
3552         switch (phba->generation) {
3553         case BE_GEN2:
3554         case BE_GEN3:
3555                 nvec = BEISCSI_MAX_NUM_CPUS + 1;
3556                 break;
3557         case BE_GEN4:
3558                 nvec = phba->fw_config.eqid_count;
3559                 break;
3560         default:
3561                 nvec = 2;
3562                 break;
3563         }
3564
3565         /* if eqid_count == 1 fall back to INTX */
3566         if (enable_msix && nvec > 1) {
3567                 struct irq_affinity desc = { .post_vectors = 1 };
3568
3569                 if (pci_alloc_irq_vectors_affinity(phba->pcidev, 2, nvec,
3570                                 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc) < 0) {
3571                         phba->num_cpus = nvec - 1;
3572                         return;
3573                 }
3574         }
3575
3576         phba->num_cpus = 1;
3577 }
3578
3579 static void hwi_purge_eq(struct beiscsi_hba *phba)
3580 {
3581         struct hwi_controller *phwi_ctrlr;
3582         struct hwi_context_memory *phwi_context;
3583         struct be_queue_info *eq;
3584         struct be_eq_entry *eqe = NULL;
3585         int i, eq_msix;
3586         unsigned int num_processed;
3587
3588         if (beiscsi_hba_in_error(phba))
3589                 return;
3590
3591         phwi_ctrlr = phba->phwi_ctrlr;
3592         phwi_context = phwi_ctrlr->phwi_ctxt;
3593         if (phba->pcidev->msix_enabled)
3594                 eq_msix = 1;
3595         else
3596                 eq_msix = 0;
3597
3598         for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
3599                 eq = &phwi_context->be_eq[i].q;
3600                 eqe = queue_tail_node(eq);
3601                 num_processed = 0;
3602                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
3603                                         & EQE_VALID_MASK) {
3604                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
3605                         queue_tail_inc(eq);
3606                         eqe = queue_tail_node(eq);
3607                         num_processed++;
3608                 }
3609
3610                 if (num_processed)
3611                         hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
3612         }
3613 }
3614
3615 static void hwi_cleanup_port(struct beiscsi_hba *phba)
3616 {
3617         struct be_queue_info *q;
3618         struct be_ctrl_info *ctrl = &phba->ctrl;
3619         struct hwi_controller *phwi_ctrlr;
3620         struct hwi_context_memory *phwi_context;
3621         int i, eq_for_mcc, ulp_num;
3622
3623         for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
3624                 if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
3625                         beiscsi_cmd_iscsi_cleanup(phba, ulp_num);
3626
3627         /**
3628          * Purge all EQ entries that may have been left out. This is to
3629          * workaround a problem we've seen occasionally where driver gets an
3630          * interrupt with EQ entry bit set after stopping the controller.
3631          */
3632         hwi_purge_eq(phba);
3633
3634         phwi_ctrlr = phba->phwi_ctrlr;
3635         phwi_context = phwi_ctrlr->phwi_ctxt;
3636
3637         be_cmd_iscsi_remove_template_hdr(ctrl);
3638
3639         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3640                 q = &phwi_context->be_wrbq[i];
3641                 if (q->created)
3642                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
3643         }
3644         kfree(phwi_context->be_wrbq);
3645         free_wrb_handles(phba);
3646
3647         for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3648                 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3649
3650                         q = &phwi_context->be_def_hdrq[ulp_num];
3651                         if (q->created)
3652                                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3653
3654                         q = &phwi_context->be_def_dataq[ulp_num];
3655                         if (q->created)
3656                                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3657                 }
3658         }
3659
3660         beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
3661
3662         for (i = 0; i < (phba->num_cpus); i++) {
3663                 q = &phwi_context->be_cq[i];
3664                 if (q->created) {
3665                         be_queue_free(phba, q);
3666                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3667                 }
3668         }
3669
3670         be_mcc_queues_destroy(phba);
3671         if (phba->pcidev->msix_enabled)
3672                 eq_for_mcc = 1;
3673         else
3674                 eq_for_mcc = 0;
3675         for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
3676                 q = &phwi_context->be_eq[i].q;
3677                 if (q->created) {
3678                         be_queue_free(phba, q);
3679                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
3680                 }
3681         }
3682         /* this ensures complete FW cleanup */
3683         beiscsi_cmd_function_reset(phba);
3684         /* last communication, indicate driver is unloading */
3685         beiscsi_cmd_special_wrb(&phba->ctrl, 0);
3686 }
3687
3688 static int hwi_init_port(struct beiscsi_hba *phba)
3689 {
3690         struct hwi_controller *phwi_ctrlr;
3691         struct hwi_context_memory *phwi_context;
3692         unsigned int def_pdu_ring_sz;
3693         struct be_ctrl_info *ctrl = &phba->ctrl;
3694         int status, ulp_num;
3695         u16 nbufs;
3696
3697         phwi_ctrlr = phba->phwi_ctrlr;
3698         phwi_context = phwi_ctrlr->phwi_ctxt;
3699         /* set port optic state to unknown */
3700         phba->optic_state = 0xff;
3701
3702         status = beiscsi_create_eqs(phba, phwi_context);
3703         if (status != 0) {
3704                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3705                             "BM_%d : EQ not created\n");
3706                 goto error;
3707         }
3708
3709         status = be_mcc_queues_create(phba, phwi_context);
3710         if (status != 0)
3711                 goto error;
3712
3713         status = beiscsi_check_supported_fw(ctrl, phba);
3714         if (status != 0) {
3715                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3716                             "BM_%d : Unsupported fw version\n");
3717                 goto error;
3718         }
3719
3720         status = beiscsi_create_cqs(phba, phwi_context);
3721         if (status != 0) {
3722                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3723                             "BM_%d : CQ not created\n");
3724                 goto error;
3725         }
3726
3727         for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3728                 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3729                         nbufs = phwi_context->pasync_ctx[ulp_num]->num_entries;
3730                         def_pdu_ring_sz = nbufs * sizeof(struct phys_addr);
3731
3732                         status = beiscsi_create_def_hdr(phba, phwi_context,
3733                                                         phwi_ctrlr,
3734                                                         def_pdu_ring_sz,
3735                                                         ulp_num);
3736                         if (status != 0) {
3737                                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3738                                             "BM_%d : Default Header not created for ULP : %d\n",
3739                                             ulp_num);
3740                                 goto error;
3741                         }
3742
3743                         status = beiscsi_create_def_data(phba, phwi_context,
3744                                                          phwi_ctrlr,
3745                                                          def_pdu_ring_sz,
3746                                                          ulp_num);
3747                         if (status != 0) {
3748                                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3749                                             "BM_%d : Default Data not created for ULP : %d\n",
3750                                             ulp_num);
3751                                 goto error;
3752                         }
3753                         /**
3754                          * Now that the default PDU rings have been created,
3755                          * let EP know about it.
3756                          */
3757                         beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR,
3758                                                  ulp_num, nbufs);
3759                         beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_DATA,
3760                                                  ulp_num, nbufs);
3761                 }
3762         }
3763
3764         status = beiscsi_post_pages(phba);
3765         if (status != 0) {
3766                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3767                             "BM_%d : Post SGL Pages Failed\n");
3768                 goto error;
3769         }
3770
3771         status = beiscsi_post_template_hdr(phba);
3772         if (status != 0) {
3773                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3774                             "BM_%d : Template HDR Posting for CXN Failed\n");
3775         }
3776
3777         status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
3778         if (status != 0) {
3779                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3780                             "BM_%d : WRB Rings not created\n");
3781                 goto error;
3782         }
3783
3784         for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3785                 uint16_t async_arr_idx = 0;
3786
3787                 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3788                         uint16_t cri = 0;
3789                         struct hd_async_context *pasync_ctx;
3790
3791                         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(
3792                                      phwi_ctrlr, ulp_num);
3793                         for (cri = 0; cri <
3794                              phba->params.cxns_per_ctrl; cri++) {
3795                                 if (ulp_num == BEISCSI_GET_ULP_FROM_CRI
3796                                                (phwi_ctrlr, cri))
3797                                         pasync_ctx->cid_to_async_cri_map[
3798                                         phwi_ctrlr->wrb_context[cri].cid] =
3799                                         async_arr_idx++;
3800                         }
3801                 }
3802         }
3803
3804         beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3805                     "BM_%d : hwi_init_port success\n");
3806         return 0;
3807
3808 error:
3809         beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3810                     "BM_%d : hwi_init_port failed\n");
3811         hwi_cleanup_port(phba);
3812         return status;
3813 }
3814
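     /**
      * hwi_init_controller()- Initialize the HWI controller
      * @phba: ptr to the HBA context
      *
      * Maps the HWI context memory, initializes the iSCSI templates, WRB
      * handles and async PDU context, then brings up the port via
      * hwi_init_port().
      **/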
3815 static int hwi_init_controller(struct beiscsi_hba *phba)
3816 {
3817         struct hwi_controller *phwi_ctrlr;
3818
3819         phwi_ctrlr = phba->phwi_ctrlr;
3820         if (phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements == 1) {
3821                 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
3822                     init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
3823                 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3824                             "BM_%d :  phwi_ctrlr->phwi_ctxt=%p\n",
3825                             phwi_ctrlr->phwi_ctxt);
3826         } else {
3827                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3828                             "BM_%d : HWI_MEM_ADDN_CONTEXT has more "
3829                             "than one element. Failing to load\n");
3830                 return -ENOMEM;
3831         }
3832
3833         iscsi_init_global_templates(phba);
3834         if (beiscsi_init_wrb_handle(phba))
3835                 return -ENOMEM;
3836
3837         if (hwi_init_async_pdu_ctx(phba)) {
3838                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3839                             "BM_%d : hwi_init_async_pdu_ctx failed\n");
3840                 return -ENOMEM;
3841         }
3842
3843         if (hwi_init_port(phba) != 0) {
3844                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3845                             "BM_%d : hwi_init_controller failed\n");
3846
3847                 return -ENOMEM;
3848         }
3849         return 0;
3850 }
3851
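     /**
      * beiscsi_free_mem()- Free memory allocated at init time
      * @phba: ptr to the HBA context
      *
      * Frees the DMA-coherent regions tracked in init_mem together with
      * the WRB context and HWI controller structures.
      **/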
3852 static void beiscsi_free_mem(struct beiscsi_hba *phba)
3853 {
3854         struct be_mem_descriptor *mem_descr;
3855         int i, j;
3856
3857         mem_descr = phba->init_mem;
3858         for (i = 0; i < SE_MEM_MAX; i++) {
3859                 for (j = mem_descr->num_elements; j > 0; j--) {
3860                         dma_free_coherent(&phba->pcidev->dev,
3861                           mem_descr->mem_array[j - 1].size,
3862                           mem_descr->mem_array[j - 1].virtual_address,
3863                           (unsigned long)mem_descr->mem_array[j - 1].
3864                           bus_address.u.a64.address);
3865                 }
3866
3867                 kfree(mem_descr->mem_array);
3868                 mem_descr++;
3869         }
3870         kfree(phba->init_mem);
3871         kfree(phba->phwi_ctrlr->wrb_context);
3872         kfree(phba->phwi_ctrlr);
3873 }
3874
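     /**
      * beiscsi_init_sgl_handle()- Set up the IO and EH SGL handle pools
      * @phba: ptr to the HBA context
      *
      * Carves the HWI_MEM_SGLH and HWI_MEM_SGE regions into sgl_handle
      * entries for IO and mgmt (eh) tasks and assigns each handle an SGE
      * fragment and an ICD index starting at the ULP's iscsi_icd_start.
      **/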
3875 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
3876 {
3877         struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
3878         struct sgl_handle *psgl_handle;
3879         struct iscsi_sge *pfrag;
3880         unsigned int arr_index, i, idx;
3881         unsigned int ulp_icd_start, ulp_num = 0;
3882
3883         phba->io_sgl_hndl_avbl = 0;
3884         phba->eh_sgl_hndl_avbl = 0;
3885
3886         mem_descr_sglh = phba->init_mem;
3887         mem_descr_sglh += HWI_MEM_SGLH;
3888         if (mem_descr_sglh->num_elements == 1) {
3889                 phba->io_sgl_hndl_base = kcalloc(phba->params.ios_per_ctrl,
3890                                                  sizeof(struct sgl_handle *),
3891                                                  GFP_KERNEL);
3892                 if (!phba->io_sgl_hndl_base) {
3893                         beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3894                                     "BM_%d : Mem Alloc Failed. Failing to load\n");
3895                         return -ENOMEM;
3896                 }
3897                 phba->eh_sgl_hndl_base =
3898                         kcalloc(phba->params.icds_per_ctrl -
3899                                         phba->params.ios_per_ctrl,
3900                                 sizeof(struct sgl_handle *), GFP_KERNEL);
3901                 if (!phba->eh_sgl_hndl_base) {
3902                         kfree(phba->io_sgl_hndl_base);
3903                         beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3904                                     "BM_%d : Mem Alloc Failed. Failing to load\n");
3905                         return -ENOMEM;
3906                 }
3907         } else {
3908                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3909                             "BM_%d : HWI_MEM_SGLH has more than one element. "
3910                             "Failing to load\n");
3911                 return -ENOMEM;
3912         }
3913
3914         arr_index = 0;
3915         idx = 0;
3916         while (idx < mem_descr_sglh->num_elements) {
3917                 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
3918
3919                 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
3920                       sizeof(struct sgl_handle)); i++) {
3921                         if (arr_index < phba->params.ios_per_ctrl) {
3922                                 phba->io_sgl_hndl_base[arr_index] = psgl_handle;
3923                                 phba->io_sgl_hndl_avbl++;
3924                                 arr_index++;
3925                         } else {
3926                                 phba->eh_sgl_hndl_base[arr_index -
3927                                         phba->params.ios_per_ctrl] =
3928                                                                 psgl_handle;
3929                                 arr_index++;
3930                                 phba->eh_sgl_hndl_avbl++;
3931                         }
3932                         psgl_handle++;
3933                 }
3934                 idx++;
3935         }
3936         beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3937                     "BM_%d : phba->io_sgl_hndl_avbl=%d "
3938                     "phba->eh_sgl_hndl_avbl=%d\n",
3939                     phba->io_sgl_hndl_avbl,
3940                     phba->eh_sgl_hndl_avbl);
3941
3942         mem_descr_sg = phba->init_mem;
3943         mem_descr_sg += HWI_MEM_SGE;
3944         beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3945                     "BM_%d : mem_descr_sg->num_elements=%d\n",
3946                     mem_descr_sg->num_elements);
3947
3948         for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
3949                 if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
3950                         break;
3951
3952         ulp_icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
3953
3954         arr_index = 0;
3955         idx = 0;
3956         while (idx < mem_descr_sg->num_elements) {
3957                 pfrag = mem_descr_sg->mem_array[idx].virtual_address;
3958
3959                 for (i = 0;
3960                      i < (mem_descr_sg->mem_array[idx].size) /
3961                      (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
3962                      i++) {
3963                         if (arr_index < phba->params.ios_per_ctrl)
3964                                 psgl_handle = phba->io_sgl_hndl_base[arr_index];
3965                         else
3966                                 psgl_handle = phba->eh_sgl_hndl_base[arr_index -
3967                                                 phba->params.ios_per_ctrl];
3968                         psgl_handle->pfrag = pfrag;
3969                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
3970                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
3971                         pfrag += phba->params.num_sge_per_io;
3972                         psgl_handle->sgl_index = ulp_icd_start + arr_index++;
3973                 }
3974                 idx++;
3975         }
3976         phba->io_sgl_free_index = 0;
3977         phba->io_sgl_alloc_index = 0;
3978         phba->eh_sgl_free_index = 0;
3979         phba->eh_sgl_alloc_index = 0;
3980         return 0;
3981 }
3982
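     /**
      * hba_setup_cid_tbls()- Allocate the CID, EP and connection tables
      * @phba: ptr to the HBA context
      *
      * Allocates a CID array per supported ULP, populated from the WRB
      * contexts, and the ep_array/conn_table used to look up endpoints
      * and connections by CID. All allocations are undone on failure.
      **/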
3983 static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3984 {
3985         int ret;
3986         uint16_t i, ulp_num;
3987         struct ulp_cid_info *ptr_cid_info = NULL;
3988
3989         for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3990                 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
3991                         ptr_cid_info = kzalloc(sizeof(struct ulp_cid_info),
3992                                                GFP_KERNEL);
3993
3994                         if (!ptr_cid_info) {
3995                                 ret = -ENOMEM;
3996                                 goto free_memory;
3997                         }
3998
3999                         /* Allocate memory for CID array */
4000                         ptr_cid_info->cid_array =
4001                                 kcalloc(BEISCSI_GET_CID_COUNT(phba, ulp_num),
4002                                         sizeof(*ptr_cid_info->cid_array),
4003                                         GFP_KERNEL);
4004                         if (!ptr_cid_info->cid_array) {
4005                                 kfree(ptr_cid_info);
4006                                 ptr_cid_info = NULL;
4007                                 ret = -ENOMEM;
4008
4009                                 goto free_memory;
4010                         }
4011                         ptr_cid_info->avlbl_cids = BEISCSI_GET_CID_COUNT(
4012                                                    phba, ulp_num);
4013
4014                         /* Save the cid_info_array ptr */
4015                         phba->cid_array_info[ulp_num] = ptr_cid_info;
4016                 }
4017         }
4018         phba->ep_array = kcalloc(phba->params.cxns_per_ctrl,
4019                                  sizeof(struct iscsi_endpoint *),
4020                                  GFP_KERNEL);
4021         if (!phba->ep_array) {
4022                 ret = -ENOMEM;
4023
4024                 goto free_memory;
4025         }
4026
4027         phba->conn_table = kcalloc(phba->params.cxns_per_ctrl,
4028                                    sizeof(struct beiscsi_conn *),
4029                                    GFP_KERNEL);
4030         if (!phba->conn_table) {
4031                 kfree(phba->ep_array);
4032                 phba->ep_array = NULL;
4033                 ret = -ENOMEM;
4034
4035                 goto free_memory;
4036         }
4037
4038         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
4039                 ulp_num = phba->phwi_ctrlr->wrb_context[i].ulp_num;
4040
4041                 ptr_cid_info = phba->cid_array_info[ulp_num];
4042                 ptr_cid_info->cid_array[ptr_cid_info->cid_alloc++] =
4043                         phba->phwi_ctrlr->wrb_context[i].cid;
4044
4045         }
4046
4047         for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4048                 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4049                         ptr_cid_info = phba->cid_array_info[ulp_num];
4050
4051                         ptr_cid_info->cid_alloc = 0;
4052                         ptr_cid_info->cid_free = 0;
4053                 }
4054         }
4055         return 0;
4056
4057 free_memory:
4058         for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4059                 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4060                         ptr_cid_info = phba->cid_array_info[ulp_num];
4061
4062                         if (ptr_cid_info) {
4063                                 kfree(ptr_cid_info->cid_array);
4064                                 kfree(ptr_cid_info);
4065                                 phba->cid_array_info[ulp_num] = NULL;
4066                         }
4067                 }
4068         }
4069
4070         return ret;
4071 }
4072
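     /**
      * hwi_enable_intr()- Enable adapter interrupts
      * @phba: ptr to the HBA context
      *
      * Sets the host interrupt bit in the PCICFG membar and rearms the
      * EQ doorbells: EQ 0 only in INTx mode, all IO EQs plus the MCC EQ
      * when MSI-X is enabled.
      **/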
4073 static void hwi_enable_intr(struct beiscsi_hba *phba)
4074 {
4075         struct be_ctrl_info *ctrl = &phba->ctrl;
4076         struct hwi_controller *phwi_ctrlr;
4077         struct hwi_context_memory *phwi_context;
4078         struct be_queue_info *eq;
4079         u8 __iomem *addr;
4080         u32 reg, i;
4081         u32 enabled;
4082
4083         phwi_ctrlr = phba->phwi_ctrlr;
4084         phwi_context = phwi_ctrlr->phwi_ctxt;
4085
4086         addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
4087                         PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
4088         reg = ioread32(addr);
4089
4090         enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
4091         if (!enabled) {
4092                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
4093                 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4094                             "BM_%d : reg = 0x%08x addr=%p\n", reg, addr);
4095                 iowrite32(reg, addr);
4096         }
4097
4098         if (!phba->pcidev->msix_enabled) {
4099                 eq = &phwi_context->be_eq[0].q;
4100                 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4101                             "BM_%d : eq->id=%d\n", eq->id);
4102
4103                 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
4104         } else {
4105                 for (i = 0; i <= phba->num_cpus; i++) {
4106                         eq = &phwi_context->be_eq[i].q;
4107                         beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4108                                     "BM_%d : eq->id=%d\n", eq->id);
4109                         hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
4110                 }
4111         }
4112 }
4113
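     /**
      * hwi_disable_intr()- Disable adapter interrupts
      * @phba: ptr to the HBA context
      *
      * Clears the host interrupt bit in the PCICFG membar; warns if
      * interrupts are already disabled.
      **/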
4114 static void hwi_disable_intr(struct beiscsi_hba *phba)
4115 {
4116         struct be_ctrl_info *ctrl = &phba->ctrl;
4117
4118         u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
4119         u32 reg = ioread32(addr);
4120
4121         u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
4122         if (enabled) {
4123                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
4124                 iowrite32(reg, addr);
4125         } else
4126                 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
4127                             "BM_%d : In hwi_disable_intr, Already Disabled\n");
4128 }
4129
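     /**
      * beiscsi_init_port()- Initialize the iSCSI port
      * @phba: ptr to the HBA context
      *
      * Initializes the HWI controller, the SGL handle pools and the CID
      * tables. On failure hwi_cleanup_port() is invoked.
      **/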
4130 static int beiscsi_init_port(struct beiscsi_hba *phba)
4131 {
4132         int ret;
4133
4134         ret = hwi_init_controller(phba);
4135         if (ret < 0) {
4136                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4137                             "BM_%d : init controller failed\n");
4138                 return ret;
4139         }
4140         ret = beiscsi_init_sgl_handle(phba);
4141         if (ret < 0) {
4142                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4143                             "BM_%d : init sgl handles failed\n");
4144                 goto cleanup_port;
4145         }
4146
4147         ret = hba_setup_cid_tbls(phba);
4148         if (ret < 0) {
4149                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4150                             "BM_%d : setup CID table failed\n");
4151                 kfree(phba->io_sgl_hndl_base);
4152                 kfree(phba->eh_sgl_hndl_base);
4153                 goto cleanup_port;
4154         }
4155         return ret;
4156
4157 cleanup_port:
4158         hwi_cleanup_port(phba);
4159         return ret;
4160 }
4161
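     /**
      * beiscsi_cleanup_port()- Free the port resources
      * @phba: ptr to the HBA context
      *
      * Frees the SGL handle pools, ep_array, conn_table and the per-ULP
      * CID info allocated during beiscsi_init_port().
      **/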
4162 static void beiscsi_cleanup_port(struct beiscsi_hba *phba)
4163 {
4164         struct ulp_cid_info *ptr_cid_info = NULL;
4165         int ulp_num;
4166
4167         kfree(phba->io_sgl_hndl_base);
4168         kfree(phba->eh_sgl_hndl_base);
4169         kfree(phba->ep_array);
4170         kfree(phba->conn_table);
4171
4172         for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4173                 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4174                         ptr_cid_info = phba->cid_array_info[ulp_num];
4175
4176                         if (ptr_cid_info) {
4177                                 kfree(ptr_cid_info->cid_array);
4178                                 kfree(ptr_cid_info);
4179                                 phba->cid_array_info[ulp_num] = NULL;
4180                         }
4181                 }
4182         }
4183 }
4184
4185 /**
4186  * beiscsi_free_mgmt_task_handles()- Free driver CXN resources
4187  * @beiscsi_conn: ptr to the conn to be cleaned up
4188  * @task: ptr to iscsi_task resource to be freed.
4189  *
4190  * Free driver mgmt resources bound to the CXN.
4191  **/
4192 void
4193 beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
4194                                 struct iscsi_task *task)
4195 {
4196         struct beiscsi_io_task *io_task;
4197         struct beiscsi_hba *phba = beiscsi_conn->phba;
4198         struct hwi_wrb_context *pwrb_context;
4199         struct hwi_controller *phwi_ctrlr;
4200         uint16_t cri_index = BE_GET_CRI_FROM_CID(
4201                                 beiscsi_conn->beiscsi_conn_cid);
4202
4203         phwi_ctrlr = phba->phwi_ctrlr;
4204         pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
4205
4206         io_task = task->dd_data;
4207
4208         if (io_task->pwrb_handle) {
4209                 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
4210                 io_task->pwrb_handle = NULL;
4211         }
4212
4213         if (io_task->psgl_handle) {
4214                 free_mgmt_sgl_handle(phba, io_task->psgl_handle);
4215                 io_task->psgl_handle = NULL;
4216         }
4217
4218         if (io_task->mtask_addr) {
4219                 dma_unmap_single(&phba->pcidev->dev,
4220                                  io_task->mtask_addr,
4221                                  io_task->mtask_data_count,
4222                                  DMA_TO_DEVICE);
4223                 io_task->mtask_addr = 0;
4224         }
4225 }
4226
4227 /**
4228  * beiscsi_cleanup_task()- Free driver resources of the task
4229  * @task: ptr to the iscsi task
4230  *
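      * Frees the command BHS and, for SCSI commands, the WRB and SGL
      * handles; mgmt task handles are freed unless login is in progress.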
4231  **/
4232 static void beiscsi_cleanup_task(struct iscsi_task *task)
4233 {
4234         struct beiscsi_io_task *io_task = task->dd_data;
4235         struct iscsi_conn *conn = task->conn;
4236         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4237         struct beiscsi_hba *phba = beiscsi_conn->phba;
4238         struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
4239         struct hwi_wrb_context *pwrb_context;
4240         struct hwi_controller *phwi_ctrlr;
4241         uint16_t cri_index = BE_GET_CRI_FROM_CID(
4242                              beiscsi_conn->beiscsi_conn_cid);
4243
4244         phwi_ctrlr = phba->phwi_ctrlr;
4245         pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
4246
4247         if (io_task->cmd_bhs) {
4248                 dma_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
4249                               io_task->bhs_pa.u.a64.address);
4250                 io_task->cmd_bhs = NULL;
4251                 task->hdr = NULL;
4252         }
4253
4254         if (task->sc) {
4255                 if (io_task->pwrb_handle) {
4256                         free_wrb_handle(phba, pwrb_context,
4257                                         io_task->pwrb_handle);
4258                         io_task->pwrb_handle = NULL;
4259                 }
4260
4261                 if (io_task->psgl_handle) {
4262                         free_io_sgl_handle(phba, io_task->psgl_handle);
4263                         io_task->psgl_handle = NULL;
4264                 }
4265
4266                 if (io_task->scsi_cmnd) {
4267                         if (io_task->num_sg)
4268                                 scsi_dma_unmap(io_task->scsi_cmnd);
4269                         io_task->scsi_cmnd = NULL;
4270                 }
4271         } else {
4272                 if (!beiscsi_conn->login_in_progress)
4273                         beiscsi_free_mgmt_task_handles(beiscsi_conn, task);
4274         }
4275 }
4276
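     /**
      * beiscsi_offload_connection()- Offload the connection to the adapter
      * @beiscsi_conn: ptr to the conn being offloaded
      * @params: offload parameters negotiated during login
      *
      * Cleans up the login task, builds a CONTEXT_UPDATE WRB in the
      * format for the adapter family and rings the WRB doorbell. The
      * handle is returned to the pool as there is no completion for
      * CONTEXT_UPDATE.
      **/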
4277 void
4278 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
4279                            struct beiscsi_offload_params *params)
4280 {
4281         struct wrb_handle *pwrb_handle;
4282         struct hwi_wrb_context *pwrb_context = NULL;
4283         struct beiscsi_hba *phba = beiscsi_conn->phba;
4284         struct iscsi_task *task = beiscsi_conn->task;
4285         struct iscsi_session *session = task->conn->session;
4286         u32 doorbell = 0;
4287
4288         /*
4289          * We can always use 0 here because it is reserved by libiscsi for
4290          * login/startup related tasks.
4291          */
4292         beiscsi_conn->login_in_progress = 0;
4293         spin_lock_bh(&session->back_lock);
4294         beiscsi_cleanup_task(task);
4295         spin_unlock_bh(&session->back_lock);
4296
4297         pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid,
4298                                        &pwrb_context);
4299
4300         /* Check for the adapter family */
4301         if (is_chip_be2_be3r(phba))
4302                 beiscsi_offload_cxn_v0(params, pwrb_handle,
4303                                        phba->init_mem,
4304                                        pwrb_context);
4305         else
4306                 beiscsi_offload_cxn_v2(params, pwrb_handle,
4307                                        pwrb_context);
4308
4309         be_dws_le_to_cpu(pwrb_handle->pwrb,
4310                          sizeof(struct iscsi_target_context_update_wrb));
4311
4312         doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
4313         doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
4314                              << DB_DEF_PDU_WRB_INDEX_SHIFT;
4315         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4316         iowrite32(doorbell, phba->db_va +
4317                   beiscsi_conn->doorbell_offset);
4318
4319         /*
4320          * There is no completion for CONTEXT_UPDATE. The completion of next
4321          * WRB posted guarantees FW's processing and DMA'ing of it.
4322          * Use beiscsi_put_wrb_handle to put it back in the pool which makes
4323          * sure zero'ing or reuse of the WRB only after wrbs_per_cxn.
4324          */
4325         beiscsi_put_wrb_handle(pwrb_context, pwrb_handle,
4326                                phba->params.wrbs_per_cxn);
4327         beiscsi_log(phba, KERN_INFO,
4328                     BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4329                     "BM_%d : put CONTEXT_UPDATE pwrb_handle=%p free_index=0x%x wrb_handles_available=%d\n",
4330                     pwrb_handle, pwrb_context->free_index,
4331                     pwrb_context->wrb_handles_available);
4332 }
4333
4334 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
4335                               int *index, int *age)
4336 {
4337         *index = (int)itt;
4338         if (age)
4339                 *age = conn->session->age;
4340 }
4341
4342 /**
4343  * beiscsi_alloc_pdu - allocates pdu and related resources
4344  * @task: libiscsi task
4345  * @opcode: opcode of pdu for task
4346  *
4347  * This is called with the session lock held. It allocates the wrb
4348  * and sgl if needed for the command and prepares the pdu's itt.
4349  * beiscsi_parse_pdu() will later translate the pdu itt to the
4350  * libiscsi task itt.
4351  */
4352 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
4353 {
4354         struct beiscsi_io_task *io_task = task->dd_data;
4355         struct iscsi_conn *conn = task->conn;
4356         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4357         struct beiscsi_hba *phba = beiscsi_conn->phba;
4358         struct hwi_wrb_context *pwrb_context;
4359         struct hwi_controller *phwi_ctrlr;
4360         itt_t itt;
4361         uint16_t cri_index = 0;
4362         struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
4363         dma_addr_t paddr;
4364
4365         io_task->cmd_bhs = dma_pool_alloc(beiscsi_sess->bhs_pool,
4366                                           GFP_ATOMIC, &paddr);
4367         if (!io_task->cmd_bhs)
4368                 return -ENOMEM;
4369         io_task->bhs_pa.u.a64.address = paddr;
4370         io_task->libiscsi_itt = (itt_t)task->itt;
4371         io_task->conn = beiscsi_conn;
4372
4373         task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
4374         task->hdr_max = sizeof(struct be_cmd_bhs);
4375         io_task->psgl_handle = NULL;
4376         io_task->pwrb_handle = NULL;
4377
4378         if (task->sc) {
4379                 io_task->psgl_handle = alloc_io_sgl_handle(phba);
4380                 if (!io_task->psgl_handle) {
4381                         beiscsi_log(phba, KERN_ERR,
4382                                     BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4383                                     "BM_%d : Alloc of IO_SGL_ICD Failed "
4384                                     "for the CID : %d\n",
4385                                     beiscsi_conn->beiscsi_conn_cid);
4386                         goto free_hndls;
4387                 }
4388                 io_task->pwrb_handle = alloc_wrb_handle(phba,
4389                                         beiscsi_conn->beiscsi_conn_cid,
4390                                         &io_task->pwrb_context);
4391                 if (!io_task->pwrb_handle) {
4392                         beiscsi_log(phba, KERN_ERR,
4393                                     BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4394                                     "BM_%d : Alloc of WRB_HANDLE Failed "
4395                                     "for the CID : %d\n",
4396                                     beiscsi_conn->beiscsi_conn_cid);
4397                         goto free_io_hndls;
4398                 }
4399         } else {
4400                 io_task->scsi_cmnd = NULL;
4401                 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
4402                         beiscsi_conn->task = task;
4403                         if (!beiscsi_conn->login_in_progress) {
4404                                 io_task->psgl_handle = (struct sgl_handle *)
4405                                                 alloc_mgmt_sgl_handle(phba);
4406                                 if (!io_task->psgl_handle) {
4407                                         beiscsi_log(phba, KERN_ERR,
4408                                                     BEISCSI_LOG_IO |
4409                                                     BEISCSI_LOG_CONFIG,
4410                                                     "BM_%d : Alloc of MGMT_SGL_ICD Failed "
4411                                                     "for the CID : %d\n",
4412                                                     beiscsi_conn->beiscsi_conn_cid);
4413                                         goto free_hndls;
4414                                 }
4415
4416                                 beiscsi_conn->login_in_progress = 1;
4417                                 beiscsi_conn->plogin_sgl_handle =
4418                                                         io_task->psgl_handle;
4419                                 io_task->pwrb_handle =
4420                                         alloc_wrb_handle(phba,
4421                                         beiscsi_conn->beiscsi_conn_cid,
4422                                         &io_task->pwrb_context);
4423                                 if (!io_task->pwrb_handle) {
4424                                         beiscsi_log(phba, KERN_ERR,
4425                                                     BEISCSI_LOG_IO |
4426                                                     BEISCSI_LOG_CONFIG,
4427                                                     "BM_%d : Alloc of WRB_HANDLE Failed "
4428                                                     "for the CID : %d\n",
4429                                                     beiscsi_conn->beiscsi_conn_cid);
4430                                         goto free_mgmt_hndls;
4431                                 }
4432                                 beiscsi_conn->plogin_wrb_handle =
4433                                                         io_task->pwrb_handle;
4434
4435                         } else {
4436                                 io_task->psgl_handle =
4437                                                 beiscsi_conn->plogin_sgl_handle;
4438                                 io_task->pwrb_handle =
4439                                                 beiscsi_conn->plogin_wrb_handle;
4440                         }
4441                 } else {
4442                         io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
4443                         if (!io_task->psgl_handle) {
4444                                 beiscsi_log(phba, KERN_ERR,
4445                                             BEISCSI_LOG_IO |
4446                                             BEISCSI_LOG_CONFIG,
4447                                             "BM_%d : Alloc of MGMT_SGL_ICD Failed "
4448                                             "for the CID : %d\n",
4449                                             beiscsi_conn->beiscsi_conn_cid);
4450                                 goto free_hndls;
4451                         }
4452                         io_task->pwrb_handle =
4453                                         alloc_wrb_handle(phba,
4454                                         beiscsi_conn->beiscsi_conn_cid,
4455                                         &io_task->pwrb_context);
4456                         if (!io_task->pwrb_handle) {
4457                                 beiscsi_log(phba, KERN_ERR,
4458                                             BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4459                                             "BM_%d : Alloc of WRB_HANDLE Failed "
4460                                             "for the CID : %d\n",
4461                                             beiscsi_conn->beiscsi_conn_cid);
4462                                 goto free_mgmt_hndls;
4463                         }
4464
4465                 }
4466         }
4467         itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
4468                                  wrb_index << 16) | (unsigned int)
4469                                 (io_task->psgl_handle->sgl_index));
4470         io_task->pwrb_handle->pio_handle = task;
4471
4472         io_task->cmd_bhs->iscsi_hdr.itt = itt;
4473         return 0;
4474
4475 free_io_hndls:
4476         free_io_sgl_handle(phba, io_task->psgl_handle);
4477         goto free_hndls;
4478 free_mgmt_hndls:
4479         free_mgmt_sgl_handle(phba, io_task->psgl_handle);
4480         io_task->psgl_handle = NULL;
4481 free_hndls:
4482         phwi_ctrlr = phba->phwi_ctrlr;
4483         cri_index = BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid);
4485         pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
4486         if (io_task->pwrb_handle)
4487                 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
4488         io_task->pwrb_handle = NULL;
4489         dma_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
4490                       io_task->bhs_pa.u.a64.address);
4491         io_task->cmd_bhs = NULL;
4492         return -ENOMEM;
4493 }
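
     /**
      * beiscsi_iotask_v2()- Post an IO WRB using the v2 WRB format
      * @task: iscsi task for the SCSI command
      * @sg: mapped scatterlist for the command
      * @num_sg: number of SG elements
      * @xferlen: total transfer length
      * @writedir: non-zero for a write command
      *
      * Fills the v2 WRB with the LUN, transfer length, CmdSN and SGL,
      * links it into the WRB ring and rings the WRB doorbell.
      **/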
4494 static int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
4495                        unsigned int num_sg, unsigned int xferlen,
4496                        unsigned int writedir)
4497 {
4499         struct beiscsi_io_task *io_task = task->dd_data;
4500         struct iscsi_conn *conn = task->conn;
4501         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4502         struct beiscsi_hba *phba = beiscsi_conn->phba;
4503         struct iscsi_wrb *pwrb = NULL;
4504         unsigned int doorbell = 0;
4505
4506         pwrb = io_task->pwrb_handle->pwrb;
4507
4508         io_task->bhs_len = sizeof(struct be_cmd_bhs);
4509
4510         if (writedir) {
4511                 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb,
4512                               INI_WR_CMD);
4513                 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 1);
4514         } else {
4515                 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb,
4516                               INI_RD_CMD);
4517                 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 0);
4518         }
4519
4520         io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb_v2,
4521                                           type, pwrb);
4522
4523         AMAP_SET_BITS(struct amap_iscsi_wrb_v2, lun, pwrb,
4524                       cpu_to_be16(*(unsigned short *)
4525                       &io_task->cmd_bhs->iscsi_hdr.lun));
4526         AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, xferlen);
4527         AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
4528                       io_task->pwrb_handle->wrb_index);
4529         AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
4530                       be32_to_cpu(task->cmdsn));
4531         AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
4532                       io_task->psgl_handle->sgl_index);
4533
4534         hwi_write_sgl_v2(pwrb, sg, num_sg, io_task);
4535         AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
4536                       io_task->pwrb_handle->wrb_index);
4537         if (io_task->pwrb_context->plast_wrb)
4538                 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb,
4539                               io_task->pwrb_context->plast_wrb,
4540                               io_task->pwrb_handle->wrb_index);
4541         io_task->pwrb_context->plast_wrb = pwrb;
4542
4543         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
4544
4545         doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
4546         doorbell |= (io_task->pwrb_handle->wrb_index &
4547                      DB_DEF_PDU_WRB_INDEX_MASK) <<
4548                      DB_DEF_PDU_WRB_INDEX_SHIFT;
4549         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4550         iowrite32(doorbell, phba->db_va +
4551                   beiscsi_conn->doorbell_offset);
4552         return 0;
4553 }
4554
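     /**
      * beiscsi_iotask()- Post an IO WRB using the legacy WRB format
      * @task: iscsi task for the SCSI command
      * @sg: mapped scatterlist for the command
      * @num_sg: number of SG elements
      * @xferlen: total transfer length
      * @writedir: non-zero for a write command
      *
      * Fills the WRB with the LUN, transfer length, CmdSN and SGL, links
      * it into the WRB ring and rings the WRB doorbell.
      **/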
4555 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
4556                           unsigned int num_sg, unsigned int xferlen,
4557                           unsigned int writedir)
4558 {
4560         struct beiscsi_io_task *io_task = task->dd_data;
4561         struct iscsi_conn *conn = task->conn;
4562         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4563         struct beiscsi_hba *phba = beiscsi_conn->phba;
4564         struct iscsi_wrb *pwrb = NULL;
4565         unsigned int doorbell = 0;
4566
4567         pwrb = io_task->pwrb_handle->pwrb;
4568         io_task->bhs_len = sizeof(struct be_cmd_bhs);
4569
4570         if (writedir) {
4571                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4572                               INI_WR_CMD);
4573                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
4574         } else {
4575                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4576                               INI_RD_CMD);
4577                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
4578         }
4579
4580         io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb,
4581                                           type, pwrb);
4582
4583         AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
4584                       cpu_to_be16(*(unsigned short *)
4585                                   &io_task->cmd_bhs->iscsi_hdr.lun));
4586         AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
4587         AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
4588                       io_task->pwrb_handle->wrb_index);
4589         AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
4590                       be32_to_cpu(task->cmdsn));
4591         AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
4592                       io_task->psgl_handle->sgl_index);
4593
4594         hwi_write_sgl(pwrb, sg, num_sg, io_task);
4595
4596         AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
4597                       io_task->pwrb_handle->wrb_index);
4598         if (io_task->pwrb_context->plast_wrb)
4599                 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb,
4600                               io_task->pwrb_context->plast_wrb,
4601                               io_task->pwrb_handle->wrb_index);
4602         io_task->pwrb_context->plast_wrb = pwrb;
4603
4604         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
4605
4606         doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
4607         doorbell |= (io_task->pwrb_handle->wrb_index &
4608                      DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
4609         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4610
4611         iowrite32(doorbell, phba->db_va +
4612                   beiscsi_conn->doorbell_offset);
4613         return 0;
4614 }
4615
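     /**
      * beiscsi_mtask()- Post a management task WRB
      * @task: iscsi task carrying the mgmt PDU (login, nop-out, text,
      *        TMF or logout)
      *
      * Programs the WRB for the PDU opcode on either WRB format, copies
      * the PDU buffer via hwi_write_buffer() and rings the WRB doorbell.
      **/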
4616 static int beiscsi_mtask(struct iscsi_task *task)
4617 {
4618         struct beiscsi_io_task *io_task = task->dd_data;
4619         struct iscsi_conn *conn = task->conn;
4620         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4621         struct beiscsi_hba *phba = beiscsi_conn->phba;
4622         struct iscsi_wrb *pwrb = NULL;
4623         unsigned int doorbell = 0;
4624         unsigned int cid;
4625         unsigned int pwrb_typeoffset = 0;
4626         int ret = 0;
4627
4628         cid = beiscsi_conn->beiscsi_conn_cid;
4629         pwrb = io_task->pwrb_handle->pwrb;
4630
4631         if (is_chip_be2_be3r(phba)) {
4632                 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
4633                               be32_to_cpu(task->cmdsn));
4634                 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
4635                               io_task->pwrb_handle->wrb_index);
4636                 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
4637                               io_task->psgl_handle->sgl_index);
4638                 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
4639                               task->data_count);
4640                 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
4641                               io_task->pwrb_handle->wrb_index);
4642                 if (io_task->pwrb_context->plast_wrb)
4643                         AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb,
4644                                       io_task->pwrb_context->plast_wrb,
4645                                       io_task->pwrb_handle->wrb_index);
4646                 io_task->pwrb_context->plast_wrb = pwrb;
4647
4648                 pwrb_typeoffset = BE_WRB_TYPE_OFFSET;
4649         } else {
4650                 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
4651                               be32_to_cpu(task->cmdsn));
4652                 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
4653                               io_task->pwrb_handle->wrb_index);
4654                 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
4655                               io_task->psgl_handle->sgl_index);
4656                 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb,
4657                               task->data_count);
4658                 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
4659                               io_task->pwrb_handle->wrb_index);
4660                 if (io_task->pwrb_context->plast_wrb)
4661                         AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb,
4662                                       io_task->pwrb_context->plast_wrb,
4663                                       io_task->pwrb_handle->wrb_index);
4664                 io_task->pwrb_context->plast_wrb = pwrb;
4665
4666                 pwrb_typeoffset = SKH_WRB_TYPE_OFFSET;
4667         }
4668
4670         switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
4671         case ISCSI_OP_LOGIN:
4672                 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
4673                 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
4674                 ret = hwi_write_buffer(pwrb, task);
4675                 break;
4676         case ISCSI_OP_NOOP_OUT:
4677                 if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
4678                         ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
4679                         if (is_chip_be2_be3r(phba))
4680                                 AMAP_SET_BITS(struct amap_iscsi_wrb,
4681                                               dmsg, pwrb, 1);
4682                         else
4683                                 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
4684                                               dmsg, pwrb, 1);
4685                 } else {
4686                         ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset);
4687                         if (is_chip_be2_be3r(phba))
4688                                 AMAP_SET_BITS(struct amap_iscsi_wrb,
4689                                               dmsg, pwrb, 0);
4690                         else
4691                                 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
4692                                               dmsg, pwrb, 0);
4693                 }
4694                 ret = hwi_write_buffer(pwrb, task);
4695                 break;
4696         case ISCSI_OP_TEXT:
4697                 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
4698                 ret = hwi_write_buffer(pwrb, task);
4699                 break;
4700         case ISCSI_OP_SCSI_TMFUNC:
4701                 ADAPTER_SET_WRB_TYPE(pwrb, INI_TMF_CMD, pwrb_typeoffset);
4702                 ret = hwi_write_buffer(pwrb, task);
4703                 break;
4704         case ISCSI_OP_LOGOUT:
4705                 ADAPTER_SET_WRB_TYPE(pwrb, HWH_TYPE_LOGOUT, pwrb_typeoffset);
4706                 ret = hwi_write_buffer(pwrb, task);
4707                 break;
4708
4709         default:
4710                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4711                             "BM_%d : opcode =%d Not supported\n",
4712                             task->hdr->opcode & ISCSI_OPCODE_MASK);
4713
4714                 return -EINVAL;
4715         }
4716
4717         if (ret)
4718                 return ret;
4719
4720         /* Set the task type */
4721         io_task->wrb_type = (is_chip_be2_be3r(phba)) ?
4722                 AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) :
4723                 AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb);
4724
4725         doorbell |= cid & DB_WRB_POST_CID_MASK;
4726         doorbell |= (io_task->pwrb_handle->wrb_index &
4727                      DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
4728         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4729         iowrite32(doorbell, phba->db_va +
4730                   beiscsi_conn->doorbell_offset);
4731         return 0;
4732 }
4733
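     /**
      * beiscsi_task_xmit()- Transmit an iscsi task
      * @task: iscsi task to be transmitted
      *
      * Management PDUs are handed to beiscsi_mtask(). For SCSI commands
      * the SG list is DMA mapped and the IO is posted through the
      * adapter-specific iotask_fn.
      **/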
4734 static int beiscsi_task_xmit(struct iscsi_task *task)
4735 {
4736         struct beiscsi_io_task *io_task = task->dd_data;
4737         struct scsi_cmnd *sc = task->sc;
4738         struct beiscsi_hba *phba;
4739         struct scatterlist *sg;
4740         int num_sg;
4741         unsigned int  writedir = 0, xferlen = 0;
4742
4743         phba = io_task->conn->phba;
4744         /**
4745          * HBA in error includes BEISCSI_HBA_FW_TIMEOUT. IO path might be
4746          * operational if FW still gets heartbeat from EP FW. Is management
4747          * path really needed to continue further?
4748          */
4749         if (!beiscsi_hba_is_online(phba))
4750                 return -EIO;
4751
4752         if (!io_task->conn->login_in_progress)
4753                 task->hdr->exp_statsn = 0;
4754
4755         if (!sc)
4756                 return beiscsi_mtask(task);
4757
4758         io_task->scsi_cmnd = sc;
4759         io_task->num_sg = 0;
4760         num_sg = scsi_dma_map(sc);
4761         if (num_sg < 0) {
4762                 beiscsi_log(phba, KERN_ERR,
4763                             BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI,
4764                             "BM_%d : scsi_dma_map Failed "
4765                             "Driver_ITT : 0x%x ITT : 0x%x Xferlen : 0x%x\n",
4766                             be32_to_cpu(io_task->cmd_bhs->iscsi_hdr.itt),
4767                             io_task->libiscsi_itt, scsi_bufflen(sc));
4768
4769                 return num_sg;
4770         }
4771         /**
4772          * For scsi cmd task, check num_sg before unmapping in cleanup_task.
4773          * For management task, cleanup_task checks mtask_addr before unmapping.
4774          */
4775         io_task->num_sg = num_sg;
4776         xferlen = scsi_bufflen(sc);
4777         sg = scsi_sglist(sc);
4778         if (sc->sc_data_direction == DMA_TO_DEVICE)
4779                 writedir = 1;
4780         else
4781                 writedir = 0;
4782
4783         return phba->iotask_fn(task, sg, num_sg, xferlen, writedir);
4784 }
4785
4786 /**
4787  * beiscsi_bsg_request - handle bsg request from ISCSI transport
4788  * @job: job to handle
4789  */
4790 static int beiscsi_bsg_request(struct bsg_job *job)
4791 {
4792         struct Scsi_Host *shost;
4793         struct beiscsi_hba *phba;
4794         struct iscsi_bsg_request *bsg_req = job->request;
4795         int rc = -EINVAL;
4796         unsigned int tag;
4797         struct be_dma_mem nonemb_cmd;
4798         struct be_cmd_resp_hdr *resp;
4799         struct iscsi_bsg_reply *bsg_reply = job->reply;
4800         unsigned short status, extd_status;
4801
4802         shost = iscsi_job_to_shost(job);
4803         phba = iscsi_host_priv(shost);
4804
4805         if (!beiscsi_hba_is_online(phba)) {
4806                 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
4807                             "BM_%d : HBA in error 0x%lx\n", phba->state);
4808                 return -ENXIO;
4809         }
4810
4811         switch (bsg_req->msgcode) {
4812         case ISCSI_BSG_HST_VENDOR:
4813                 nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev,
4814                                         job->request_payload.payload_len,
4815                                         &nonemb_cmd.dma, GFP_KERNEL);
4816                 if (nonemb_cmd.va == NULL) {
4817                         beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4818                                     "BM_%d : Failed to allocate memory for "
4819                                     "beiscsi_bsg_request\n");
4820                         return -ENOMEM;
4821                 }
4822                 tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job,
4823                                                   &nonemb_cmd);
4824                 if (!tag) {
4825                         beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4826                                     "BM_%d : MBX Tag Allocation Failed\n");
4827
4828                         dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
4829                                             nonemb_cmd.va, nonemb_cmd.dma);
4830                         return -EAGAIN;
4831                 }
4832
4833                 rc = wait_event_interruptible_timeout(
4834                                         phba->ctrl.mcc_wait[tag],
4835                                         phba->ctrl.mcc_tag_status[tag],
4836                                         msecs_to_jiffies(
4837                                         BEISCSI_HOST_MBX_TIMEOUT));
4838
4839                 if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
4840                         clear_bit(MCC_TAG_STATE_RUNNING,
4841                                   &phba->ctrl.ptag_state[tag].tag_state);
4842                         dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
4843                                             nonemb_cmd.va, nonemb_cmd.dma);
4844                         return -EIO;
4845                 }
4846                 extd_status = (phba->ctrl.mcc_tag_status[tag] &
4847                                CQE_STATUS_ADDL_MASK) >> CQE_STATUS_ADDL_SHIFT;
4848                 status = phba->ctrl.mcc_tag_status[tag] & CQE_STATUS_MASK;
4849                 free_mcc_wrb(&phba->ctrl, tag);
4850                 resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va;
4851                 sg_copy_from_buffer(job->reply_payload.sg_list,
4852                                     job->reply_payload.sg_cnt,
4853                                     nonemb_cmd.va, (resp->response_length
4854                                     + sizeof(*resp)));
4855                 bsg_reply->reply_payload_rcv_len = resp->response_length;
4856                 bsg_reply->result = status;
4857                 bsg_job_done(job, bsg_reply->result,
4858                              bsg_reply->reply_payload_rcv_len);
4859                 dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
4860                                     nonemb_cmd.va, nonemb_cmd.dma);
4861                 if (status || extd_status) {
4862                         beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4863                                     "BM_%d : MBX Cmd Failed"
4864                                     " status = %d extd_status = %d\n",
4865                                     status, extd_status);
4866
4867                         return -EIO;
4868                 } else {
4869                         rc = 0;
4870                 }
4871                 break;
4872
4873         default:
4874                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4875                                 "BM_%d : Unsupported bsg command: 0x%x\n",
4876                                 bsg_req->msgcode);
4877                 break;
4878         }
4879
4880         return rc;
4881 }
4882
4883 static void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
4884 {
4885         /* Set the logging parameter */
4886         beiscsi_log_enable_init(phba, beiscsi_log_enable);
4887 }
4888
4889 void beiscsi_start_boot_work(struct beiscsi_hba *phba, unsigned int s_handle)
4890 {
4891         if (phba->boot_struct.boot_kset)
4892                 return;
4893
4894         /* skip if boot work is already in progress */
4895         if (test_and_set_bit(BEISCSI_HBA_BOOT_WORK, &phba->state))
4896                 return;
4897
4898         phba->boot_struct.retry = 3;
4899         phba->boot_struct.tag = 0;
4900         phba->boot_struct.s_handle = s_handle;
4901         phba->boot_struct.action = BEISCSI_BOOT_GET_SHANDLE;
4902         schedule_work(&phba->boot_work);
4903 }
4904
4905 #define BEISCSI_SYSFS_ISCSI_BOOT_FLAGS  3
4906 /*
4907  * beiscsi_show_boot_tgt_info()
4908  * Boot flag info for iscsi-utilities
4909  * Bit 0 Block valid flag
4910  * Bit 1 Firmware booting selected
4911  */
4912 static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
4913 {
4914         struct beiscsi_hba *phba = data;
4915         struct mgmt_session_info *boot_sess = &phba->boot_struct.boot_sess;
4916         struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
4917         char *str = buf;
4918         int rc = -EPERM;
4919
4920         switch (type) {
4921         case ISCSI_BOOT_TGT_NAME:
4922                 rc = sprintf(buf, "%.*s\n",
4923                             (int)strlen(boot_sess->target_name),
4924                             (char *)&boot_sess->target_name);
4925                 break;
4926         case ISCSI_BOOT_TGT_IP_ADDR:
4927                 if (boot_conn->dest_ipaddr.ip_type == BEISCSI_IP_TYPE_V4)
4928                         rc = sprintf(buf, "%pI4\n",
4929                                 (char *)&boot_conn->dest_ipaddr.addr);
4930                 else
4931                         rc = sprintf(str, "%pI6\n",
4932                                 (char *)&boot_conn->dest_ipaddr.addr);
4933                 break;
4934         case ISCSI_BOOT_TGT_PORT:
4935                 rc = sprintf(str, "%d\n", boot_conn->dest_port);
4936                 break;
4937
4938         case ISCSI_BOOT_TGT_CHAP_NAME:
4939                 rc = sprintf(str,  "%.*s\n",
4940                              boot_conn->negotiated_login_options.auth_data.chap.
4941                              target_chap_name_length,
4942                              (char *)&boot_conn->negotiated_login_options.
4943                              auth_data.chap.target_chap_name);
4944                 break;
4945         case ISCSI_BOOT_TGT_CHAP_SECRET:
4946                 rc = sprintf(str,  "%.*s\n",
4947                              boot_conn->negotiated_login_options.auth_data.chap.
4948                              target_secret_length,
4949                              (char *)&boot_conn->negotiated_login_options.
4950                              auth_data.chap.target_secret);
4951                 break;
4952         case ISCSI_BOOT_TGT_REV_CHAP_NAME:
4953                 rc = sprintf(str,  "%.*s\n",
4954                              boot_conn->negotiated_login_options.auth_data.chap.
4955                              intr_chap_name_length,
4956                              (char *)&boot_conn->negotiated_login_options.
4957                              auth_data.chap.intr_chap_name);
4958                 break;
4959         case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
4960                 rc = sprintf(str,  "%.*s\n",
4961                              boot_conn->negotiated_login_options.auth_data.chap.
4962                              intr_secret_length,
4963                              (char *)&boot_conn->negotiated_login_options.
4964                              auth_data.chap.intr_secret);
4965                 break;
4966         case ISCSI_BOOT_TGT_FLAGS:
4967                 rc = sprintf(str, "%d\n", BEISCSI_SYSFS_ISCSI_BOOT_FLAGS);
4968                 break;
4969         case ISCSI_BOOT_TGT_NIC_ASSOC:
4970                 rc = sprintf(str, "0\n");
4971                 break;
4972         }
4973         return rc;
4974 }
4975
4976 static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
4977 {
4978         struct beiscsi_hba *phba = data;
4979         char *str = buf;
4980         int rc = -EPERM;
4981
4982         switch (type) {
4983         case ISCSI_BOOT_INI_INITIATOR_NAME:
4984                 rc = sprintf(str, "%s\n",
4985                              phba->boot_struct.boot_sess.initiator_iscsiname);
4986                 break;
4987         }
4988         return rc;
4989 }
4990
4991 static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
4992 {
4993         struct beiscsi_hba *phba = data;
4994         char *str = buf;
4995         int rc = -EPERM;
4996
4997         switch (type) {
4998         case ISCSI_BOOT_ETH_FLAGS:
4999                 rc = sprintf(str, "%d\n", BEISCSI_SYSFS_ISCSI_BOOT_FLAGS);
5000                 break;
5001         case ISCSI_BOOT_ETH_INDEX:
5002                 rc = sprintf(str, "0\n");
5003                 break;
5004         case ISCSI_BOOT_ETH_MAC:
5005                 rc  = beiscsi_get_macaddr(str, phba);
5006                 break;
5007         }
5008         return rc;
5009 }
5010
5011 static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
5012 {
5013         umode_t rc = 0;
5014
5015         switch (type) {
5016         case ISCSI_BOOT_TGT_NAME:
5017         case ISCSI_BOOT_TGT_IP_ADDR:
5018         case ISCSI_BOOT_TGT_PORT:
5019         case ISCSI_BOOT_TGT_CHAP_NAME:
5020         case ISCSI_BOOT_TGT_CHAP_SECRET:
5021         case ISCSI_BOOT_TGT_REV_CHAP_NAME:
5022         case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
5023         case ISCSI_BOOT_TGT_NIC_ASSOC:
5024         case ISCSI_BOOT_TGT_FLAGS:
5025                 rc = S_IRUGO;
5026                 break;
5027         }
5028         return rc;
5029 }
5030
5031 static umode_t beiscsi_ini_get_attr_visibility(void *data, int type)
5032 {
5033         umode_t rc = 0;
5034
5035         switch (type) {
5036         case ISCSI_BOOT_INI_INITIATOR_NAME:
5037                 rc = S_IRUGO;
5038                 break;
5039         }
5040         return rc;
5041 }
5042
5043 static umode_t beiscsi_eth_get_attr_visibility(void *data, int type)
5044 {
5045         umode_t rc = 0;
5046
5047         switch (type) {
5048         case ISCSI_BOOT_ETH_FLAGS:
5049         case ISCSI_BOOT_ETH_MAC:
5050         case ISCSI_BOOT_ETH_INDEX:
5051                 rc = S_IRUGO;
5052                 break;
5053         }
5054         return rc;
5055 }
5056
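     /*
      * Boot kobject release callback: drops the shost reference taken in
      * beiscsi_boot_create_kset() before each boot kobject was created.
      */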
5057 static void beiscsi_boot_kobj_release(void *data)
5058 {
5059         struct beiscsi_hba *phba = data;
5060
5061         scsi_host_put(phba->shost);
5062 }
5063
5064 static int beiscsi_boot_create_kset(struct beiscsi_hba *phba)
5065 {
5066         struct boot_struct *bs = &phba->boot_struct;
5067         struct iscsi_boot_kobj *boot_kobj;
5068
5069         if (bs->boot_kset) {
5070                 __beiscsi_log(phba, KERN_ERR,
5071                               "BM_%d: boot_kset already created\n");
5072                 return 0;
5073         }
5074
5075         bs->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
5076         if (!bs->boot_kset) {
5077                 __beiscsi_log(phba, KERN_ERR,
5078                               "BM_%d: boot_kset alloc failed\n");
5079                 return -ENOMEM;
5080         }
5081
5082         /* get shost ref because the show functions will refer to phba */
5083         if (!scsi_host_get(phba->shost))
5084                 goto free_kset;
5085
5086         boot_kobj = iscsi_boot_create_target(bs->boot_kset, 0, phba,
5087                                              beiscsi_show_boot_tgt_info,
5088                                              beiscsi_tgt_get_attr_visibility,
5089                                              beiscsi_boot_kobj_release);
5090         if (!boot_kobj)
5091                 goto put_shost;
5092
5093         if (!scsi_host_get(phba->shost))
5094                 goto free_kset;
5095
5096         boot_kobj = iscsi_boot_create_initiator(bs->boot_kset, 0, phba,
5097                                                 beiscsi_show_boot_ini_info,
5098                                                 beiscsi_ini_get_attr_visibility,
5099                                                 beiscsi_boot_kobj_release);
5100         if (!boot_kobj)
5101                 goto put_shost;
5102
5103         if (!scsi_host_get(phba->shost))
5104                 goto free_kset;
5105
5106         boot_kobj = iscsi_boot_create_ethernet(bs->boot_kset, 0, phba,
5107                                                beiscsi_show_boot_eth_info,
5108                                                beiscsi_eth_get_attr_visibility,
5109                                                beiscsi_boot_kobj_release);
5110         if (!boot_kobj)
5111                 goto put_shost;
5112
5113         return 0;
5114
5115 put_shost:
5116         scsi_host_put(phba->shost);
5117 free_kset:
5118         iscsi_boot_destroy_kset(bs->boot_kset);
5119         bs->boot_kset = NULL;
5120         return -ENOMEM;
5121 }
5122
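     /*
      * Boot work: each action below posts an MCC command to the FW. The MCC
      * completion path is expected to update boot_struct.action and
      * reschedule this work until BEISCSI_BOOT_CREATE_KSET finally exports
      * the boot info through sysfs and clears BEISCSI_HBA_BOOT_WORK.
      */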
5123 static void beiscsi_boot_work(struct work_struct *work)
5124 {
5125         struct beiscsi_hba *phba =
5126                 container_of(work, struct beiscsi_hba, boot_work);
5127         struct boot_struct *bs = &phba->boot_struct;
5128         unsigned int tag = 0;
5129
5130         if (!beiscsi_hba_is_online(phba))
5131                 return;
5132
5133         beiscsi_log(phba, KERN_INFO,
5134                     BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
5135                     "BM_%d : %s action %d\n",
5136                     __func__, phba->boot_struct.action);
5137
5138         switch (phba->boot_struct.action) {
5139         case BEISCSI_BOOT_REOPEN_SESS:
5140                 tag = beiscsi_boot_reopen_sess(phba);
5141                 break;
5142         case BEISCSI_BOOT_GET_SHANDLE:
5143                 tag = __beiscsi_boot_get_shandle(phba, 1);
5144                 break;
5145         case BEISCSI_BOOT_GET_SINFO:
5146                 tag = beiscsi_boot_get_sinfo(phba);
5147                 break;
5148         case BEISCSI_BOOT_LOGOUT_SESS:
5149                 tag = beiscsi_boot_logout_sess(phba);
5150                 break;
5151         case BEISCSI_BOOT_CREATE_KSET:
5152                 beiscsi_boot_create_kset(phba);
5153                 /**
5154                  * Ensure the updated boot_kset is visible to all CPUs
5155                  * before ending the boot work.
5156                  */
5157                 mb();
5158                 clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state);
5159                 return;
5160         }
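             /* tag 0 means posting the MCC command failed; retry a few times */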
5161         if (!tag) {
5162                 if (bs->retry--)
5163                         schedule_work(&phba->boot_work);
5164                 else
5165                         clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state);
5166         }
5167 }
5168
5169 static void beiscsi_eqd_update_work(struct work_struct *work)
5170 {
5171         struct hwi_context_memory *phwi_context;
5172         struct be_set_eqd set_eqd[MAX_CPUS];
5173         struct hwi_controller *phwi_ctrlr;
5174         struct be_eq_obj *pbe_eq;
5175         struct beiscsi_hba *phba;
5176         unsigned int pps, delta;
5177         struct be_aic_obj *aic;
5178         int eqd, i, num = 0;
5179         unsigned long now;
5180
5181         phba = container_of(work, struct beiscsi_hba, eqd_update.work);
5182         if (!beiscsi_hba_is_online(phba))
5183                 return;
5184
5185         phwi_ctrlr = phba->phwi_ctrlr;
5186         phwi_context = phwi_ctrlr->phwi_ctxt;
5187
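             /*
              * Adaptive interrupt coalescing: for each EQ, compute the CQ
              * completion rate since the last sample and derive a new EQ
              * delay; only EQs whose delay changed are pushed to FW below.
              */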
5188         for (i = 0; i <= phba->num_cpus; i++) {
5189                 aic = &phba->aic_obj[i];
5190                 pbe_eq = &phwi_context->be_eq[i];
5191                 now = jiffies;
5192                 if (!aic->jiffies || time_before(now, aic->jiffies) ||
5193                     pbe_eq->cq_count < aic->eq_prev) {
5194                         aic->jiffies = now;
5195                         aic->eq_prev = pbe_eq->cq_count;
5196                         continue;
5197                 }
5198                 delta = jiffies_to_msecs(now - aic->jiffies);
5199                 pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta);
5200                 eqd = (pps / 1500) << 2;
5201
5202                 if (eqd < 8)
5203                         eqd = 0;
5204                 eqd = min_t(u32, eqd, BEISCSI_EQ_DELAY_MAX);
5205                 eqd = max_t(u32, eqd, BEISCSI_EQ_DELAY_MIN);
5206
5207                 aic->jiffies = now;
5208                 aic->eq_prev = pbe_eq->cq_count;
5209
5210                 if (eqd != aic->prev_eqd) {
5211                         set_eqd[num].delay_multiplier = (eqd * 65)/100;
5212                         set_eqd[num].eq_id = pbe_eq->q.id;
5213                         aic->prev_eqd = eqd;
5214                         num++;
5215                 }
5216         }
5217         if (num)
5218                 /* completion of this is ignored */
5219                 beiscsi_modify_eq_delay(phba, set_eqd, num);
5220
5221         schedule_delayed_work(&phba->eqd_update,
5222                               msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));
5223 }
5224
5225 static void beiscsi_hw_tpe_check(struct timer_list *t)
5226 {
5227         struct beiscsi_hba *phba = from_timer(phba, t, hw_check);
5228         u32 wait;
5229
5230         /* if not TPE, do nothing */
5231         if (!beiscsi_detect_tpe(phba))
5232                 return;
5233
5234         /* wait default 4000ms before recovering */
5235         wait = 4000;
5236         if (phba->ue2rp > BEISCSI_UE_DETECT_INTERVAL)
5237                 wait = phba->ue2rp - BEISCSI_UE_DETECT_INTERVAL;
5238         queue_delayed_work(phba->wq, &phba->recover_port,
5239                            msecs_to_jiffies(wait));
5240 }
5241
5242 static void beiscsi_hw_health_check(struct timer_list *t)
5243 {
5244         struct beiscsi_hba *phba = from_timer(phba, t, hw_check);
5245
5246         /* check for an unrecoverable error (UE) on the port */
5247         if (beiscsi_detect_ue(phba)) {
5248                 __beiscsi_log(phba, KERN_ERR,
5249                               "BM_%d : port in error: %lx\n", phba->state);
5250                 /* sessions are no longer valid, so first fail the sessions */
5251                 queue_work(phba->wq, &phba->sess_work);
5252
5253                 /* detect UER supported */
5254                 if (!test_bit(BEISCSI_HBA_UER_SUPP, &phba->state))
5255                         return;
5256                 /* modify this timer to check TPE */
5257                 phba->hw_check.function = beiscsi_hw_tpe_check;
5258         }
5259
5260         mod_timer(&phba->hw_check,
5261                   jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
5262 }
5263
5264 /*
5265  * beiscsi_enable_port()- Enable a previously disabled port.
5266  * Only the port resources freed in beiscsi_disable_port() are reallocated.
5267  * This is called in the HBA error handling path.
5268  *
5269  * @phba: Instance of driver private structure
5270  *
5271  **/
5272 static int beiscsi_enable_port(struct beiscsi_hba *phba)
5273 {
5274         struct hwi_context_memory *phwi_context;
5275         struct hwi_controller *phwi_ctrlr;
5276         struct be_eq_obj *pbe_eq;
5277         int ret, i;
5278
5279         if (test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
5280                 __beiscsi_log(phba, KERN_ERR,
5281                               "BM_%d : %s : port is online %lx\n",
5282                               __func__, phba->state);
5283                 return 0;
5284         }
5285
5286         ret = beiscsi_init_sliport(phba);
5287         if (ret)
5288                 return ret;
5289
5290         be2iscsi_enable_msix(phba);
5291
5292         beiscsi_get_params(phba);
5293         beiscsi_set_host_data(phba);
5294         /* Re-enable UER. If a different TPE occurs then it is recoverable. */
5295         beiscsi_set_uer_feature(phba);
5296
5297         phba->shost->max_id = phba->params.cxns_per_ctrl - 1;
5298         phba->shost->can_queue = phba->params.ios_per_ctrl;
5299         ret = beiscsi_init_port(phba);
5300         if (ret < 0) {
5301                 __beiscsi_log(phba, KERN_ERR,
5302                               "BM_%d : init port failed\n");
5303                 goto disable_msix;
5304         }
5305
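             /* re-arm the MCC tag pool: tags 1..MAX_MCC_CMD are free again */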
5306         for (i = 0; i < MAX_MCC_CMD; i++) {
5307                 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
5308                 phba->ctrl.mcc_tag[i] = i + 1;
5309                 phba->ctrl.mcc_tag_status[i + 1] = 0;
5310                 phba->ctrl.mcc_tag_available++;
5311         }
5312
5313         phwi_ctrlr = phba->phwi_ctrlr;
5314         phwi_context = phwi_ctrlr->phwi_ctxt;
5315         for (i = 0; i < phba->num_cpus; i++) {
5316                 pbe_eq = &phwi_context->be_eq[i];
5317                 irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll);
5318         }
5319
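             /*
              * With MSI-X enabled the last EQ (index num_cpus) handles MCC
              * completions; in legacy interrupt mode EQ 0 handles both I/O
              * and MCC.
              */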
5320         i = (phba->pcidev->msix_enabled) ? i : 0;
5321         /* Work item for MCC handling */
5322         pbe_eq = &phwi_context->be_eq[i];
5323         INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work);
5324
5325         ret = beiscsi_init_irqs(phba);
5326         if (ret < 0) {
5327                 __beiscsi_log(phba, KERN_ERR,
5328                               "BM_%d : setup IRQs failed %d\n", ret);
5329                 goto cleanup_port;
5330         }
5331         hwi_enable_intr(phba);
5332         /* port operational: clear all error bits */
5333         set_bit(BEISCSI_HBA_ONLINE, &phba->state);
5334         __beiscsi_log(phba, KERN_INFO,
5335                       "BM_%d : port online: 0x%lx\n", phba->state);
5336
5337         /* start hw_check timer and eqd_update work */
5338         schedule_delayed_work(&phba->eqd_update,
5339                               msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));
5340
5341         /**
5342          * Timer function gets modified for TPE detection.
5343          * Always reinit to do health check first.
5344          */
5345         phba->hw_check.function = beiscsi_hw_health_check;
5346         mod_timer(&phba->hw_check,
5347                   jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
5348         return 0;
5349
5350 cleanup_port:
5351         for (i = 0; i < phba->num_cpus; i++) {
5352                 pbe_eq = &phwi_context->be_eq[i];
5353                 irq_poll_disable(&pbe_eq->iopoll);
5354         }
5355         hwi_cleanup_port(phba);
5356
5357 disable_msix:
5358         pci_free_irq_vectors(phba->pcidev);
5359         return ret;
5360 }
5361
5362 /*
5363  * beiscsi_disable_port()- Disable the port and clean up driver resources.
5364  * This is called in HBA error handling and driver removal.
5365  * @phba: Instance of driver private structure
5366  * @unload: indicates the driver is unloading
5367  *
5368  * Free the OS and HW resources held by the driver.
5369  **/
5370 static void beiscsi_disable_port(struct beiscsi_hba *phba, int unload)
5371 {
5372         struct hwi_context_memory *phwi_context;
5373         struct hwi_controller *phwi_ctrlr;
5374         struct be_eq_obj *pbe_eq;
5375         unsigned int i;
5376
5377         if (!test_and_clear_bit(BEISCSI_HBA_ONLINE, &phba->state))
5378                 return;
5379
5380         phwi_ctrlr = phba->phwi_ctrlr;
5381         phwi_context = phwi_ctrlr->phwi_ctxt;
5382         hwi_disable_intr(phba);
5383         beiscsi_free_irqs(phba);
5384         pci_free_irq_vectors(phba->pcidev);
5385
5386         for (i = 0; i < phba->num_cpus; i++) {
5387                 pbe_eq = &phwi_context->be_eq[i];
5388                 irq_poll_disable(&pbe_eq->iopoll);
5389         }
5390         cancel_delayed_work_sync(&phba->eqd_update);
5391         cancel_work_sync(&phba->boot_work);
5392         /* WQ might be running; cancel queued mcc_work if we are not exiting */
5393         if (!unload && beiscsi_hba_in_error(phba)) {
5394                 pbe_eq = &phwi_context->be_eq[i];
5395                 cancel_work_sync(&pbe_eq->mcc_work);
5396         }
5397         hwi_cleanup_port(phba);
5398         beiscsi_cleanup_port(phba);
5399 }
5400
5401 static void beiscsi_sess_work(struct work_struct *work)
5402 {
5403         struct beiscsi_hba *phba;
5404
5405         phba = container_of(work, struct beiscsi_hba, sess_work);
5406         /*
5407          * This work gets scheduled only in case of HBA error.
5408          * Old sessions are gone, so they need to be re-established.
5409          * iscsi_session_failure needs process context, hence this work.
5410          */
5411         iscsi_host_for_each_session(phba->shost, beiscsi_session_fail);
5412 }
5413
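     /*
      * Recover the port after an error: disable it, then bring it back up.
      * Queued with a delay from the TPE detection path.
      */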
5414 static void beiscsi_recover_port(struct work_struct *work)
5415 {
5416         struct beiscsi_hba *phba;
5417
5418         phba = container_of(work, struct beiscsi_hba, recover_port.work);
5419         beiscsi_disable_port(phba, 0);
5420         beiscsi_enable_port(phba);
5421 }
5422
5423 static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev,
5424                 pci_channel_state_t state)
5425 {
5426         struct beiscsi_hba *phba = NULL;
5427
5428         phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
5429         set_bit(BEISCSI_HBA_PCI_ERR, &phba->state);
5430
5431         beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5432                     "BM_%d : EEH error detected\n");
5433
5434         /* first stop UE detection when PCI error detected */
5435         del_timer_sync(&phba->hw_check);
5436         cancel_delayed_work_sync(&phba->recover_port);
5437
5438         /* sessions are no longer valid, so first fail the sessions */
5439         iscsi_host_for_each_session(phba->shost, beiscsi_session_fail);
5440         beiscsi_disable_port(phba, 0);
5441
5442         if (state == pci_channel_io_perm_failure) {
5443                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5444                             "BM_%d : EEH : State PERM Failure\n");
5445                 return PCI_ERS_RESULT_DISCONNECT;
5446         }
5447
5448         pci_disable_device(pdev);
5449
5450         /* The error could cause the FW to trigger a flash debug dump.
5451          * Resetting the card while flash dump is in progress
5452          * can cause it not to recover; wait for it to finish.
5453          * Wait only for first function as it is needed only once per
5454          * adapter.
5455          **/
5456         if (pdev->devfn == 0)
5457                 ssleep(30);
5458
5459         return PCI_ERS_RESULT_NEED_RESET;
5460 }
5461
5462 static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev)
5463 {
5464         struct beiscsi_hba *phba = NULL;
5465         int status = 0;
5466
5467         phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
5468
5469         beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5470                     "BM_%d : EEH Reset\n");
5471
5472         status = pci_enable_device(pdev);
5473         if (status)
5474                 return PCI_ERS_RESULT_DISCONNECT;
5475
5476         pci_set_master(pdev);
5477         pci_set_power_state(pdev, PCI_D0);
5478         pci_restore_state(pdev);
5479
5480         status = beiscsi_check_fw_rdy(phba);
5481         if (status) {
5482                 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
5483                             "BM_%d : EEH Reset Completed\n");
5484         } else {
5485                 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
5486                             "BM_%d : EEH Reset Completion Failure\n");
5487                 return PCI_ERS_RESULT_DISCONNECT;
5488         }
5489
5490         return PCI_ERS_RESULT_RECOVERED;
5491 }
5492
5493 static void beiscsi_eeh_resume(struct pci_dev *pdev)
5494 {
5495         struct beiscsi_hba *phba;
5496         int ret;
5497
5498         phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
5499         pci_save_state(pdev);
5500
5501         ret = beiscsi_enable_port(phba);
5502         if (ret)
5503                 __beiscsi_log(phba, KERN_ERR,
5504                               "BM_%d : AER EEH resume failed\n");
5505 }
5506
5507 static int beiscsi_dev_probe(struct pci_dev *pcidev,
5508                              const struct pci_device_id *id)
5509 {
5510         struct hwi_context_memory *phwi_context;
5511         struct hwi_controller *phwi_ctrlr;
5512         struct beiscsi_hba *phba = NULL;
5513         struct be_eq_obj *pbe_eq;
5514         unsigned int s_handle;
5515         char wq_name[20];
5516         int ret, i;
5517
5518         ret = beiscsi_enable_pci(pcidev);
5519         if (ret < 0) {
5520                 dev_err(&pcidev->dev,
5521                         "beiscsi_dev_probe - Failed to enable pci device\n");
5522                 return ret;
5523         }
5524
5525         phba = beiscsi_hba_alloc(pcidev);
5526         if (!phba) {
5527                 dev_err(&pcidev->dev,
5528                         "beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n");
5529                 ret = -ENOMEM;
5530                 goto disable_pci;
5531         }
5532
5533         /* Enable EEH reporting */
5534         ret = pci_enable_pcie_error_reporting(pcidev);
5535         if (ret)
5536                 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
5537                             "BM_%d : PCIe Error Reporting "
5538                             "Enabling Failed\n");
5539
5540         pci_save_state(pcidev);
5541
5542         /* Initialize driver configuration parameters */
5543         beiscsi_hba_attrs_init(phba);
5544
5545         phba->mac_addr_set = false;
5546
5547         switch (pcidev->device) {
5548         case BE_DEVICE_ID1:
5549         case OC_DEVICE_ID1:
5550         case OC_DEVICE_ID2:
5551                 phba->generation = BE_GEN2;
5552                 phba->iotask_fn = beiscsi_iotask;
5553                 dev_warn(&pcidev->dev,
5554                          "Obsolete/Unsupported BE2 Adapter Family\n");
5555                 break;
5556         case BE_DEVICE_ID2:
5557         case OC_DEVICE_ID3:
5558                 phba->generation = BE_GEN3;
5559                 phba->iotask_fn = beiscsi_iotask;
5560                 break;
5561         case OC_SKH_ID1:
5562                 phba->generation = BE_GEN4;
5563                 phba->iotask_fn = beiscsi_iotask_v2;
5564                 break;
5565         default:
5566                 phba->generation = 0;
5567         }
5568
5569         ret = be_ctrl_init(phba, pcidev);
5570         if (ret) {
5571                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5572                             "BM_%d : be_ctrl_init failed\n");
5573                 goto free_hba;
5574         }
5575
5576         ret = beiscsi_init_sliport(phba);
5577         if (ret)
5578                 goto free_hba;
5579
5580         spin_lock_init(&phba->io_sgl_lock);
5581         spin_lock_init(&phba->mgmt_sgl_lock);
5582         spin_lock_init(&phba->async_pdu_lock);
5583         ret = beiscsi_get_fw_config(&phba->ctrl, phba);
5584         if (ret != 0) {
5585                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5586                             "BM_%d : Error getting fw config\n");
5587                 goto free_port;
5588         }
5589         beiscsi_get_port_name(&phba->ctrl, phba);
5590         beiscsi_get_params(phba);
5591         beiscsi_set_host_data(phba);
5592         beiscsi_set_uer_feature(phba);
5593
5594         be2iscsi_enable_msix(phba);
5595
5596         beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
5597                     "BM_%d : num_cpus = %d\n",
5598                     phba->num_cpus);
5599
5600         phba->shost->max_id = phba->params.cxns_per_ctrl;
5601         phba->shost->can_queue = phba->params.ios_per_ctrl;
5602         ret = beiscsi_get_memory(phba);
5603         if (ret < 0) {
5604                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5605                             "BM_%d : alloc host mem failed\n");
5606                 goto free_port;
5607         }
5608
5609         ret = beiscsi_init_port(phba);
5610         if (ret < 0) {
5611                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5612                             "BM_%d : init port failed\n");
5613                 beiscsi_free_mem(phba);
5614                 goto free_port;
5615         }
5616
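             /* set up the MCC tag pool: tags 1..MAX_MCC_CMD, all initially free */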
5617         for (i = 0; i < MAX_MCC_CMD; i++) {
5618                 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
5619                 phba->ctrl.mcc_tag[i] = i + 1;
5620                 phba->ctrl.mcc_tag_status[i + 1] = 0;
5621                 phba->ctrl.mcc_tag_available++;
5622                 memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0,
5623                        sizeof(struct be_dma_mem));
5624         }
5625
5626         phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
5627
5628         snprintf(wq_name, sizeof(wq_name), "beiscsi_%02x_wq",
5629                  phba->shost->host_no);
5630         phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, wq_name);
5631         if (!phba->wq) {
5632                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5633                             "BM_%d : beiscsi_dev_probe-"
5634                             "Failed to allocate work queue\n");
5635                 ret = -ENOMEM;
5636                 goto free_twq;
5637         }
5638
5639         INIT_DELAYED_WORK(&phba->eqd_update, beiscsi_eqd_update_work);
5640
5641         phwi_ctrlr = phba->phwi_ctrlr;
5642         phwi_context = phwi_ctrlr->phwi_ctxt;
5643
5644         for (i = 0; i < phba->num_cpus; i++) {
5645                 pbe_eq = &phwi_context->be_eq[i];
5646                 irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll);
5647         }
5648
5649         i = (phba->pcidev->msix_enabled) ? i : 0;
5650         /* Work item for MCC handling */
5651         pbe_eq = &phwi_context->be_eq[i];
5652         INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work);
5653
5654         ret = beiscsi_init_irqs(phba);
5655         if (ret < 0) {
5656                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5657                             "BM_%d : beiscsi_dev_probe-"
5658                             "Failed to beiscsi_init_irqs\n");
5659                 goto disable_iopoll;
5660         }
5661         hwi_enable_intr(phba);
5662
5663         ret = iscsi_host_add(phba->shost, &phba->pcidev->dev);
5664         if (ret)
5665                 goto free_irqs;
5666
5667         /* set online bit after port is operational */
5668         set_bit(BEISCSI_HBA_ONLINE, &phba->state);
5669         __beiscsi_log(phba, KERN_INFO,
5670                       "BM_%d : port online: 0x%lx\n", phba->state);
5671
5672         INIT_WORK(&phba->boot_work, beiscsi_boot_work);
5673         ret = beiscsi_boot_get_shandle(phba, &s_handle);
5674         if (ret > 0) {
5675                 beiscsi_start_boot_work(phba, s_handle);
5676                 /**
5677                  * Set this bit after starting the work to let
5678                  * probe handle it first.
5679                  * An ASYNC event can also schedule this work.
5680                  */
5681                 set_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state);
5682         }
5683
5684         beiscsi_iface_create_default(phba);
5685         schedule_delayed_work(&phba->eqd_update,
5686                               msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));
5687
5688         INIT_WORK(&phba->sess_work, beiscsi_sess_work);
5689         INIT_DELAYED_WORK(&phba->recover_port, beiscsi_recover_port);
5690         /**
5691          * Start UE detection here. A UE before this point would stall the
5692          * probe and eventually cause it to fail.
5693          */
5694         timer_setup(&phba->hw_check, beiscsi_hw_health_check, 0);
5695         mod_timer(&phba->hw_check,
5696                   jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
5697         beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
5698                     "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
5699         return 0;
5700
5701 free_irqs:
5702         hwi_disable_intr(phba);
5703         beiscsi_free_irqs(phba);
5704 disable_iopoll:
5705         for (i = 0; i < phba->num_cpus; i++) {
5706                 pbe_eq = &phwi_context->be_eq[i];
5707                 irq_poll_disable(&pbe_eq->iopoll);
5708         }
5709         destroy_workqueue(phba->wq);
5710 free_twq:
5711         hwi_cleanup_port(phba);
5712         beiscsi_cleanup_port(phba);
5713         beiscsi_free_mem(phba);
5714 free_port:
5715         dma_free_coherent(&phba->pcidev->dev,
5716                             phba->ctrl.mbox_mem_alloced.size,
5717                             phba->ctrl.mbox_mem_alloced.va,
5718                             phba->ctrl.mbox_mem_alloced.dma);
5719         beiscsi_unmap_pci_function(phba);
5720 free_hba:
5721         pci_disable_msix(phba->pcidev);
5722         pci_dev_put(phba->pcidev);
5723         iscsi_host_free(phba->shost);
5724         pci_disable_pcie_error_reporting(pcidev);
5725         pci_set_drvdata(pcidev, NULL);
5726 disable_pci:
5727         pci_release_regions(pcidev);
5728         pci_disable_device(pcidev);
5729         return ret;
5730 }
5731
5732 static void beiscsi_remove(struct pci_dev *pcidev)
5733 {
5734         struct beiscsi_hba *phba = NULL;
5735
5736         phba = pci_get_drvdata(pcidev);
5737         if (!phba) {
5738                 dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
5739                 return;
5740         }
5741
5742         /* first stop UE detection before unloading */
5743         del_timer_sync(&phba->hw_check);
5744         cancel_delayed_work_sync(&phba->recover_port);
5745         cancel_work_sync(&phba->sess_work);
5746
5747         beiscsi_iface_destroy_default(phba);
5748         iscsi_host_remove(phba->shost);
5749         beiscsi_disable_port(phba, 1);
5750
5751         /* after cancelling boot_work */
5752         iscsi_boot_destroy_kset(phba->boot_struct.boot_kset);
5753
5754         /* free all resources */
5755         destroy_workqueue(phba->wq);
5756         beiscsi_free_mem(phba);
5757
5758         /* ctrl uninit */
5759         beiscsi_unmap_pci_function(phba);
5760         dma_free_coherent(&phba->pcidev->dev,
5761                             phba->ctrl.mbox_mem_alloced.size,
5762                             phba->ctrl.mbox_mem_alloced.va,
5763                             phba->ctrl.mbox_mem_alloced.dma);
5764
5765         pci_dev_put(phba->pcidev);
5766         iscsi_host_free(phba->shost);
5767         pci_disable_pcie_error_reporting(pcidev);
5768         pci_set_drvdata(pcidev, NULL);
5769         pci_release_regions(pcidev);
5770         pci_disable_device(pcidev);
5771 }
5772
5773
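     /* PCI error recovery (EEH/AER) callbacks */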
5774 static struct pci_error_handlers beiscsi_eeh_handlers = {
5775         .error_detected = beiscsi_eeh_err_detected,
5776         .slot_reset = beiscsi_eeh_reset,
5777         .resume = beiscsi_eeh_resume,
5778 };
5779
5780 struct iscsi_transport beiscsi_iscsi_transport = {
5781         .owner = THIS_MODULE,
5782         .name = DRV_NAME,
5783         .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
5784                 CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
5785         .create_session = beiscsi_session_create,
5786         .destroy_session = beiscsi_session_destroy,
5787         .create_conn = beiscsi_conn_create,
5788         .bind_conn = beiscsi_conn_bind,
5789         .unbind_conn = iscsi_conn_unbind,
5790         .destroy_conn = iscsi_conn_teardown,
5791         .attr_is_visible = beiscsi_attr_is_visible,
5792         .set_iface_param = beiscsi_iface_set_param,
5793         .get_iface_param = beiscsi_iface_get_param,
5794         .set_param = beiscsi_set_param,
5795         .get_conn_param = iscsi_conn_get_param,
5796         .get_session_param = iscsi_session_get_param,
5797         .get_host_param = beiscsi_get_host_param,
5798         .start_conn = beiscsi_conn_start,
5799         .stop_conn = iscsi_conn_stop,
5800         .send_pdu = iscsi_conn_send_pdu,
5801         .xmit_task = beiscsi_task_xmit,
5802         .cleanup_task = beiscsi_cleanup_task,
5803         .alloc_pdu = beiscsi_alloc_pdu,
5804         .parse_pdu_itt = beiscsi_parse_pdu,
5805         .get_stats = beiscsi_conn_get_stats,
5806         .get_ep_param = beiscsi_ep_get_param,
5807         .ep_connect = beiscsi_ep_connect,
5808         .ep_poll = beiscsi_ep_poll,
5809         .ep_disconnect = beiscsi_ep_disconnect,
5810         .session_recovery_timedout = iscsi_session_recovery_timedout,
5811         .bsg_request = beiscsi_bsg_request,
5812 };
5813
5814 static struct pci_driver beiscsi_pci_driver = {
5815         .name = DRV_NAME,
5816         .probe = beiscsi_dev_probe,
5817         .remove = beiscsi_remove,
5818         .id_table = beiscsi_pci_id_table,
5819         .err_handler = &beiscsi_eeh_handlers
5820 };
5821
5822 static int __init beiscsi_module_init(void)
5823 {
5824         int ret;
5825
5826         beiscsi_scsi_transport =
5827                         iscsi_register_transport(&beiscsi_iscsi_transport);
5828         if (!beiscsi_scsi_transport) {
5829                 printk(KERN_ERR
5830                        "beiscsi_module_init - Unable to register beiscsi transport.\n");
5831                 return -ENOMEM;
5832         }
5833         printk(KERN_INFO "In beiscsi_module_init, tt=%p\n",
5834                &beiscsi_iscsi_transport);
5835
5836         ret = pci_register_driver(&beiscsi_pci_driver);
5837         if (ret) {
5838                 printk(KERN_ERR
5839                        "beiscsi_module_init - Unable to register beiscsi pci driver.\n");
5840                 goto unregister_iscsi_transport;
5841         }
5842         return 0;
5843
5844 unregister_iscsi_transport:
5845         iscsi_unregister_transport(&beiscsi_iscsi_transport);
5846         return ret;
5847 }
5848
5849 static void __exit beiscsi_module_exit(void)
5850 {
5851         pci_unregister_driver(&beiscsi_pci_driver);
5852         iscsi_unregister_transport(&beiscsi_iscsi_transport);
5853 }
5854
5855 module_init(beiscsi_module_init);
5856 module_exit(beiscsi_module_exit);