GNU Linux-libre 5.15.54-gnu
[releases.git] / drivers / scsi / qla2xxx / qla_edif.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Marvell Fibre Channel HBA Driver
4  * Copyright (c)  2021     Marvell
5  */
6 #include "qla_def.h"
7 #include "qla_edif.h"
8
9 #include <linux/kthread.h>
10 #include <linux/vmalloc.h>
11 #include <linux/delay.h>
12 #include <scsi/scsi_tcq.h>
13
14 static struct edif_sa_index_entry *qla_edif_sadb_find_sa_index_entry(uint16_t nport_handle,
15                 struct list_head *sa_list);
16 static uint16_t qla_edif_sadb_get_sa_index(fc_port_t *fcport,
17                 struct qla_sa_update_frame *sa_frame);
18 static int qla_edif_sadb_delete_sa_index(fc_port_t *fcport, uint16_t nport_handle,
19                 uint16_t sa_index);
20 static int qla_pur_get_pending(scsi_qla_host_t *, fc_port_t *, struct bsg_job *);
21
/*
 * Event node for the eDIF doorbell queue; ntype selects which member
 * of the union carries the event payload.  (Presumably consumed by the
 * authentication application via the doorbell read path elsewhere in
 * this file -- confirm against qla_edb_* users.)
 */
struct edb_node {
	struct  list_head	list;		/* linkage on the doorbell queue */
	uint32_t		ntype;		/* event type; selects union member */
	union {
		port_id_t	plogi_did;	/* PLOGI event: originator port id */
		uint32_t	async;		/* async event payload */
		port_id_t	els_sid;	/* ELS event: source port id */
		struct edif_sa_update_aen	sa_aen;	/* SA update completion event */
	} u;
};
32
/* Lookup table mapping ELS sub-command codes to printable names (debug only). */
static struct els_sub_cmd {
	uint16_t cmd;		/* sub-command code from the bsg request */
	const char *str;	/* human-readable description */
} sc_str[] = {
	{SEND_ELS, "send ELS"},
	{SEND_ELS_REPLY, "send ELS Reply"},
	{PULL_ELS, "retrieve ELS"},
};
41
42 const char *sc_to_str(uint16_t cmd)
43 {
44         int i;
45         struct els_sub_cmd *e;
46
47         for (i = 0; i < ARRAY_SIZE(sc_str); i++) {
48                 e = sc_str + i;
49                 if (cmd == e->cmd)
50                         return e->str;
51         }
52         return "unknown";
53 }
54
55 static struct edif_list_entry *qla_edif_list_find_sa_index(fc_port_t *fcport,
56                 uint16_t handle)
57 {
58         struct edif_list_entry *entry;
59         struct edif_list_entry *tentry;
60         struct list_head *indx_list = &fcport->edif.edif_indx_list;
61
62         list_for_each_entry_safe(entry, tentry, indx_list, next) {
63                 if (entry->handle == handle)
64                         return entry;
65         }
66         return NULL;
67 }
68
/*
 * timeout called when no traffic and delayed rx sa_index delete
 *
 * Runs in timer (softirq) context.  If the delayed rx delete recorded in
 * delete_sa_index was never serviced by the I/O path, claim it here and
 * queue the actual SA delete onto the driver work queue.
 */
static void qla2x00_sa_replace_iocb_timeout(struct timer_list *t)
{
	struct edif_list_entry *edif_entry = from_timer(edif_entry, t, timer);
	fc_port_t *fcport = edif_entry->fcport;
	struct scsi_qla_host *vha = fcport->vha;
	struct  edif_sa_ctl *sa_ctl;
	uint16_t nport_handle;
	unsigned long flags = 0;

	ql_dbg(ql_dbg_edif, vha, 0x3069,
	    "%s:  nport_handle 0x%x,  SA REPL Delay Timeout, %8phC portid=%06x\n",
	    __func__, edif_entry->handle, fcport->port_name, fcport->d_id.b24);

	/*
	 * if delete_sa_index is valid then no one has serviced this
	 * delayed delete
	 */
	spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);

	/*
	 * delete_sa_index is invalidated when we find the new sa_index in
	 * the incoming data stream.  If it is not invalidated then we are
	 * still looking for the new sa_index because there is no I/O and we
	 * need to just force the rx delete and move on.  Otherwise
	 * we could get another rekey which will result in an error 66.
	 */
	if (edif_entry->delete_sa_index != INVALID_EDIF_SA_INDEX) {
		uint16_t delete_sa_index = edif_entry->delete_sa_index;

		/* claim the delayed delete under the lock so no other
		 * path services it concurrently
		 */
		edif_entry->delete_sa_index = INVALID_EDIF_SA_INDEX;
		nport_handle = edif_entry->handle;
		spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);

		sa_ctl = qla_edif_find_sa_ctl_by_index(fcport,
		    delete_sa_index, 0);

		if (sa_ctl) {
			ql_dbg(ql_dbg_edif, vha, 0x3063,
			    "%s: sa_ctl: %p, delete index %d, update index: %d, lid: 0x%x\n",
			    __func__, sa_ctl, delete_sa_index, edif_entry->update_sa_index,
			    nport_handle);

			/* mark for delete and hand the SA removal off to
			 * the dpc work thread (cannot do it in timer context)
			 */
			sa_ctl->flags = EDIF_SA_CTL_FLG_DEL;
			set_bit(EDIF_SA_CTL_REPL, &sa_ctl->state);
			qla_post_sa_replace_work(fcport->vha, fcport,
			    nport_handle, sa_ctl);

		} else {
			ql_dbg(ql_dbg_edif, vha, 0x3063,
			    "%s: sa_ctl not found for delete_sa_index: %d\n",
			    __func__, edif_entry->delete_sa_index);
		}
	} else {
		spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
	}
}
126
/*
 * create a new list entry for this nport handle and
 * add an sa_update index to the list - called for sa_update
 *
 * Returns 0 on success, -ENOMEM when a new entry cannot be allocated.
 */
static int qla_edif_list_add_sa_update_index(fc_port_t *fcport,
		uint16_t sa_index, uint16_t handle)
{
	struct edif_list_entry *entry;
	unsigned long flags = 0;

	/* if the entry exists, then just update the sa_index */
	/* NOTE(review): this fast path writes update_sa_index/count without
	 * taking indx_list_lock - presumably safe w.r.t. the timer path,
	 * but worth confirming.
	 */
	entry = qla_edif_list_find_sa_index(fcport, handle);
	if (entry) {
		entry->update_sa_index = sa_index;
		entry->count = 0;
		return 0;
	}

	/*
	 * This is the normal path - there should be no existing entry
	 * when update is called.  The exception is at startup
	 * when update is called for the first two sa_indexes
	 * followed by a delete of the first sa_index
	 */
	entry = kzalloc((sizeof(struct edif_list_entry)), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&entry->next);
	entry->handle = handle;
	entry->update_sa_index = sa_index;
	entry->delete_sa_index = INVALID_EDIF_SA_INDEX;
	entry->count = 0;
	entry->flags = 0;
	/* timer forces the delayed rx delete when no traffic arrives */
	timer_setup(&entry->timer, qla2x00_sa_replace_iocb_timeout, 0);
	spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
	list_add_tail(&entry->next, &fcport->edif.edif_indx_list);
	spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
	return 0;
}
167
168 /* remove an entry from the list */
169 static void qla_edif_list_delete_sa_index(fc_port_t *fcport, struct edif_list_entry *entry)
170 {
171         unsigned long flags = 0;
172
173         spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
174         list_del(&entry->next);
175         spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
176 }
177
178 int qla_post_sa_replace_work(struct scsi_qla_host *vha,
179          fc_port_t *fcport, uint16_t nport_handle, struct edif_sa_ctl *sa_ctl)
180 {
181         struct qla_work_evt *e;
182
183         e = qla2x00_alloc_work(vha, QLA_EVT_SA_REPLACE);
184         if (!e)
185                 return QLA_FUNCTION_FAILED;
186
187         e->u.sa_update.fcport = fcport;
188         e->u.sa_update.sa_ctl = sa_ctl;
189         e->u.sa_update.nport_handle = nport_handle;
190         fcport->flags |= FCF_ASYNC_ACTIVE;
191         return qla2x00_post_work(vha, e);
192 }
193
/* Reset the per-fcport eDIF rekey and byte counters (called at app start). */
static void
qla_edif_sa_ctl_init(scsi_qla_host_t *vha, struct fc_port  *fcport)
{
	ql_dbg(ql_dbg_edif, vha, 0x2058,
	    "Init SA_CTL List for fcport - nn %8phN pn %8phN portid=%06x.\n",
	    fcport->node_name, fcport->port_name, fcport->d_id.b24);

	/* zero accounting; no SA control structures are allocated here */
	fcport->edif.tx_rekey_cnt = 0;
	fcport->edif.rx_rekey_cnt = 0;

	fcport->edif.tx_bytes = 0;
	fcport->edif.rx_bytes = 0;
}
207
/*
 * Common validation for eDIF ELS bsg passthru requests.
 *
 * Returns 0 when the caller should continue processing the request.
 * Returns -EIO after completing the bsg job here: either eDIF/doorbell
 * is not active, or the request was a PULL_ELS that this function
 * serviced directly via qla_pur_get_pending().
 */
static int qla_bsg_check(scsi_qla_host_t *vha, struct bsg_job *bsg_job,
fc_port_t *fcport)
{
	struct extra_auth_els *p;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct qla_bsg_auth_els_request *req =
	    (struct qla_bsg_auth_els_request *)bsg_job->request;

	if (!vha->hw->flags.edif_enabled) {
		ql_dbg(ql_dbg_edif, vha, 0x9105,
		    "%s edif not enabled\n", __func__);
		goto done;
	}
	if (vha->e_dbell.db_flags != EDB_ACTIVE) {
		ql_dbg(ql_dbg_edif, vha, 0x09102,
		    "%s doorbell not enabled\n", __func__);
		goto done;
	}

	p = &req->e;

	/* Get response */
	if (p->sub_cmd == PULL_ELS) {
		struct qla_bsg_auth_els_reply *rpl =
			(struct qla_bsg_auth_els_reply *)bsg_job->reply;

		/* NOTE(review): return value ignored - the reply fields
		 * logged below appear to be filled in either way; confirm
		 * this is intentional.
		 */
		qla_pur_get_pending(vha, fcport, bsg_job);

		ql_dbg(ql_dbg_edif, vha, 0x911d,
			"%s %s %8phN sid=%x. xchg %x, nb=%xh bsg ptr %p\n",
			__func__, sc_to_str(p->sub_cmd), fcport->port_name,
			fcport->d_id.b24, rpl->rx_xchg_address,
			rpl->r.reply_payload_rcv_len, bsg_job);

		goto done;
	}
	return 0;

done:

	/* request fully handled (or rejected) here - complete the job now */
	bsg_job_done(bsg_job, bsg_reply->result,
			bsg_reply->reply_payload_rcv_len);
	return -EIO;
}
252
253 fc_port_t *
254 qla2x00_find_fcport_by_pid(scsi_qla_host_t *vha, port_id_t *id)
255 {
256         fc_port_t *f, *tf;
257
258         f = NULL;
259         list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
260                 if ((f->flags & FCF_FCSP_DEVICE)) {
261                         ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x2058,
262                             "Found secure fcport - nn %8phN pn %8phN portid=0x%x, 0x%x.\n",
263                             f->node_name, f->port_name,
264                             f->d_id.b24, id->b24);
265                         if (f->d_id.b24 == id->b24)
266                                 return f;
267                 }
268         }
269         return NULL;
270 }
271
272 /**
273  * qla_edif_app_check(): check for valid application id.
274  * @vha: host adapter pointer
275  * @appid: application id
276  * Return: false = fail, true = pass
277  */
278 static bool
279 qla_edif_app_check(scsi_qla_host_t *vha, struct app_id appid)
280 {
281         /* check that the app is allow/known to the driver */
282
283         if (appid.app_vid == EDIF_APP_ID) {
284                 ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x911d, "%s app id ok\n", __func__);
285                 return true;
286         }
287         ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app id not ok (%x)",
288             __func__, appid.app_vid);
289
290         return false;
291 }
292
293 static void
294 qla_edif_free_sa_ctl(fc_port_t *fcport, struct edif_sa_ctl *sa_ctl,
295         int index)
296 {
297         unsigned long flags = 0;
298
299         spin_lock_irqsave(&fcport->edif.sa_list_lock, flags);
300         list_del(&sa_ctl->next);
301         spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags);
302         if (index >= 512)
303                 fcport->edif.tx_rekey_cnt--;
304         else
305                 fcport->edif.rx_rekey_cnt--;
306         kfree(sa_ctl);
307 }
308
309 /* return an index to the freepool */
310 static void qla_edif_add_sa_index_to_freepool(fc_port_t *fcport, int dir,
311                 uint16_t sa_index)
312 {
313         void *sa_id_map;
314         struct scsi_qla_host *vha = fcport->vha;
315         struct qla_hw_data *ha = vha->hw;
316         unsigned long flags = 0;
317         u16 lsa_index = sa_index;
318
319         ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
320             "%s: entry\n", __func__);
321
322         if (dir) {
323                 sa_id_map = ha->edif_tx_sa_id_map;
324                 lsa_index -= EDIF_TX_SA_INDEX_BASE;
325         } else {
326                 sa_id_map = ha->edif_rx_sa_id_map;
327         }
328
329         spin_lock_irqsave(&ha->sadb_fp_lock, flags);
330         clear_bit(lsa_index, sa_id_map);
331         spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
332         ql_dbg(ql_dbg_edif, vha, 0x3063,
333             "%s: index %d added to free pool\n", __func__, sa_index);
334 }
335
/*
 * Release every SA resource recorded in one sadb index entry for
 * @fcport: free the sa_ctl structures, return the sa_indexes to the
 * free pool, and (rx only) tear down any pending delayed-delete timer.
 * @pdir is 0 for rx keys or SAU_FLG_TX for tx keys.
 */
static void __qla2x00_release_all_sadb(struct scsi_qla_host *vha,
	struct fc_port *fcport, struct edif_sa_index_entry *entry,
	int pdir)
{
	struct edif_list_entry *edif_entry;
	struct  edif_sa_ctl *sa_ctl;
	int i, dir;
	int key_cnt = 0;

	/* each sadb entry tracks at most two sa_indexes (pair) */
	for (i = 0; i < 2; i++) {
		if (entry->sa_pair[i].sa_index == INVALID_EDIF_SA_INDEX)
			continue;

		/* entry handle should match the session's loop id */
		if (fcport->loop_id != entry->handle) {
			ql_dbg(ql_dbg_edif, vha, 0x3063,
			    "%s: ** WARNING %d** entry handle: 0x%x, lid: 0x%x, sa_index: %d\n",
			    __func__, i, entry->handle, fcport->loop_id,
			    entry->sa_pair[i].sa_index);
		}

		/* release the sa_ctl */
		sa_ctl = qla_edif_find_sa_ctl_by_index(fcport,
				entry->sa_pair[i].sa_index, pdir);
		/* second lookup re-validates the sa_ctl is still on the
		 * list before freeing it
		 */
		if (sa_ctl &&
		    qla_edif_find_sa_ctl_by_index(fcport, sa_ctl->index, pdir)) {
			ql_dbg(ql_dbg_edif, vha, 0x3063,
			    "%s: freeing sa_ctl for index %d\n", __func__, sa_ctl->index);
			qla_edif_free_sa_ctl(fcport, sa_ctl, sa_ctl->index);
		} else {
			ql_dbg(ql_dbg_edif, vha, 0x3063,
			    "%s: sa_ctl NOT freed, sa_ctl: %p\n", __func__, sa_ctl);
		}

		/* Release the index */
		ql_dbg(ql_dbg_edif, vha, 0x3063,
			"%s: freeing sa_index %d, nph: 0x%x\n",
			__func__, entry->sa_pair[i].sa_index, entry->handle);

		/* indexes at or above EDIF_TX_SA_INDEX_BASE are tx */
		dir = (entry->sa_pair[i].sa_index <
			EDIF_TX_SA_INDEX_BASE) ? 0 : 1;
		qla_edif_add_sa_index_to_freepool(fcport, dir,
			entry->sa_pair[i].sa_index);

		/* Delete timer on RX */
		if (pdir != SAU_FLG_TX) {
			edif_entry =
				qla_edif_list_find_sa_index(fcport, entry->handle);
			if (edif_entry) {
				ql_dbg(ql_dbg_edif, vha, 0x5033,
				    "%s: remove edif_entry %p, update_sa_index: 0x%x, delete_sa_index: 0x%x\n",
				    __func__, edif_entry, edif_entry->update_sa_index,
				    edif_entry->delete_sa_index);
				qla_edif_list_delete_sa_index(fcport, edif_entry);
				/*
				 * valid delete_sa_index indicates there is a rx
				 * delayed delete queued
				 */
				if (edif_entry->delete_sa_index !=
						INVALID_EDIF_SA_INDEX) {
					del_timer(&edif_entry->timer);

					/* build and send the aen */
					fcport->edif.rx_sa_set = 1;
					fcport->edif.rx_sa_pending = 0;
					qla_edb_eventcreate(vha,
							VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
							QL_VND_SA_STAT_SUCCESS,
							QL_VND_RX_SA_KEY, fcport);
				}
				ql_dbg(ql_dbg_edif, vha, 0x5033,
				    "%s: release edif_entry %p, update_sa_index: 0x%x, delete_sa_index: 0x%x\n",
				    __func__, edif_entry, edif_entry->update_sa_index,
				    edif_entry->delete_sa_index);

				kfree(edif_entry);
			}
		}
		key_cnt++;
	}
	ql_dbg(ql_dbg_edif, vha, 0x3063,
	    "%s: %d %s keys released\n",
	    __func__, key_cnt, pdir ? "tx" : "rx");
}
419
/* find and release all outstanding sadb sa_indices for this fcport */
void qla2x00_release_all_sadb(struct scsi_qla_host *vha, struct fc_port *fcport)
{
	struct edif_sa_index_entry *entry, *tmp;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
	    "%s: Starting...\n", __func__);

	spin_lock_irqsave(&ha->sadb_lock, flags);

	/* rx keys: unlink the entry first, then drop the lock while the
	 * release helper runs - safe because the entry is no longer
	 * reachable from the list once list_del() returns
	 */
	list_for_each_entry_safe(entry, tmp, &ha->sadb_rx_index_list, next) {
		if (entry->fcport == fcport) {
			list_del(&entry->next);
			spin_unlock_irqrestore(&ha->sadb_lock, flags);
			__qla2x00_release_all_sadb(vha, fcport, entry, 0);
			kfree(entry);
			spin_lock_irqsave(&ha->sadb_lock, flags);
			break;
		}
	}

	/* tx keys: same unlink-then-release pattern */
	list_for_each_entry_safe(entry, tmp, &ha->sadb_tx_index_list, next) {
		if (entry->fcport == fcport) {
			list_del(&entry->next);
			spin_unlock_irqrestore(&ha->sadb_lock, flags);

			__qla2x00_release_all_sadb(vha, fcport, entry, SAU_FLG_TX);

			kfree(entry);
			spin_lock_irqsave(&ha->sadb_lock, flags);
			break;
		}
	}
	spin_unlock_irqrestore(&ha->sadb_lock, flags);
}
457
458 /**
459  * qla_edif_app_start:  application has announce its present
460  * @vha: host adapter pointer
461  * @bsg_job: user request
462  *
463  * Set/activate doorbell.  Reset current sessions and re-login with
464  * secure flag.
465  */
static int
qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
{
	int32_t                 rval = 0;
	struct fc_bsg_reply     *bsg_reply = bsg_job->reply;
	struct app_start        appstart;
	struct app_start_reply  appreply;
	struct fc_port  *fcport, *tf;

	ql_log(ql_log_info, vha, 0x1313,
	       "EDIF application registration with driver, FC device connections will be re-established.\n");

	/* copy the app_start request out of the bsg scatterlist */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &appstart,
	    sizeof(struct app_start));

	ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app_vid=%x app_start_flags %x\n",
	     __func__, appstart.app_info.app_vid, appstart.app_start_flags);

	if (vha->e_dbell.db_flags != EDB_ACTIVE) {
		/* mark doorbell as active since an app is now present */
		vha->e_dbell.db_flags = EDB_ACTIVE;
	} else {
		ql_dbg(ql_dbg_edif, vha, 0x911e, "%s doorbell already active\n",
		     __func__);
	}

	if (N2N_TOPO(vha->hw)) {
		/* point-to-point: restart the link instead of cycling
		 * individual sessions
		 */
		if (vha->hw->flags.n2n_fw_acc_sec)
			set_bit(N2N_LINK_RESET, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
	} else {
		/* fabric/loop: schedule every session for deletion so it
		 * re-logs-in with the secure flag now that the app is up
		 */
		list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
			ql_dbg(ql_dbg_edif, vha, 0x2058,
			       "FCSP - nn %8phN pn %8phN portid=%06x.\n",
			       fcport->node_name, fcport->port_name,
			       fcport->d_id.b24);
			ql_dbg(ql_dbg_edif, vha, 0xf084,
			       "%s: se_sess %p / sess %p from port %8phC "
			       "loop_id %#04x s_id %06x logout %d "
			       "keep %d els_logo %d disc state %d auth state %d"
			       "stop state %d\n",
			       __func__, fcport->se_sess, fcport,
			       fcport->port_name, fcport->loop_id,
			       fcport->d_id.b24, fcport->logout_on_delete,
			       fcport->keep_nport_handle, fcport->send_els_logo,
			       fcport->disc_state, fcport->edif.auth_state,
			       fcport->edif.app_stop);

			if (atomic_read(&vha->loop_state) == LOOP_DOWN)
				break;

			fcport->edif.app_started = 1;
			fcport->login_retry = vha->hw->login_retry_count;

			/* no activity */
			fcport->edif.app_stop = 0;

			ql_dbg(ql_dbg_edif, vha, 0x911e,
			       "%s wwpn %8phC calling qla_edif_reset_auth_wait\n",
			       __func__, fcport->port_name);
			fcport->edif.app_sess_online = 0;
			qlt_schedule_sess_for_deletion(fcport);
			qla_edif_sa_ctl_init(vha, fcport);
		}
	}

	if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) {
		/* mark as active since an app is now present */
		vha->pur_cinfo.enode_flags = ENODE_ACTIVE;
	} else {
		ql_dbg(ql_dbg_edif, vha, 0x911f, "%s enode already active\n",
		     __func__);
	}

	/* report doorbell/enode state back to the application */
	appreply.host_support_edif = vha->hw->flags.edif_enabled;
	appreply.edif_enode_active = vha->pur_cinfo.enode_flags;
	appreply.edif_edb_active = vha->e_dbell.db_flags;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);

	SET_DID_STATUS(bsg_reply->result, DID_OK);

	bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
							       bsg_job->reply_payload.sg_cnt,
							       &appreply,
							       sizeof(struct app_start_reply));

	ql_dbg(ql_dbg_edif, vha, 0x911d,
	    "%s app start completed with 0x%x\n",
	    __func__, rval);

	return rval;
}
562
563 /**
564  * qla_edif_app_stop - app has announced it's exiting.
565  * @vha: host adapter pointer
566  * @bsg_job: user space command pointer
567  *
568  * Free any in flight messages, clear all doorbell events
569  * to application. Reject any message relate to security.
570  */
571 static int
572 qla_edif_app_stop(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
573 {
574         struct app_stop         appstop;
575         struct fc_bsg_reply     *bsg_reply = bsg_job->reply;
576         struct fc_port  *fcport, *tf;
577
578         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
579             bsg_job->request_payload.sg_cnt, &appstop,
580             sizeof(struct app_stop));
581
582         ql_dbg(ql_dbg_edif, vha, 0x911d, "%s Stopping APP: app_vid=%x\n",
583             __func__, appstop.app_info.app_vid);
584
585         /* Call db stop and enode stop functions */
586
587         /* if we leave this running short waits are operational < 16 secs */
588         qla_enode_stop(vha);        /* stop enode */
589         qla_edb_stop(vha);          /* stop db */
590
591         list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
592                 if (!(fcport->flags & FCF_FCSP_DEVICE))
593                         continue;
594
595                 if (fcport->flags & FCF_FCSP_DEVICE) {
596                         ql_dbg(ql_dbg_edif, vha, 0xf084,
597                             "%s: sess %p from port %8phC lid %#04x s_id %06x logout %d keep %d els_logo %d\n",
598                             __func__, fcport,
599                             fcport->port_name, fcport->loop_id, fcport->d_id.b24,
600                             fcport->logout_on_delete, fcport->keep_nport_handle,
601                             fcport->send_els_logo);
602
603                         if (atomic_read(&vha->loop_state) == LOOP_DOWN)
604                                 break;
605
606                         fcport->edif.app_stop = 1;
607                         ql_dbg(ql_dbg_edif, vha, 0x911e,
608                                 "%s wwpn %8phC calling qla_edif_reset_auth_wait\n",
609                                 __func__, fcport->port_name);
610
611                         fcport->send_els_logo = 1;
612                         qlt_schedule_sess_for_deletion(fcport);
613
614                         /* qla_edif_flush_sa_ctl_lists(fcport); */
615                         fcport->edif.app_started = 0;
616                 }
617         }
618
619         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
620         SET_DID_STATUS(bsg_reply->result, DID_OK);
621
622         /* no return interface to app - it assumes we cleaned up ok */
623
624         return 0;
625 }
626
627 static int
628 qla_edif_app_chk_sa_update(scsi_qla_host_t *vha, fc_port_t *fcport,
629                 struct app_plogi_reply *appplogireply)
630 {
631         int     ret = 0;
632
633         if (!(fcport->edif.rx_sa_set && fcport->edif.tx_sa_set)) {
634                 ql_dbg(ql_dbg_edif, vha, 0x911e,
635                     "%s: wwpn %8phC Both SA indexes has not been SET TX %d, RX %d.\n",
636                     __func__, fcport->port_name, fcport->edif.tx_sa_set,
637                     fcport->edif.rx_sa_set);
638                 appplogireply->prli_status = 0;
639                 ret = 1;
640         } else  {
641                 ql_dbg(ql_dbg_edif, vha, 0x911e,
642                     "%s wwpn %8phC Both SA(s) updated.\n", __func__,
643                     fcport->port_name);
644                 fcport->edif.rx_sa_set = fcport->edif.tx_sa_set = 0;
645                 fcport->edif.rx_sa_pending = fcport->edif.tx_sa_pending = 0;
646                 appplogireply->prli_status = 1;
647         }
648         return ret;
649 }
650
651 /**
652  * qla_edif_app_authok - authentication by app succeeded.  Driver can proceed
653  *   with prli
654  * @vha: host adapter pointer
655  * @bsg_job: user request
656  */
static int
qla_edif_app_authok(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
{
	int32_t                 rval = 0;
	struct auth_complete_cmd appplogiok;
	struct app_plogi_reply  appplogireply = {0};
	struct fc_bsg_reply     *bsg_reply = bsg_job->reply;
	fc_port_t               *fcport = NULL;
	port_id_t               portid = {0};

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &appplogiok,
	    sizeof(struct auth_complete_cmd));

	/* silent unaligned access warning */
	portid.b.domain = appplogiok.u.d_id.b.domain;
	portid.b.area   = appplogiok.u.d_id.b.area;
	portid.b.al_pa  = appplogiok.u.d_id.b.al_pa;

	/* the app may identify the session by wwpn or by port id */
	switch (appplogiok.type) {
	case PL_TYPE_WWPN:
		fcport = qla2x00_find_fcport_by_wwpn(vha,
		    appplogiok.u.wwpn, 0);
		if (!fcport)
			ql_dbg(ql_dbg_edif, vha, 0x911d,
			    "%s wwpn lookup failed: %8phC\n",
			    __func__, appplogiok.u.wwpn);
		break;
	case PL_TYPE_DID:
		fcport = qla2x00_find_fcport_by_pid(vha, &portid);
		if (!fcport)
			ql_dbg(ql_dbg_edif, vha, 0x911d,
			    "%s d_id lookup failed: %x\n", __func__,
			    portid.b24);
		break;
	default:
		ql_dbg(ql_dbg_edif, vha, 0x911d,
		    "%s undefined type: %x\n", __func__,
		    appplogiok.type);
		break;
	}

	if (!fcport) {
		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
		goto errstate_exit;
	}

	/*
	 * if port is online then this is a REKEY operation
	 * Only do sa update checking
	 */
	if (atomic_read(&fcport->state) == FCS_ONLINE) {
		ql_dbg(ql_dbg_edif, vha, 0x911d,
		    "%s Skipping PRLI complete based on rekey\n", __func__);
		appplogireply.prli_status = 1;
		SET_DID_STATUS(bsg_reply->result, DID_OK);
		qla_edif_app_chk_sa_update(vha, fcport, &appplogireply);
		goto errstate_exit;
	}

	/* make sure in AUTH_PENDING or else reject */
	if (fcport->disc_state != DSC_LOGIN_AUTH_PEND) {
		ql_dbg(ql_dbg_edif, vha, 0x911e,
		    "%s wwpn %8phC is not in auth pending state (%x)\n",
		    __func__, fcport->port_name, fcport->disc_state);
		SET_DID_STATUS(bsg_reply->result, DID_OK);
		appplogireply.prli_status = 0;
		goto errstate_exit;
	}

	SET_DID_STATUS(bsg_reply->result, DID_OK);
	appplogireply.prli_status = 1;
	fcport->edif.authok = 1;
	/* NOTE(review): this if/else duplicates the logic of
	 * qla_edif_app_chk_sa_update() - candidate for consolidation
	 */
	if (!(fcport->edif.rx_sa_set && fcport->edif.tx_sa_set)) {
		ql_dbg(ql_dbg_edif, vha, 0x911e,
		    "%s: wwpn %8phC Both SA indexes has not been SET TX %d, RX %d.\n",
		    __func__, fcport->port_name, fcport->edif.tx_sa_set,
		    fcport->edif.rx_sa_set);
		SET_DID_STATUS(bsg_reply->result, DID_OK);
		appplogireply.prli_status = 0;
		goto errstate_exit;

	} else {
		ql_dbg(ql_dbg_edif, vha, 0x911e,
		    "%s wwpn %8phC Both SA(s) updated.\n", __func__,
		    fcport->port_name);
		fcport->edif.rx_sa_set = fcport->edif.tx_sa_set = 0;
		fcport->edif.rx_sa_pending = fcport->edif.tx_sa_pending = 0;
	}

	/* initiator mode: resume the login state machine with PRLI */
	if (qla_ini_mode_enabled(vha)) {
		ql_dbg(ql_dbg_edif, vha, 0x911e,
		    "%s AUTH complete - RESUME with prli for wwpn %8phC\n",
		    __func__, fcport->port_name);
		qla24xx_post_prli_work(vha, fcport);
	}

errstate_exit:
	/* always return an app_plogi_reply to the application */
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
							       bsg_job->reply_payload.sg_cnt,
							       &appplogireply,
							       sizeof(struct app_plogi_reply));

	return rval;
}
763
764 /**
765  * qla_edif_app_authfail - authentication by app has failed.  Driver is given
766  *   notice to tear down current session.
767  * @vha: host adapter pointer
768  * @bsg_job: user request
769  */
770 static int
771 qla_edif_app_authfail(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
772 {
773         int32_t                 rval = 0;
774         struct auth_complete_cmd appplogifail;
775         struct fc_bsg_reply     *bsg_reply = bsg_job->reply;
776         fc_port_t               *fcport = NULL;
777         port_id_t               portid = {0};
778
779         ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app auth fail\n", __func__);
780
781         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
782             bsg_job->request_payload.sg_cnt, &appplogifail,
783             sizeof(struct auth_complete_cmd));
784
785         /* silent unaligned access warning */
786         portid.b.domain = appplogifail.u.d_id.b.domain;
787         portid.b.area   = appplogifail.u.d_id.b.area;
788         portid.b.al_pa  = appplogifail.u.d_id.b.al_pa;
789
790         /*
791          * TODO: edif: app has failed this plogi. Inform driver to
792          * take any action (if any).
793          */
794         switch (appplogifail.type) {
795         case PL_TYPE_WWPN:
796                 fcport = qla2x00_find_fcport_by_wwpn(vha,
797                     appplogifail.u.wwpn, 0);
798                 SET_DID_STATUS(bsg_reply->result, DID_OK);
799                 break;
800         case PL_TYPE_DID:
801                 fcport = qla2x00_find_fcport_by_pid(vha, &portid);
802                 if (!fcport)
803                         ql_dbg(ql_dbg_edif, vha, 0x911d,
804                             "%s d_id lookup failed: %x\n", __func__,
805                             portid.b24);
806                 SET_DID_STATUS(bsg_reply->result, DID_OK);
807                 break;
808         default:
809                 ql_dbg(ql_dbg_edif, vha, 0x911e,
810                     "%s undefined type: %x\n", __func__,
811                     appplogifail.type);
812                 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
813                 SET_DID_STATUS(bsg_reply->result, DID_ERROR);
814                 rval = -1;
815                 break;
816         }
817
818         ql_dbg(ql_dbg_edif, vha, 0x911d,
819             "%s fcport is 0x%p\n", __func__, fcport);
820
821         if (fcport) {
822                 /* set/reset edif values and flags */
823                 ql_dbg(ql_dbg_edif, vha, 0x911e,
824                     "%s reset the auth process - %8phC, loopid=%x portid=%06x.\n",
825                     __func__, fcport->port_name, fcport->loop_id, fcport->d_id.b24);
826
827                 if (qla_ini_mode_enabled(fcport->vha)) {
828                         fcport->send_els_logo = 1;
829                         qlt_schedule_sess_for_deletion(fcport);
830                 }
831         }
832
833         return rval;
834 }
835
836 /**
837  * qla_edif_app_getfcinfo - app would like to read session info (wwpn, nportid,
838  *   [initiator|target] mode.  It can specific session with specific nport id or
839  *   all sessions.
840  * @vha: host adapter pointer
841  * @bsg_job: user request pointer
842  */
843 static int
844 qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
845 {
846         int32_t                 rval = 0;
847         int32_t                 pcnt = 0;
848         struct fc_bsg_reply     *bsg_reply = bsg_job->reply;
849         struct app_pinfo_req    app_req;
850         struct app_pinfo_reply  *app_reply;
851         port_id_t               tdid;
852
853         ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app get fcinfo\n", __func__);
854
855         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
856             bsg_job->request_payload.sg_cnt, &app_req,
857             sizeof(struct app_pinfo_req));
858
859         app_reply = kzalloc((sizeof(struct app_pinfo_reply) +
860             sizeof(struct app_pinfo) * app_req.num_ports), GFP_KERNEL);
861
862         if (!app_reply) {
863                 SET_DID_STATUS(bsg_reply->result, DID_ERROR);
864                 rval = -1;
865         } else {
866                 struct fc_port  *fcport = NULL, *tf;
867
868                 list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
869                         if (!(fcport->flags & FCF_FCSP_DEVICE))
870                                 continue;
871
872                         tdid = app_req.remote_pid;
873
874                         ql_dbg(ql_dbg_edif, vha, 0x2058,
875                             "APP request entry - portid=%06x.\n", tdid.b24);
876
877                         /* Ran out of space */
878                         if (pcnt >= app_req.num_ports)
879                                 break;
880
881                         if (tdid.b24 != 0 && tdid.b24 != fcport->d_id.b24)
882                                 continue;
883
884                         app_reply->ports[pcnt].rekey_count =
885                                 fcport->edif.rekey_cnt;
886
887                         app_reply->ports[pcnt].remote_type =
888                                 VND_CMD_RTYPE_UNKNOWN;
889                         if (fcport->port_type & (FCT_NVME_TARGET | FCT_TARGET))
890                                 app_reply->ports[pcnt].remote_type |=
891                                         VND_CMD_RTYPE_TARGET;
892                         if (fcport->port_type & (FCT_NVME_INITIATOR | FCT_INITIATOR))
893                                 app_reply->ports[pcnt].remote_type |=
894                                         VND_CMD_RTYPE_INITIATOR;
895
896                         app_reply->ports[pcnt].remote_pid = fcport->d_id;
897
898                         ql_dbg(ql_dbg_edif, vha, 0x2058,
899                             "Found FC_SP fcport - nn %8phN pn %8phN pcnt %d portid=%06x secure %d.\n",
900                             fcport->node_name, fcport->port_name, pcnt,
901                             fcport->d_id.b24, fcport->flags & FCF_FCSP_DEVICE);
902
903                         switch (fcport->edif.auth_state) {
904                         case VND_CMD_AUTH_STATE_ELS_RCVD:
905                                 if (fcport->disc_state == DSC_LOGIN_AUTH_PEND) {
906                                         fcport->edif.auth_state = VND_CMD_AUTH_STATE_NEEDED;
907                                         app_reply->ports[pcnt].auth_state =
908                                                 VND_CMD_AUTH_STATE_NEEDED;
909                                 } else {
910                                         app_reply->ports[pcnt].auth_state =
911                                                 VND_CMD_AUTH_STATE_ELS_RCVD;
912                                 }
913                                 break;
914                         default:
915                                 app_reply->ports[pcnt].auth_state = fcport->edif.auth_state;
916                                 break;
917                         }
918
919                         memcpy(app_reply->ports[pcnt].remote_wwpn,
920                             fcport->port_name, 8);
921
922                         app_reply->ports[pcnt].remote_state =
923                                 (atomic_read(&fcport->state) ==
924                                     FCS_ONLINE ? 1 : 0);
925
926                         pcnt++;
927
928                         if (tdid.b24 != 0)
929                                 break;
930                 }
931                 app_reply->port_count = pcnt;
932                 SET_DID_STATUS(bsg_reply->result, DID_OK);
933         }
934
935         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
936         bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
937                                                                bsg_job->reply_payload.sg_cnt,
938                                                                app_reply,
939                                                                sizeof(struct app_pinfo_reply) + sizeof(struct app_pinfo) * pcnt);
940
941         kfree(app_reply);
942
943         return rval;
944 }
945
946 /**
947  * qla_edif_app_getstats - app would like to read various statistics info
948  * @vha: host adapter pointer
949  * @bsg_job: user request
950  */
951 static int32_t
952 qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
953 {
954         int32_t                 rval = 0;
955         struct fc_bsg_reply     *bsg_reply = bsg_job->reply;
956         uint32_t size;
957
958         struct app_sinfo_req    app_req;
959         struct app_stats_reply  *app_reply;
960         uint32_t pcnt = 0;
961
962         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
963             bsg_job->request_payload.sg_cnt, &app_req,
964             sizeof(struct app_sinfo_req));
965         if (app_req.num_ports == 0) {
966                 ql_dbg(ql_dbg_async, vha, 0x911d,
967                    "%s app did not indicate number of ports to return\n",
968                     __func__);
969                 SET_DID_STATUS(bsg_reply->result, DID_ERROR);
970                 rval = -1;
971         }
972
973         size = sizeof(struct app_stats_reply) +
974             (sizeof(struct app_sinfo) * app_req.num_ports);
975
976         app_reply = kzalloc(size, GFP_KERNEL);
977         if (!app_reply) {
978                 SET_DID_STATUS(bsg_reply->result, DID_ERROR);
979                 rval = -1;
980         } else {
981                 struct fc_port  *fcport = NULL, *tf;
982
983                 list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
984                         if (fcport->edif.enable) {
985                                 if (pcnt > app_req.num_ports)
986                                         break;
987
988                                 app_reply->elem[pcnt].rekey_count =
989                                     fcport->edif.rekey_cnt;
990                                 app_reply->elem[pcnt].tx_bytes =
991                                     fcport->edif.tx_bytes;
992                                 app_reply->elem[pcnt].rx_bytes =
993                                     fcport->edif.rx_bytes;
994
995                                 memcpy(app_reply->elem[pcnt].remote_wwpn,
996                                     fcport->port_name, 8);
997
998                                 pcnt++;
999                         }
1000                 }
1001                 app_reply->elem_count = pcnt;
1002                 SET_DID_STATUS(bsg_reply->result, DID_OK);
1003         }
1004
1005         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1006         bsg_reply->reply_payload_rcv_len =
1007             sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1008                bsg_job->reply_payload.sg_cnt, app_reply,
1009                sizeof(struct app_stats_reply) + (sizeof(struct app_sinfo) * pcnt));
1010
1011         kfree(app_reply);
1012
1013         return rval;
1014 }
1015
1016 int32_t
1017 qla_edif_app_mgmt(struct bsg_job *bsg_job)
1018 {
1019         struct fc_bsg_request   *bsg_request = bsg_job->request;
1020         struct fc_bsg_reply     *bsg_reply = bsg_job->reply;
1021         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1022         scsi_qla_host_t         *vha = shost_priv(host);
1023         struct app_id           appcheck;
1024         bool done = true;
1025         int32_t         rval = 0;
1026         uint32_t        vnd_sc = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1027
1028         ql_dbg(ql_dbg_edif, vha, 0x911d, "%s vnd subcmd=%x\n",
1029             __func__, vnd_sc);
1030
1031         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1032             bsg_job->request_payload.sg_cnt, &appcheck,
1033             sizeof(struct app_id));
1034
1035         if (!vha->hw->flags.edif_enabled ||
1036                 test_bit(VPORT_DELETE, &vha->dpc_flags)) {
1037                 ql_dbg(ql_dbg_edif, vha, 0x911d,
1038                     "%s edif not enabled or vp delete. bsg ptr done %p. dpc_flags %lx\n",
1039                     __func__, bsg_job, vha->dpc_flags);
1040
1041                 SET_DID_STATUS(bsg_reply->result, DID_ERROR);
1042                 goto done;
1043         }
1044
1045         if (!qla_edif_app_check(vha, appcheck)) {
1046                 ql_dbg(ql_dbg_edif, vha, 0x911d,
1047                     "%s app checked failed.\n",
1048                     __func__);
1049
1050                 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1051                 SET_DID_STATUS(bsg_reply->result, DID_ERROR);
1052                 goto done;
1053         }
1054
1055         switch (vnd_sc) {
1056         case QL_VND_SC_SA_UPDATE:
1057                 done = false;
1058                 rval = qla24xx_sadb_update(bsg_job);
1059                 break;
1060         case QL_VND_SC_APP_START:
1061                 rval = qla_edif_app_start(vha, bsg_job);
1062                 break;
1063         case QL_VND_SC_APP_STOP:
1064                 rval = qla_edif_app_stop(vha, bsg_job);
1065                 break;
1066         case QL_VND_SC_AUTH_OK:
1067                 rval = qla_edif_app_authok(vha, bsg_job);
1068                 break;
1069         case QL_VND_SC_AUTH_FAIL:
1070                 rval = qla_edif_app_authfail(vha, bsg_job);
1071                 break;
1072         case QL_VND_SC_GET_FCINFO:
1073                 rval = qla_edif_app_getfcinfo(vha, bsg_job);
1074                 break;
1075         case QL_VND_SC_GET_STATS:
1076                 rval = qla_edif_app_getstats(vha, bsg_job);
1077                 break;
1078         default:
1079                 ql_dbg(ql_dbg_edif, vha, 0x911d, "%s unknown cmd=%x\n",
1080                     __func__,
1081                     bsg_request->rqst_data.h_vendor.vendor_cmd[1]);
1082                 rval = EXT_STATUS_INVALID_PARAM;
1083                 done = false;
1084                 break;
1085         }
1086
1087 done:
1088         if (done) {
1089                 ql_dbg(ql_dbg_user, vha, 0x7009,
1090                     "%s: %d  bsg ptr done %p\n", __func__, __LINE__, bsg_job);
1091                 bsg_job_done(bsg_job, bsg_reply->result,
1092                     bsg_reply->reply_payload_rcv_len);
1093         }
1094
1095         return rval;
1096 }
1097
1098 static struct edif_sa_ctl *
1099 qla_edif_add_sa_ctl(fc_port_t *fcport, struct qla_sa_update_frame *sa_frame,
1100         int dir)
1101 {
1102         struct  edif_sa_ctl *sa_ctl;
1103         struct qla_sa_update_frame *sap;
1104         int     index = sa_frame->fast_sa_index;
1105         unsigned long flags = 0;
1106
1107         sa_ctl = kzalloc(sizeof(*sa_ctl), GFP_KERNEL);
1108         if (!sa_ctl) {
1109                 /* couldn't get space */
1110                 ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
1111                     "unable to allocate SA CTL\n");
1112                 return NULL;
1113         }
1114
1115         /*
1116          * need to allocate sa_index here and save it
1117          * in both sa_ctl->index and sa_frame->fast_sa_index;
1118          * If alloc fails then delete sa_ctl and return NULL
1119          */
1120         INIT_LIST_HEAD(&sa_ctl->next);
1121         sap = &sa_ctl->sa_frame;
1122         *sap = *sa_frame;
1123         sa_ctl->index = index;
1124         sa_ctl->fcport = fcport;
1125         sa_ctl->flags = 0;
1126         sa_ctl->state = 0L;
1127         ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
1128             "%s: Added sa_ctl %p, index %d, state 0x%lx\n",
1129             __func__, sa_ctl, sa_ctl->index, sa_ctl->state);
1130         spin_lock_irqsave(&fcport->edif.sa_list_lock, flags);
1131         if (dir == SAU_FLG_TX)
1132                 list_add_tail(&sa_ctl->next, &fcport->edif.tx_sa_list);
1133         else
1134                 list_add_tail(&sa_ctl->next, &fcport->edif.rx_sa_list);
1135         spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags);
1136
1137         return sa_ctl;
1138 }
1139
1140 void
1141 qla_edif_flush_sa_ctl_lists(fc_port_t *fcport)
1142 {
1143         struct edif_sa_ctl *sa_ctl, *tsa_ctl;
1144         unsigned long flags = 0;
1145
1146         spin_lock_irqsave(&fcport->edif.sa_list_lock, flags);
1147
1148         list_for_each_entry_safe(sa_ctl, tsa_ctl, &fcport->edif.tx_sa_list,
1149             next) {
1150                 list_del(&sa_ctl->next);
1151                 kfree(sa_ctl);
1152         }
1153
1154         list_for_each_entry_safe(sa_ctl, tsa_ctl, &fcport->edif.rx_sa_list,
1155             next) {
1156                 list_del(&sa_ctl->next);
1157                 kfree(sa_ctl);
1158         }
1159
1160         spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags);
1161 }
1162
1163 struct edif_sa_ctl *
1164 qla_edif_find_sa_ctl_by_index(fc_port_t *fcport, int index, int dir)
1165 {
1166         struct edif_sa_ctl *sa_ctl, *tsa_ctl;
1167         struct list_head *sa_list;
1168
1169         if (dir == SAU_FLG_TX)
1170                 sa_list = &fcport->edif.tx_sa_list;
1171         else
1172                 sa_list = &fcport->edif.rx_sa_list;
1173
1174         list_for_each_entry_safe(sa_ctl, tsa_ctl, sa_list, next) {
1175                 if (test_bit(EDIF_SA_CTL_USED, &sa_ctl->state) &&
1176                     sa_ctl->index == index)
1177                         return sa_ctl;
1178         }
1179         return NULL;
1180 }
1181
/*
 * qla24xx_check_sadb_avail_slot - map the frame's SPI to an sa_index and add
 * an sa_ctl for it to the correct (tx or rx) list on the fcport.
 *
 * Returns 0 on success; RX_DELETE_NO_EDIF_SA_INDEX when an rx delete found no
 * matching entry (caller treats this as good bsg status); INVALID_EDIF_SA_INDEX
 * when no sa_index could be obtained; -1 when the sa_ctl allocation fails.
 * @bsg_job is currently unused in this function.
 */
static int
qla24xx_check_sadb_avail_slot(struct bsg_job *bsg_job, fc_port_t *fcport,
	struct qla_sa_update_frame *sa_frame)
{
	struct edif_sa_ctl *sa_ctl = NULL;
	int dir;
	uint16_t sa_index;

	/* nonzero when this is a tx-key operation, 0 for rx */
	dir = (sa_frame->flags & SAU_FLG_TX);

	/* map the spi to an sa_index */
	sa_index = qla_edif_sadb_get_sa_index(fcport, sa_frame);
	if (sa_index == RX_DELETE_NO_EDIF_SA_INDEX) {
		/* process rx delete */
		ql_dbg(ql_dbg_edif, fcport->vha, 0x3063,
		    "%s: rx delete for lid 0x%x, spi 0x%x, no entry found\n",
		    __func__, fcport->loop_id, sa_frame->spi);

		/*
		 * build and send the aen: nothing to delete in hardware, so
		 * report the SA update as complete to the app right away
		 */
		fcport->edif.rx_sa_set = 1;
		fcport->edif.rx_sa_pending = 0;
		qla_edb_eventcreate(fcport->vha,
		    VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
		    QL_VND_SA_STAT_SUCCESS,
		    QL_VND_RX_SA_KEY, fcport);

		/* force a return of good bsg status; */
		return RX_DELETE_NO_EDIF_SA_INDEX;
	} else if (sa_index == INVALID_EDIF_SA_INDEX) {
		ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
		    "%s: Failed to get sa_index for spi 0x%x, dir: %d\n",
		    __func__, sa_frame->spi, dir);
		return INVALID_EDIF_SA_INDEX;
	}

	ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
	    "%s: index %d allocated to spi 0x%x, dir: %d, nport_handle: 0x%x\n",
	    __func__, sa_index, sa_frame->spi, dir, fcport->loop_id);

	/* This is a local copy of sa_frame. */
	sa_frame->fast_sa_index = sa_index;
	/* create the sa_ctl */
	sa_ctl = qla_edif_add_sa_ctl(fcport, sa_frame, dir);
	if (!sa_ctl) {
		ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
		    "%s: Failed to add sa_ctl for spi 0x%x, dir: %d, sa_index: %d\n",
		    __func__, sa_frame->spi, dir, sa_index);
		return -1;
	}

	set_bit(EDIF_SA_CTL_USED, &sa_ctl->state);

	/* per-direction rekey statistics */
	if (dir == SAU_FLG_TX)
		fcport->edif.tx_rekey_cnt++;
	else
		fcport->edif.rx_rekey_cnt++;

	ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
	    "%s: Found sa_ctl %p, index %d, state 0x%lx, tx_cnt %d, rx_cnt %d, nport_handle: 0x%x\n",
	    __func__, sa_ctl, sa_ctl->index, sa_ctl->state,
	    fcport->edif.tx_rekey_cnt,
	    fcport->edif.rx_rekey_cnt, fcport->loop_id);

	return 0;
}
1248
1249 #define QLA_SA_UPDATE_FLAGS_RX_KEY      0x0
1250 #define QLA_SA_UPDATE_FLAGS_TX_KEY      0x2
1251
/**
 * qla24xx_sadb_update - process a SA_UPDATE bsg request from the auth app
 * @bsg_job: user request carrying a struct qla_sa_update_frame
 *
 * Handles add/replace/delete of rx and tx security association keys for the
 * session identified by the frame's port_id.  TX operations and RX key
 * updates are sent to firmware immediately via an SRB_SA_UPDATE srb (the bsg
 * job then completes from the srb done callback).  An RX delete is normally
 * deferred on a timer so frames still keyed with the old sa_index can be
 * received; the bsg job is completed here with DID_OK in that case.  All
 * error paths complete the bsg job here.  Always returns 0.
 */
int
qla24xx_sadb_update(struct bsg_job *bsg_job)
{
	struct	fc_bsg_reply	*bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t		*fcport = NULL;
	srb_t			*sp = NULL;
	struct edif_list_entry *edif_entry = NULL;
	int			found = 0;
	int			rval = 0;
	int result = 0;
	struct qla_sa_update_frame sa_frame;
	struct srb_iocb *iocb_cmd;
	port_id_t portid;

	ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x911d,
	    "%s entered, vha: 0x%p\n", __func__, vha);

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &sa_frame,
	    sizeof(struct qla_sa_update_frame));

	/* Check if host is online */
	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70a1, "Host is not online\n");
		rval = -EIO;
		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
		goto done;
	}

	/* the doorbell must be active, i.e. the auth app has started */
	if (vha->e_dbell.db_flags != EDB_ACTIVE) {
		ql_log(ql_log_warn, vha, 0x70a1, "App not started\n");
		rval = -EIO;
		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
		goto done;
	}

	/* silent unaligned access warning */
	portid.b.domain = sa_frame.port_id.b.domain;
	portid.b.area   = sa_frame.port_id.b.area;
	portid.b.al_pa  = sa_frame.port_id.b.al_pa;

	fcport = qla2x00_find_fcport_by_pid(vha, &portid);
	if (fcport) {
		found = 1;
		/* a fresh key restarts the per-direction byte counter */
		if (sa_frame.flags == QLA_SA_UPDATE_FLAGS_TX_KEY)
			fcport->edif.tx_bytes = 0;
		if (sa_frame.flags == QLA_SA_UPDATE_FLAGS_RX_KEY)
			fcport->edif.rx_bytes = 0;
	}

	if (!found) {
		ql_dbg(ql_dbg_edif, vha, 0x70a3, "Failed to find port= %06x\n",
		    sa_frame.port_id.b24);
		rval = -EINVAL;
		SET_DID_STATUS(bsg_reply->result, DID_TARGET_FAILURE);
		goto done;
	}

	/* make sure the nport_handle is valid */
	if (fcport->loop_id == FC_NO_LOOP_ID) {
		ql_dbg(ql_dbg_edif, vha, 0x70e1,
		    "%s: %8phN lid=FC_NO_LOOP_ID, spi: 0x%x, DS %d, returning NO_CONNECT\n",
		    __func__, fcport->port_name, sa_frame.spi,
		    fcport->disc_state);
		rval = -EINVAL;
		SET_DID_STATUS(bsg_reply->result, DID_NO_CONNECT);
		goto done;
	}

	/* allocate and queue an sa_ctl */
	result = qla24xx_check_sadb_avail_slot(bsg_job, fcport, &sa_frame);

	/* failure of bsg */
	if (result == INVALID_EDIF_SA_INDEX) {
		ql_dbg(ql_dbg_edif, vha, 0x70e1,
		    "%s: %8phN, skipping update.\n",
		    __func__, fcport->port_name);
		rval = -EINVAL;
		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
		goto done;

	/* rx delete failure */
	} else if (result == RX_DELETE_NO_EDIF_SA_INDEX) {
		/* nothing to delete; the AEN was already sent - report OK */
		ql_dbg(ql_dbg_edif, vha, 0x70e1,
		    "%s: %8phN, skipping rx delete.\n",
		    __func__, fcport->port_name);
		SET_DID_STATUS(bsg_reply->result, DID_OK);
		goto done;
	}

	ql_dbg(ql_dbg_edif, vha, 0x70e1,
	    "%s: %8phN, sa_index in sa_frame: %d flags %xh\n",
	    __func__, fcport->port_name, sa_frame.fast_sa_index,
	    sa_frame.flags);

	/* looking for rx index and delete */
	if (((sa_frame.flags & SAU_FLG_TX) == 0) &&
	    (sa_frame.flags & SAU_FLG_INV)) {
		uint16_t nport_handle = fcport->loop_id;
		uint16_t sa_index = sa_frame.fast_sa_index;

		/*
		 * make sure we have an existing rx key, otherwise just process
		 * this as a straight delete just like TX
		 * This is NOT a normal case, it indicates an error recovery or key cleanup
		 * by the ipsec code above us.
		 */
		edif_entry = qla_edif_list_find_sa_index(fcport, fcport->loop_id);
		if (!edif_entry) {
			ql_dbg(ql_dbg_edif, vha, 0x911d,
			    "%s: WARNING: no active sa_index for nport_handle 0x%x, forcing delete for sa_index 0x%x\n",
			    __func__, fcport->loop_id, sa_index);
			goto force_rx_delete;
		}

		/*
		 * if we have a forced delete for rx, remove the sa_index from the edif list
		 * and proceed with normal delete.  The rx delay timer should not be running
		 */
		if ((sa_frame.flags & SAU_FLG_FORCE_DELETE) == SAU_FLG_FORCE_DELETE) {
			qla_edif_list_delete_sa_index(fcport, edif_entry);
			ql_dbg(ql_dbg_edif, vha, 0x911d,
			    "%s: FORCE DELETE flag found for nport_handle 0x%x, sa_index 0x%x, forcing DELETE\n",
			    __func__, fcport->loop_id, sa_index);
			kfree(edif_entry);
			goto force_rx_delete;
		}

		/*
		 * delayed rx delete
		 *
		 * if delete_sa_index is not invalid then there is already
		 * a delayed index in progress, return bsg bad status
		 */
		if (edif_entry->delete_sa_index != INVALID_EDIF_SA_INDEX) {
			struct edif_sa_ctl *sa_ctl;

			ql_dbg(ql_dbg_edif, vha, 0x911d,
			    "%s: delete for lid 0x%x, delete_sa_index %d is pending\n",
			    __func__, edif_entry->handle, edif_entry->delete_sa_index);

			/* free up the sa_ctl that was allocated with the sa_index */
			sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, sa_index,
			    (sa_frame.flags & SAU_FLG_TX));
			if (sa_ctl) {
				ql_dbg(ql_dbg_edif, vha, 0x3063,
				    "%s: freeing sa_ctl for index %d\n",
				    __func__, sa_ctl->index);
				qla_edif_free_sa_ctl(fcport, sa_ctl, sa_ctl->index);
			}

			/* release the sa_index */
			ql_dbg(ql_dbg_edif, vha, 0x3063,
			    "%s: freeing sa_index %d, nph: 0x%x\n",
			    __func__, sa_index, nport_handle);
			qla_edif_sadb_delete_sa_index(fcport, nport_handle, sa_index);

			rval = -EINVAL;
			SET_DID_STATUS(bsg_reply->result, DID_ERROR);
			goto done;
		}

		fcport->edif.rekey_cnt++;

		/* configure and start the rx delay timer */
		edif_entry->fcport = fcport;
		edif_entry->timer.expires = jiffies + RX_DELAY_DELETE_TIMEOUT * HZ;

		ql_dbg(ql_dbg_edif, vha, 0x911d,
		    "%s: adding timer, entry: %p, delete sa_index %d, lid 0x%x to edif_list\n",
		    __func__, edif_entry, sa_index, nport_handle);

		/*
		 * Start the timer when we queue the delayed rx delete.
		 * This is an activity timer that goes off if we have not
		 * received packets with the new sa_index
		 */
		add_timer(&edif_entry->timer);

		/*
		 * sa_delete for rx key with an active rx key including this one
		 * add the delete rx sa index to the hash so we can look for it
		 * in the rsp queue.  Do this after making any changes to the
		 * edif_entry as part of the rx delete.
		 */

		ql_dbg(ql_dbg_edif, vha, 0x911d,
		    "%s: delete sa_index %d, lid 0x%x to edif_list. bsg done ptr %p\n",
		    __func__, sa_index, nport_handle, bsg_job);

		edif_entry->delete_sa_index = sa_index;

		/* deferred delete queued: complete the bsg job now with OK */
		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK << 16;

		goto done;

	/*
	 * rx index and update
	 * add the index to the list and continue with normal update
	 */
	} else if (((sa_frame.flags & SAU_FLG_TX) == 0) &&
	    ((sa_frame.flags & SAU_FLG_INV) == 0)) {
		/* sa_update for rx key */
		uint32_t nport_handle = fcport->loop_id;
		uint16_t sa_index = sa_frame.fast_sa_index;
		int result;	/* NOTE(review): shadows the outer 'result' */

		/*
		 * add the update rx sa index to the hash so we can look for it
		 * in the rsp queue and continue normally
		 */

		ql_dbg(ql_dbg_edif, vha, 0x911d,
		    "%s:  adding update sa_index %d, lid 0x%x to edif_list\n",
		    __func__, sa_index, nport_handle);

		/* failure here is logged only; the update still proceeds */
		result = qla_edif_list_add_sa_update_index(fcport, sa_index,
		    nport_handle);
		if (result) {
			ql_dbg(ql_dbg_edif, vha, 0x911d,
			    "%s: SA_UPDATE failed to add new sa index %d to list for lid 0x%x\n",
			    __func__, sa_index, nport_handle);
		}
	}
	/* record AES-GMAC mode for this session */
	if (sa_frame.flags & SAU_FLG_GMAC_MODE)
		fcport->edif.aes_gmac = 1;
	else
		fcport->edif.aes_gmac = 0;

force_rx_delete:
	/*
	 * sa_update for both rx and tx keys, sa_delete for tx key
	 * immediately process the request
	 */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		rval = -ENOMEM;
		SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
		goto done;
	}

	sp->type = SRB_SA_UPDATE;
	sp->name = "bsg_sa_update";
	sp->u.bsg_job = bsg_job;
	/* sp->free = qla2x00_bsg_sp_free; */
	sp->free = qla2x00_rel_sp;
	sp->done = qla2x00_bsg_job_done;
	iocb_cmd = &sp->u.iocb_cmd;
	iocb_cmd->u.sa_update.sa_frame  = sa_frame;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		/*
		 * NOTE(review): ql_log() is passed a ql_dbg_* mask
		 * (ql_dbg_edif) instead of a ql_log_* level here - confirm
		 * whether this should be ql_log_warn.
		 */
		ql_log(ql_dbg_edif, vha, 0x70e3,
		    "qla2x00_start_sp failed=%d.\n", rval);

		qla2x00_rel_sp(sp);
		rval = -EIO;
		SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
		goto done;
	}

	ql_dbg(ql_dbg_edif, vha, 0x911d,
	    "%s:  %s sent, hdl=%x, portid=%06x.\n",
	    __func__, sp->name, sp->handle, fcport->d_id.b24);

	fcport->edif.rekey_cnt++;
	/* srb owns completion now; the done callback finishes the bsg job */
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	SET_DID_STATUS(bsg_reply->result, DID_OK);

	return 0;

/*
 * send back error status
 */
done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	ql_dbg(ql_dbg_edif, vha, 0x911d,
	    "%s:status: FAIL, result: 0x%x, bsg ptr done %p\n",
	    __func__, bsg_reply->result, bsg_job);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}
1539
static void
qla_enode_free(scsi_qla_host_t *vha, struct enode *node)
{
	/*
	 * Free an enode that has already been unlinked from any list.
	 * The type is poisoned first so that a stale pointer is easier
	 * to recognize in a crash dump.  @vha is unused here but kept
	 * for symmetry with the other enode helpers.
	 */
	node->ntype = N_UNDEF;
	kfree(node);
}
1546
1547 /**
1548  * qla_enode_init - initialize enode structs & lock
1549  * @vha: host adapter pointer
1550  *
1551  * should only be called when driver attaching
1552  */
1553 void
1554 qla_enode_init(scsi_qla_host_t *vha)
1555 {
1556         struct  qla_hw_data *ha = vha->hw;
1557         char    name[32];
1558
1559         if (vha->pur_cinfo.enode_flags == ENODE_ACTIVE) {
1560                 /* list still active - error */
1561                 ql_dbg(ql_dbg_edif, vha, 0x09102, "%s enode still active\n",
1562                     __func__);
1563                 return;
1564         }
1565
1566         /* initialize lock which protects pur_core & init list */
1567         spin_lock_init(&vha->pur_cinfo.pur_lock);
1568         INIT_LIST_HEAD(&vha->pur_cinfo.head);
1569
1570         snprintf(name, sizeof(name), "%s_%d_purex", QLA2XXX_DRIVER_NAME,
1571             ha->pdev->device);
1572 }
1573
1574 /**
1575  * qla_enode_stop - stop and clear and enode data
1576  * @vha: host adapter pointer
1577  *
1578  * called when app notified it is exiting
1579  */
1580 void
1581 qla_enode_stop(scsi_qla_host_t *vha)
1582 {
1583         unsigned long flags;
1584         struct enode *node, *q;
1585
1586         if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) {
1587                 /* doorbell list not enabled */
1588                 ql_dbg(ql_dbg_edif, vha, 0x09102,
1589                     "%s enode not active\n", __func__);
1590                 return;
1591         }
1592
1593         /* grab lock so list doesn't move */
1594         spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
1595
1596         vha->pur_cinfo.enode_flags &= ~ENODE_ACTIVE; /* mark it not active */
1597
1598         /* hopefully this is a null list at this point */
1599         list_for_each_entry_safe(node, q, &vha->pur_cinfo.head, list) {
1600                 ql_dbg(ql_dbg_edif, vha, 0x910f,
1601                     "%s freeing enode type=%x, cnt=%x\n", __func__, node->ntype,
1602                     node->dinfo.nodecnt);
1603                 list_del_init(&node->list);
1604                 qla_enode_free(vha, node);
1605         }
1606         spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
1607 }
1608
1609 static void qla_enode_clear(scsi_qla_host_t *vha, port_id_t portid)
1610 {
1611         unsigned    long flags;
1612         struct enode    *e, *tmp;
1613         struct purexevent   *purex;
1614         LIST_HEAD(enode_list);
1615
1616         if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) {
1617                 ql_dbg(ql_dbg_edif, vha, 0x09102,
1618                        "%s enode not active\n", __func__);
1619                 return;
1620         }
1621         spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
1622         list_for_each_entry_safe(e, tmp, &vha->pur_cinfo.head, list) {
1623                 purex = &e->u.purexinfo;
1624                 if (purex->pur_info.pur_sid.b24 == portid.b24) {
1625                         ql_dbg(ql_dbg_edif, vha, 0x911d,
1626                             "%s free ELS sid=%06x. xchg %x, nb=%xh\n",
1627                             __func__, portid.b24,
1628                             purex->pur_info.pur_rx_xchg_address,
1629                             purex->pur_info.pur_bytes_rcvd);
1630
1631                         list_del_init(&e->list);
1632                         list_add_tail(&e->list, &enode_list);
1633                 }
1634         }
1635         spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
1636
1637         list_for_each_entry_safe(e, tmp, &enode_list, list) {
1638                 list_del_init(&e->list);
1639                 qla_enode_free(vha, e);
1640         }
1641 }
1642
1643 /*
1644  *  allocate enode struct and populate buffer
1645  *  returns: enode pointer with buffers
1646  *           NULL on error
1647  */
1648 static struct enode *
1649 qla_enode_alloc(scsi_qla_host_t *vha, uint32_t ntype)
1650 {
1651         struct enode            *node;
1652         struct purexevent       *purex;
1653
1654         node = kzalloc(RX_ELS_SIZE, GFP_ATOMIC);
1655         if (!node)
1656                 return NULL;
1657
1658         purex = &node->u.purexinfo;
1659         purex->msgp = (u8 *)(node + 1);
1660         purex->msgp_len = ELS_MAX_PAYLOAD;
1661
1662         node->ntype = ntype;
1663         INIT_LIST_HEAD(&node->list);
1664         return node;
1665 }
1666
1667 static void
1668 qla_enode_add(scsi_qla_host_t *vha, struct enode *ptr)
1669 {
1670         unsigned long flags;
1671
1672         ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x9109,
1673             "%s add enode for type=%x, cnt=%x\n",
1674             __func__, ptr->ntype, ptr->dinfo.nodecnt);
1675
1676         spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
1677         list_add_tail(&ptr->list, &vha->pur_cinfo.head);
1678         spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
1679
1680         return;
1681 }
1682
1683 static struct enode *
1684 qla_enode_find(scsi_qla_host_t *vha, uint32_t ntype, uint32_t p1, uint32_t p2)
1685 {
1686         struct enode            *node_rtn = NULL;
1687         struct enode            *list_node, *q;
1688         unsigned long           flags;
1689         uint32_t                sid;
1690         struct purexevent       *purex;
1691
1692         /* secure the list from moving under us */
1693         spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
1694
1695         list_for_each_entry_safe(list_node, q, &vha->pur_cinfo.head, list) {
1696
1697                 /* node type determines what p1 and p2 are */
1698                 purex = &list_node->u.purexinfo;
1699                 sid = p1;
1700
1701                 if (purex->pur_info.pur_sid.b24 == sid) {
1702                         /* found it and its complete */
1703                         node_rtn = list_node;
1704                         list_del(&list_node->list);
1705                         break;
1706                 }
1707         }
1708
1709         spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
1710
1711         return node_rtn;
1712 }
1713
1714 /**
1715  * qla_pur_get_pending - read/return authentication message sent
1716  *  from remote port
1717  * @vha: host adapter pointer
1718  * @fcport: session pointer
1719  * @bsg_job: user request where the message is copy to.
1720  */
1721 static int
1722 qla_pur_get_pending(scsi_qla_host_t *vha, fc_port_t *fcport,
1723         struct bsg_job *bsg_job)
1724 {
1725         struct enode            *ptr;
1726         struct purexevent       *purex;
1727         struct qla_bsg_auth_els_reply *rpl =
1728             (struct qla_bsg_auth_els_reply *)bsg_job->reply;
1729
1730         bsg_job->reply_len = sizeof(*rpl);
1731
1732         ptr = qla_enode_find(vha, N_PUREX, fcport->d_id.b24, PUR_GET);
1733         if (!ptr) {
1734                 ql_dbg(ql_dbg_edif, vha, 0x9111,
1735                     "%s no enode data found for %8phN sid=%06x\n",
1736                     __func__, fcport->port_name, fcport->d_id.b24);
1737                 SET_DID_STATUS(rpl->r.result, DID_IMM_RETRY);
1738                 return -EIO;
1739         }
1740
1741         /*
1742          * enode is now off the linked list and is ours to deal with
1743          */
1744         purex = &ptr->u.purexinfo;
1745
1746         /* Copy info back to caller */
1747         rpl->rx_xchg_address = purex->pur_info.pur_rx_xchg_address;
1748
1749         SET_DID_STATUS(rpl->r.result, DID_OK);
1750         rpl->r.reply_payload_rcv_len =
1751             sg_pcopy_from_buffer(bsg_job->reply_payload.sg_list,
1752                 bsg_job->reply_payload.sg_cnt, purex->msgp,
1753                 purex->pur_info.pur_bytes_rcvd, 0);
1754
1755         /* data copy / passback completed - destroy enode */
1756         qla_enode_free(vha, ptr);
1757
1758         return 0;
1759 }
1760
1761 /* it is assume qpair lock is held */
1762 static int
1763 qla_els_reject_iocb(scsi_qla_host_t *vha, struct qla_qpair *qp,
1764         struct qla_els_pt_arg *a)
1765 {
1766         struct els_entry_24xx *els_iocb;
1767
1768         els_iocb = __qla2x00_alloc_iocbs(qp, NULL);
1769         if (!els_iocb) {
1770                 ql_log(ql_log_warn, vha, 0x700c,
1771                     "qla2x00_alloc_iocbs failed.\n");
1772                 return QLA_FUNCTION_FAILED;
1773         }
1774
1775         qla_els_pt_iocb(vha, els_iocb, a);
1776
1777         ql_dbg(ql_dbg_edif, vha, 0x0183,
1778             "Sending ELS reject...\n");
1779         ql_dump_buffer(ql_dbg_edif + ql_dbg_verbose, vha, 0x0185,
1780             vha->hw->elsrej.c, sizeof(*vha->hw->elsrej.c));
1781         /* flush iocb to mem before notifying hw doorbell */
1782         wmb();
1783         qla2x00_start_iocbs(vha, qp->req);
1784         return 0;
1785 }
1786
1787 void
1788 qla_edb_init(scsi_qla_host_t *vha)
1789 {
1790         if (vha->e_dbell.db_flags == EDB_ACTIVE) {
1791                 /* list already init'd - error */
1792                 ql_dbg(ql_dbg_edif, vha, 0x09102,
1793                     "edif db already initialized, cannot reinit\n");
1794                 return;
1795         }
1796
1797         /* initialize lock which protects doorbell & init list */
1798         spin_lock_init(&vha->e_dbell.db_lock);
1799         INIT_LIST_HEAD(&vha->e_dbell.head);
1800
1801         /* create and initialize doorbell */
1802         init_completion(&vha->e_dbell.dbell);
1803 }
1804
static void
qla_edb_node_free(scsi_qla_host_t *vha, struct edb_node *node)
{
	/*
	 * releases the space held by this edb node entry
	 * this function does _not_ free the edb node itself
	 * NB: the edb node entry passed should not be on any list
	 *
	 * currently for doorbell there's no additional cleanup
	 * needed, but here as a placeholder for future use.
	 */

	if (!node) {
		ql_dbg(ql_dbg_edif, vha, 0x09122,
		    "%s error - no valid node passed\n", __func__);
		return;
	}

	/* poison the type so reuse of a released entry is detectable */
	node->ntype = N_UNDEF;
}
1825
1826 static void qla_edb_clear(scsi_qla_host_t *vha, port_id_t portid)
1827 {
1828         unsigned long flags;
1829         struct edb_node *e, *tmp;
1830         port_id_t sid;
1831         LIST_HEAD(edb_list);
1832
1833         if (vha->e_dbell.db_flags != EDB_ACTIVE) {
1834                 /* doorbell list not enabled */
1835                 ql_dbg(ql_dbg_edif, vha, 0x09102,
1836                        "%s doorbell not enabled\n", __func__);
1837                 return;
1838         }
1839
1840         /* grab lock so list doesn't move */
1841         spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
1842         list_for_each_entry_safe(e, tmp, &vha->e_dbell.head, list) {
1843                 switch (e->ntype) {
1844                 case VND_CMD_AUTH_STATE_NEEDED:
1845                 case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
1846                         sid = e->u.plogi_did;
1847                         break;
1848                 case VND_CMD_AUTH_STATE_ELS_RCVD:
1849                         sid = e->u.els_sid;
1850                         break;
1851                 case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
1852                         /* app wants to see this  */
1853                         continue;
1854                 default:
1855                         ql_log(ql_log_warn, vha, 0x09102,
1856                                "%s unknown node type: %x\n", __func__, e->ntype);
1857                         sid.b24 = 0;
1858                         break;
1859                 }
1860                 if (sid.b24 == portid.b24) {
1861                         ql_dbg(ql_dbg_edif, vha, 0x910f,
1862                                "%s free doorbell event : node type = %x %p\n",
1863                                __func__, e->ntype, e);
1864                         list_del_init(&e->list);
1865                         list_add_tail(&e->list, &edb_list);
1866                 }
1867         }
1868         spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
1869
1870         list_for_each_entry_safe(e, tmp, &edb_list, list) {
1871                 qla_edb_node_free(vha, e);
1872                 list_del_init(&e->list);
1873                 kfree(e);
1874         }
1875 }
1876
1877 /* function called when app is stopping */
1878
1879 void
1880 qla_edb_stop(scsi_qla_host_t *vha)
1881 {
1882         unsigned long flags;
1883         struct edb_node *node, *q;
1884
1885         if (vha->e_dbell.db_flags != EDB_ACTIVE) {
1886                 /* doorbell list not enabled */
1887                 ql_dbg(ql_dbg_edif, vha, 0x09102,
1888                     "%s doorbell not enabled\n", __func__);
1889                 return;
1890         }
1891
1892         /* grab lock so list doesn't move */
1893         spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
1894
1895         vha->e_dbell.db_flags &= ~EDB_ACTIVE; /* mark it not active */
1896         /* hopefully this is a null list at this point */
1897         list_for_each_entry_safe(node, q, &vha->e_dbell.head, list) {
1898                 ql_dbg(ql_dbg_edif, vha, 0x910f,
1899                     "%s freeing edb_node type=%x\n",
1900                     __func__, node->ntype);
1901                 qla_edb_node_free(vha, node);
1902                 list_del(&node->list);
1903
1904                 kfree(node);
1905         }
1906         spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
1907
1908         /* wake up doorbell waiters - they'll be dismissed with error code */
1909         complete_all(&vha->e_dbell.dbell);
1910 }
1911
1912 static struct edb_node *
1913 qla_edb_node_alloc(scsi_qla_host_t *vha, uint32_t ntype)
1914 {
1915         struct edb_node *node;
1916
1917         node = kzalloc(sizeof(*node), GFP_ATOMIC);
1918         if (!node) {
1919                 /* couldn't get space */
1920                 ql_dbg(ql_dbg_edif, vha, 0x9100,
1921                     "edb node unable to be allocated\n");
1922                 return NULL;
1923         }
1924
1925         node->ntype = ntype;
1926         INIT_LIST_HEAD(&node->list);
1927         return node;
1928 }
1929
1930 /* adds a already allocated enode to the linked list */
1931 static bool
1932 qla_edb_node_add(scsi_qla_host_t *vha, struct edb_node *ptr)
1933 {
1934         unsigned long           flags;
1935
1936         if (vha->e_dbell.db_flags != EDB_ACTIVE) {
1937                 /* doorbell list not enabled */
1938                 ql_dbg(ql_dbg_edif, vha, 0x09102,
1939                     "%s doorbell not enabled\n", __func__);
1940                 return false;
1941         }
1942
1943         spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
1944         list_add_tail(&ptr->list, &vha->e_dbell.head);
1945         spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
1946
1947         /* ring doorbell for waiters */
1948         complete(&vha->e_dbell.dbell);
1949
1950         return true;
1951 }
1952
/* adds event to doorbell list */
void
qla_edb_eventcreate(scsi_qla_host_t *vha, uint32_t dbtype,
	uint32_t data, uint32_t data2, fc_port_t	*sfcport)
{
	struct edb_node *edbnode;
	fc_port_t *fcport = sfcport;
	port_id_t id;

	if (!vha->hw->flags.edif_enabled) {
		/* edif not enabled */
		return;
	}

	/* app not consuming: record the state on the session and bail */
	if (vha->e_dbell.db_flags != EDB_ACTIVE) {
		if (fcport)
			fcport->edif.auth_state = dbtype;
		/* doorbell list not enabled */
		ql_dbg(ql_dbg_edif, vha, 0x09102,
		    "%s doorbell not enabled (type=%d\n", __func__, dbtype);
		return;
	}

	edbnode = qla_edb_node_alloc(vha, dbtype);
	if (!edbnode) {
		ql_dbg(ql_dbg_edif, vha, 0x09102,
		    "%s unable to alloc db node\n", __func__);
		return;
	}

	/* no session given: derive the s_id from @data and look it up */
	if (!fcport) {
		id.b.domain = (data >> 16) & 0xff;
		id.b.area = (data >> 8) & 0xff;
		id.b.al_pa = data & 0xff;
		ql_dbg(ql_dbg_edif, vha, 0x09222,
		    "%s: Arrived s_id: %06x\n", __func__,
		    id.b24);
		fcport = qla2x00_find_fcport_by_pid(vha, &id);
		if (!fcport) {
			ql_dbg(ql_dbg_edif, vha, 0x09102,
			    "%s can't find fcport for sid= 0x%x - ignoring\n",
			__func__, id.b24);
			kfree(edbnode);
			return;
		}
	}

	/* populate the edb node */
	switch (dbtype) {
	case VND_CMD_AUTH_STATE_NEEDED:
	case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
		edbnode->u.plogi_did.b24 = fcport->d_id.b24;
		break;
	case VND_CMD_AUTH_STATE_ELS_RCVD:
		edbnode->u.els_sid.b24 = fcport->d_id.b24;
		break;
	case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
		edbnode->u.sa_aen.port_id = fcport->d_id;
		edbnode->u.sa_aen.status =  data;
		edbnode->u.sa_aen.key_type =  data2;
		break;
	default:
		/* unknown event type: drop the node on the floor */
		ql_dbg(ql_dbg_edif, vha, 0x09102,
			"%s unknown type: %x\n", __func__, dbtype);
		qla_edb_node_free(vha, edbnode);
		kfree(edbnode);
		edbnode = NULL;
		break;
	}

	/* queue the event; on failure ownership stays here and it is freed */
	if (edbnode && (!qla_edb_node_add(vha, edbnode))) {
		ql_dbg(ql_dbg_edif, vha, 0x09102,
		    "%s unable to add dbnode\n", __func__);
		qla_edb_node_free(vha, edbnode);
		kfree(edbnode);
		return;
	}
	if (edbnode && fcport)
		fcport->edif.auth_state = dbtype;
	ql_dbg(ql_dbg_edif, vha, 0x09102,
	    "%s Doorbell produced : type=%d %p\n", __func__, dbtype, edbnode);
}
2035
2036 static struct edb_node *
2037 qla_edb_getnext(scsi_qla_host_t *vha)
2038 {
2039         unsigned long   flags;
2040         struct edb_node *edbnode = NULL;
2041
2042         spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
2043
2044         /* db nodes are fifo - no qualifications done */
2045         if (!list_empty(&vha->e_dbell.head)) {
2046                 edbnode = list_first_entry(&vha->e_dbell.head,
2047                     struct edb_node, list);
2048                 list_del(&edbnode->list);
2049         }
2050
2051         spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
2052
2053         return edbnode;
2054 }
2055
2056 void
2057 qla_edif_timer(scsi_qla_host_t *vha)
2058 {
2059         struct qla_hw_data *ha = vha->hw;
2060
2061         if (!vha->vp_idx && N2N_TOPO(ha) && ha->flags.n2n_fw_acc_sec) {
2062                 if (vha->e_dbell.db_flags != EDB_ACTIVE &&
2063                     ha->edif_post_stop_cnt_down) {
2064                         ha->edif_post_stop_cnt_down--;
2065
2066                         /*
2067                          * turn off auto 'Plogi Acc + secure=1' feature
2068                          * Set Add FW option[3]
2069                          * BIT_15, if.
2070                          */
2071                         if (ha->edif_post_stop_cnt_down == 0) {
2072                                 ql_dbg(ql_dbg_async, vha, 0x911d,
2073                                        "%s chip reset to turn off PLOGI ACC + secure\n",
2074                                        __func__);
2075                                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2076                         }
2077                 } else {
2078                         ha->edif_post_stop_cnt_down = 60;
2079                 }
2080         }
2081 }
2082
/*
 * app uses separate thread to read this. It'll wait until the doorbell
 * is rung by the driver or the max wait time has expired
 */
ssize_t
edif_doorbell_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct edb_node *dbnode = NULL;
	struct edif_app_dbell *ap = (struct edif_app_dbell *)buf;
	uint32_t dat_siz, buf_size, sz;

	/* TODO: app currently hardcoded to 256. Will transition to bsg */
	sz = 256;

	/* stop new threads from waiting if we're not init'd */
	if (vha->e_dbell.db_flags != EDB_ACTIVE) {
		ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x09122,
		    "%s error - edif db not enabled\n", __func__);
		return 0;
	}

	if (!vha->hw->flags.edif_enabled) {
		/* edif not enabled */
		ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x09122,
		    "%s error - edif not enabled\n", __func__);
		return -1;
	}

	/*
	 * Pack as many queued events as fit into the 256-byte window.
	 * NOTE(review): the headroom check uses sizeof(struct edb_node)
	 * as a worst-case record size rather than the emitted
	 * edif_app_dbell layout - presumably conservative; confirm.
	 */
	buf_size = 0;
	while ((sz - buf_size) >= sizeof(struct edb_node)) {
		/* remove the next item from the doorbell list */
		dat_siz = 0;
		dbnode = qla_edb_getnext(vha);
		if (dbnode) {
			ap->event_code = dbnode->ntype;
			/* event payload depends on the event type */
			switch (dbnode->ntype) {
			case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
			case VND_CMD_AUTH_STATE_NEEDED:
				ap->port_id = dbnode->u.plogi_did;
				dat_siz += sizeof(ap->port_id);
				break;
			case VND_CMD_AUTH_STATE_ELS_RCVD:
				ap->port_id = dbnode->u.els_sid;
				dat_siz += sizeof(ap->port_id);
				break;
			case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
				ap->port_id = dbnode->u.sa_aen.port_id;
				memcpy(ap->event_data, &dbnode->u,
						sizeof(struct edif_sa_update_aen));
				dat_siz += sizeof(struct edif_sa_update_aen);
				break;
			default:
				/* unknown node type, rtn unknown ntype */
				ap->event_code = VND_CMD_AUTH_STATE_UNDEF;
				memcpy(ap->event_data, &dbnode->ntype, 4);
				dat_siz += 4;
				break;
			}

			ql_dbg(ql_dbg_edif, vha, 0x09102,
				"%s Doorbell consumed : type=%d %p\n",
				__func__, dbnode->ntype, dbnode);
			/* we're done with the db node, so free it up */
			qla_edb_node_free(vha, dbnode);
			kfree(dbnode);
		} else {
			break;
		}

		ap->event_data_size = dat_siz;
		/* 8bytes = ap->event_code + ap->event_data_size */
		buf_size += dat_siz + 8;
		ap = (struct edif_app_dbell *)(buf + buf_size);
	}
	return buf_size;
}
2161
/*
 * SRB completion callback that only drops the initial reference;
 * used when no further processing of the response is required.
 */
static void qla_noop_sp_done(srb_t *sp, int res)
{
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
2167
2168 /*
2169  * Called from work queue
2170  * build and send the sa_update iocb to delete an rx sa_index
2171  */
2172 int
2173 qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha, struct qla_work_evt *e)
2174 {
2175         srb_t *sp;
2176         fc_port_t       *fcport = NULL;
2177         struct srb_iocb *iocb_cmd = NULL;
2178         int rval = QLA_SUCCESS;
2179         struct  edif_sa_ctl *sa_ctl = e->u.sa_update.sa_ctl;
2180         uint16_t nport_handle = e->u.sa_update.nport_handle;
2181
2182         ql_dbg(ql_dbg_edif, vha, 0x70e6,
2183             "%s: starting,  sa_ctl: %p\n", __func__, sa_ctl);
2184
2185         if (!sa_ctl) {
2186                 ql_dbg(ql_dbg_edif, vha, 0x70e6,
2187                     "sa_ctl allocation failed\n");
2188                 return -ENOMEM;
2189         }
2190
2191         fcport = sa_ctl->fcport;
2192
2193         /* Alloc SRB structure */
2194         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2195         if (!sp) {
2196                 ql_dbg(ql_dbg_edif, vha, 0x70e6,
2197                  "SRB allocation failed\n");
2198                 return -ENOMEM;
2199         }
2200
2201         fcport->flags |= FCF_ASYNC_SENT;
2202         iocb_cmd = &sp->u.iocb_cmd;
2203         iocb_cmd->u.sa_update.sa_ctl = sa_ctl;
2204
2205         ql_dbg(ql_dbg_edif, vha, 0x3073,
2206             "Enter: SA REPL portid=%06x, sa_ctl %p, index %x, nport_handle: 0x%x\n",
2207             fcport->d_id.b24, sa_ctl, sa_ctl->index, nport_handle);
2208         /*
2209          * if this is a sadb cleanup delete, mark it so the isr can
2210          * take the correct action
2211          */
2212         if (sa_ctl->flags & EDIF_SA_CTL_FLG_CLEANUP_DEL) {
2213                 /* mark this srb as a cleanup delete */
2214                 sp->flags |= SRB_EDIF_CLEANUP_DELETE;
2215                 ql_dbg(ql_dbg_edif, vha, 0x70e6,
2216                     "%s: sp 0x%p flagged as cleanup delete\n", __func__, sp);
2217         }
2218
2219         sp->type = SRB_SA_REPLACE;
2220         sp->name = "SA_REPLACE";
2221         sp->fcport = fcport;
2222         sp->free = qla2x00_rel_sp;
2223         sp->done = qla_noop_sp_done;
2224
2225         rval = qla2x00_start_sp(sp);
2226
2227         if (rval != QLA_SUCCESS)
2228                 rval = QLA_FUNCTION_FAILED;
2229
2230         return rval;
2231 }
2232
/*
 * Build an SA_UPDATE IOCB from the sa_frame carried in @sp, filling in
 * address, key material and flag bits for the firmware.
 */
void qla24xx_sa_update_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb)
{
	int	itr = 0;
	struct	scsi_qla_host		*vha = sp->vha;
	struct	qla_sa_update_frame	*sa_frame =
		&sp->u.iocb_cmd.u.sa_update.sa_frame;
	u8 flags = 0;

	/*
	 * Map the app's request flags onto the IOCB flag bits.
	 * NOTE(review): the numeric cases assume SAU_FLG_INV is bit 0 and
	 * SAU_FLG_TX is bit 1 of the masked value - confirm against
	 * qla_edif.h if those defines ever change.
	 */
	switch (sa_frame->flags & (SAU_FLG_INV | SAU_FLG_TX)) {
	case 0:
		ql_dbg(ql_dbg_edif, vha, 0x911d,
		    "%s: EDIF SA UPDATE RX IOCB  vha: 0x%p  index: %d\n",
		    __func__, vha, sa_frame->fast_sa_index);
		break;
	case 1:
		ql_dbg(ql_dbg_edif, vha, 0x911d,
		    "%s: EDIF SA DELETE RX IOCB  vha: 0x%p  index: %d\n",
		    __func__, vha, sa_frame->fast_sa_index);
		flags |= SA_FLAG_INVALIDATE;
		break;
	case 2:
		ql_dbg(ql_dbg_edif, vha, 0x911d,
		    "%s: EDIF SA UPDATE TX IOCB  vha: 0x%p  index: %d\n",
		    __func__, vha, sa_frame->fast_sa_index);
		flags |= SA_FLAG_TX;
		break;
	case 3:
		ql_dbg(ql_dbg_edif, vha, 0x911d,
		    "%s: EDIF SA DELETE TX IOCB  vha: 0x%p  index: %d\n",
		    __func__, vha, sa_frame->fast_sa_index);
		flags |= SA_FLAG_TX | SA_FLAG_INVALIDATE;
		break;
	}

	/* fixed IOCB header */
	sa_update_iocb->entry_type = SA_UPDATE_IOCB_TYPE;
	sa_update_iocb->entry_count = 1;
	sa_update_iocb->sys_define = 0;
	sa_update_iocb->entry_status = 0;
	sa_update_iocb->handle = sp->handle;
	sa_update_iocb->u.nport_handle = cpu_to_le16(sp->fcport->loop_id);
	sa_update_iocb->vp_index = sp->fcport->vha->vp_idx;
	/* destination port id, little-endian byte order */
	sa_update_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	sa_update_iocb->port_id[1] = sp->fcport->d_id.b.area;
	sa_update_iocb->port_id[2] = sp->fcport->d_id.b.domain;

	sa_update_iocb->flags = flags;
	sa_update_iocb->salt = cpu_to_le32(sa_frame->salt);
	sa_update_iocb->spi = cpu_to_le32(sa_frame->spi);
	sa_update_iocb->sa_index = cpu_to_le16(sa_frame->fast_sa_index);

	sa_update_iocb->sa_control |= SA_CNTL_ENC_FCSP;
	if (sp->fcport->edif.aes_gmac)
		sa_update_iocb->sa_control |= SA_CNTL_AES_GMAC;

	/* copy the key: 32 bytes for 256-bit keys, 16 otherwise */
	if (sa_frame->flags & SAU_FLG_KEY256) {
		sa_update_iocb->sa_control |= SA_CNTL_KEY256;
		for (itr = 0; itr < 32; itr++)
			sa_update_iocb->sa_key[itr] = sa_frame->sa_key[itr];
	} else {
		sa_update_iocb->sa_control |= SA_CNTL_KEY128;
		for (itr = 0; itr < 16; itr++)
			sa_update_iocb->sa_key[itr] = sa_frame->sa_key[itr];
	}

	ql_dbg(ql_dbg_edif, vha, 0x921d,
	    "%s SAU Port ID = %02x%02x%02x, flags=%xh, index=%u, ctl=%xh, SPI 0x%x flags 0x%x hdl=%x gmac %d\n",
	    __func__, sa_update_iocb->port_id[2], sa_update_iocb->port_id[1],
	    sa_update_iocb->port_id[0], sa_update_iocb->flags, sa_update_iocb->sa_index,
	    sa_update_iocb->sa_control, sa_update_iocb->spi, sa_frame->flags, sp->handle,
	    sp->fcport->edif.aes_gmac);

	/* remember which direction is in flight until the ISR completes it */
	if (sa_frame->flags & SAU_FLG_TX)
		sp->fcport->edif.tx_sa_pending = 1;
	else
		sp->fcport->edif.rx_sa_pending = 1;

	sp->fcport->vha->qla_stats.control_requests++;
}
2311
2312 void
2313 qla24xx_sa_replace_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb)
2314 {
2315         struct  scsi_qla_host           *vha = sp->vha;
2316         struct srb_iocb *srb_iocb = &sp->u.iocb_cmd;
2317         struct  edif_sa_ctl             *sa_ctl = srb_iocb->u.sa_update.sa_ctl;
2318         uint16_t nport_handle = sp->fcport->loop_id;
2319
2320         sa_update_iocb->entry_type = SA_UPDATE_IOCB_TYPE;
2321         sa_update_iocb->entry_count = 1;
2322         sa_update_iocb->sys_define = 0;
2323         sa_update_iocb->entry_status = 0;
2324         sa_update_iocb->handle = sp->handle;
2325
2326         sa_update_iocb->u.nport_handle = cpu_to_le16(nport_handle);
2327
2328         sa_update_iocb->vp_index = sp->fcport->vha->vp_idx;
2329         sa_update_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2330         sa_update_iocb->port_id[1] = sp->fcport->d_id.b.area;
2331         sa_update_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2332
2333         /* Invalidate the index. salt, spi, control & key are ignore */
2334         sa_update_iocb->flags = SA_FLAG_INVALIDATE;
2335         sa_update_iocb->salt = 0;
2336         sa_update_iocb->spi = 0;
2337         sa_update_iocb->sa_index = cpu_to_le16(sa_ctl->index);
2338         sa_update_iocb->sa_control = 0;
2339
2340         ql_dbg(ql_dbg_edif, vha, 0x921d,
2341             "%s SAU DELETE RX Port ID = %02x:%02x:%02x, lid %d flags=%xh, index=%u, hdl=%x\n",
2342             __func__, sa_update_iocb->port_id[2], sa_update_iocb->port_id[1],
2343             sa_update_iocb->port_id[0], nport_handle, sa_update_iocb->flags,
2344             sa_update_iocb->sa_index, sp->handle);
2345
2346         sp->fcport->vha->qla_stats.control_requests++;
2347 }
2348
/*
 * qla24xx_auth_els - handle an incoming AUTH ELS (PUREX) frame
 * @vha: adapter that received the frame
 * @pkt: in/out pointer to the purex iocb on the response ring
 * @rsp: in/out pointer to the response queue being processed
 *
 * Validates the frame, copies its payload into a freshly allocated enode,
 * queues the enode on the owning (possibly NPIV) host and posts an
 * ELS_RCVD doorbell event.  Any failure path sends an LS_RJT back via
 * qla_els_reject_iocb() and either consumes the iocb or frees the enode.
 */
void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp)
{
	struct purex_entry_24xx *p = *pkt;
	struct enode		*ptr;
	int		sid;
	u16 totlen;
	struct purexevent	*purex;
	struct scsi_qla_host *host = NULL;
	int rc;
	struct fc_port *fcport;
	struct qla_els_pt_arg a;
	be_id_t beid;

	/*
	 * Pre-build the LS_RJT argument block from the purex header so
	 * every error path below can reject with a single call.
	 */
	memset(&a, 0, sizeof(a));

	a.els_opcode = ELS_AUTH_ELS;
	a.nport_handle = p->nport_handle;
	a.rx_xchg_address = p->rx_xchg_addr;
	a.did.b.domain = p->s_id[2];
	a.did.b.area   = p->s_id[1];
	a.did.b.al_pa  = p->s_id[0];
	a.tx_byte_count = a.tx_len = sizeof(struct fc_els_ls_rjt);
	a.tx_addr = vha->hw->elsrej.cdma;
	a.vp_idx = vha->vp_idx;
	a.control_flags = EPD_ELS_RJT;

	/* 24-bit source port id, assembled from the s_id byte array. */
	sid = p->s_id[0] | (p->s_id[1] << 8) | (p->s_id[2] << 16);

	/* Payload length = 12-bit frame size minus the ELS header. */
	totlen = (le16_to_cpu(p->frame_size) & 0x0fff) - PURX_ELS_HEADER_SIZE;
	/* Truncated frame (bit 15 of status_flags): reject and drop. */
	if (le16_to_cpu(p->status_flags) & 0x8000) {
		totlen = le16_to_cpu(p->trunc_frame_size);
		qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
		__qla_consume_iocb(vha, pkt, rsp);
		return;
	}

	/* Oversized payload: reject and drop. */
	if (totlen > ELS_MAX_PAYLOAD) {
		ql_dbg(ql_dbg_edif, vha, 0x0910d,
		    "%s WARNING: verbose ELS frame received (totlen=%x)\n",
		    __func__, totlen);
		qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
		__qla_consume_iocb(vha, pkt, rsp);
		return;
	}

	if (!vha->hw->flags.edif_enabled) {
		/* edif support not enabled */
		ql_dbg(ql_dbg_edif, vha, 0x910e, "%s edif not enabled\n",
		    __func__);
		qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
		__qla_consume_iocb(vha, pkt, rsp);
		return;
	}

	/* Allocate an enode to carry the payload up to the app layer. */
	ptr = qla_enode_alloc(vha, N_PUREX);
	if (!ptr) {
		ql_dbg(ql_dbg_edif, vha, 0x09109,
		    "WARNING: enode alloc failed for sid=%x\n",
		    sid);
		qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
		__qla_consume_iocb(vha, pkt, rsp);
		return;
	}

	/* Record source/destination ids and exchange info in the enode. */
	purex = &ptr->u.purexinfo;
	purex->pur_info.pur_sid = a.did;
	purex->pur_info.pur_bytes_rcvd = totlen;
	purex->pur_info.pur_rx_xchg_address = le32_to_cpu(p->rx_xchg_addr);
	purex->pur_info.pur_nphdl = le16_to_cpu(p->nport_handle);
	purex->pur_info.pur_did.b.domain =  p->d_id[2];
	purex->pur_info.pur_did.b.area =  p->d_id[1];
	purex->pur_info.pur_did.b.al_pa =  p->d_id[0];
	purex->pur_info.vp_idx = p->vp_idx;

	/* Copy the ELS payload (may span multiple iocbs) into the enode. */
	rc = __qla_copy_purex_to_buffer(vha, pkt, rsp, purex->msgp,
		purex->msgp_len);
	if (rc) {
		qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
		qla_enode_free(vha, ptr);
		return;
	}
	/* Route to the (NPIV) host that owns the destination port id. */
	beid.al_pa = purex->pur_info.pur_did.b.al_pa;
	beid.area   = purex->pur_info.pur_did.b.area;
	beid.domain = purex->pur_info.pur_did.b.domain;
	host = qla_find_host_by_d_id(vha, beid);
	if (!host) {
		ql_log(ql_log_fatal, vha, 0x508b,
		    "%s Drop ELS due to unable to find host %06x\n",
		    __func__, purex->pur_info.pur_did.b24);

		qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
		qla_enode_free(vha, ptr);
		return;
	}

	fcport = qla2x00_find_fcport_by_pid(host, &purex->pur_info.pur_sid);

	/*
	 * Without an active app doorbell (or with the session down) nobody
	 * can consume the frame, so reject rather than queue it.
	 */
	if (host->e_dbell.db_flags != EDB_ACTIVE ||
	    (fcport && EDIF_SESSION_DOWN(fcport))) {
		ql_dbg(ql_dbg_edif, host, 0x0910c, "%s e_dbell.db_flags =%x %06x\n",
		    __func__, host->e_dbell.db_flags,
		    fcport ? fcport->d_id.b24 : 0);

		qla_els_reject_iocb(host, (*rsp)->qpair, &a);
		qla_enode_free(host, ptr);
		return;
	}

	/* add the local enode to the list */
	qla_enode_add(host, ptr);

	ql_dbg(ql_dbg_edif, host, 0x0910c,
	    "%s COMPLETE purex->pur_info.pur_bytes_rcvd =%xh s:%06x -> d:%06x xchg=%xh\n",
	    __func__, purex->pur_info.pur_bytes_rcvd, purex->pur_info.pur_sid.b24,
	    purex->pur_info.pur_did.b24, purex->pur_info.pur_rx_xchg_address);

	/* Tell the authentication app an ELS arrived from this sid. */
	qla_edb_eventcreate(host, VND_CMD_AUTH_STATE_ELS_RCVD, sid, 0, NULL);
}
2467
2468 static uint16_t  qla_edif_get_sa_index_from_freepool(fc_port_t *fcport, int dir)
2469 {
2470         struct scsi_qla_host *vha = fcport->vha;
2471         struct qla_hw_data *ha = vha->hw;
2472         void *sa_id_map;
2473         unsigned long flags = 0;
2474         u16 sa_index;
2475
2476         ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
2477             "%s: entry\n", __func__);
2478
2479         if (dir)
2480                 sa_id_map = ha->edif_tx_sa_id_map;
2481         else
2482                 sa_id_map = ha->edif_rx_sa_id_map;
2483
2484         spin_lock_irqsave(&ha->sadb_fp_lock, flags);
2485         sa_index = find_first_zero_bit(sa_id_map, EDIF_NUM_SA_INDEX);
2486         if (sa_index >=  EDIF_NUM_SA_INDEX) {
2487                 spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
2488                 return INVALID_EDIF_SA_INDEX;
2489         }
2490         set_bit(sa_index, sa_id_map);
2491         spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
2492
2493         if (dir)
2494                 sa_index += EDIF_TX_SA_INDEX_BASE;
2495
2496         ql_dbg(ql_dbg_edif, vha, 0x3063,
2497             "%s: index retrieved from free pool %d\n", __func__, sa_index);
2498
2499         return sa_index;
2500 }
2501
2502 /* find an sadb entry for an nport_handle */
2503 static struct edif_sa_index_entry *
2504 qla_edif_sadb_find_sa_index_entry(uint16_t nport_handle,
2505                 struct list_head *sa_list)
2506 {
2507         struct edif_sa_index_entry *entry;
2508         struct edif_sa_index_entry *tentry;
2509         struct list_head *indx_list = sa_list;
2510
2511         list_for_each_entry_safe(entry, tentry, indx_list, next) {
2512                 if (entry->handle == nport_handle)
2513                         return entry;
2514         }
2515         return NULL;
2516 }
2517
/*
 * Remove an sa_index from the nport_handle's sadb entry and return it to
 * the free pool.  Frees the entry itself once both of its slots are empty.
 * Returns 0 on success, -1 if no sadb entry exists for this nport_handle.
 */
static int qla_edif_sadb_delete_sa_index(fc_port_t *fcport, uint16_t nport_handle,
		uint16_t sa_index)
{
	struct edif_sa_index_entry *entry;
	struct list_head *sa_list;
	/* tx indexes live at/above EDIF_TX_SA_INDEX_BASE, rx below it */
	int dir = (sa_index < EDIF_TX_SA_INDEX_BASE) ? 0 : 1;
	int slot = 0;
	int free_slot_count = 0;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;

	ql_dbg(ql_dbg_edif, vha, 0x3063,
	    "%s: entry\n", __func__);

	if (dir)
		sa_list = &ha->sadb_tx_index_list;
	else
		sa_list = &ha->sadb_rx_index_list;

	/*
	 * NOTE(review): the lookup happens before sadb_lock is taken;
	 * presumably all mutators run in contexts that cannot race this
	 * path — confirm against the other sadb users.
	 */
	entry = qla_edif_sadb_find_sa_index_entry(nport_handle, sa_list);
	if (!entry) {
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: no entry found for nport_handle 0x%x\n",
		    __func__, nport_handle);
		return -1;
	}

	spin_lock_irqsave(&ha->sadb_lock, flags);
	/*
	 * each tx/rx direction has up to 2 sa indexes/slots. 1 slot for in flight traffic
	 * the other is used at re-key time.
	 */
	for (slot = 0; slot < 2; slot++) {
		if (entry->sa_pair[slot].sa_index == sa_index) {
			/* clear the slot and give the index back to the pool */
			entry->sa_pair[slot].sa_index = INVALID_EDIF_SA_INDEX;
			entry->sa_pair[slot].spi = 0;
			free_slot_count++;
			qla_edif_add_sa_index_to_freepool(fcport, dir, sa_index);
		} else if (entry->sa_pair[slot].sa_index == INVALID_EDIF_SA_INDEX) {
			free_slot_count++;
		}
	}

	/* both slots free: no SAs remain for this handle, drop the entry */
	if (free_slot_count == 2) {
		list_del(&entry->next);
		kfree(entry);
	}
	spin_unlock_irqrestore(&ha->sadb_lock, flags);

	ql_dbg(ql_dbg_edif, vha, 0x3063,
	    "%s: sa_index %d removed, free_slot_count: %d\n",
	    __func__, sa_index, free_slot_count);

	return 0;
}
2575
/*
 * qla28xx_sa_update_iocb_entry - completion handler for an SA_UPDATE iocb
 * (key update or key delete, in either the tx or rx direction).
 * @v: adapter the completion arrived on
 * @req: request queue holding the outstanding srb
 * @pkt: the completed SA_UPDATE iocb from the firmware
 *
 * Updates per-port edif state, posts success/failure doorbell events to
 * the authentication app (unless this is an internal cleanup delete),
 * and releases the sa_ctl/sa_index bookkeeping for deletes.
 */
void
qla28xx_sa_update_iocb_entry(scsi_qla_host_t *v, struct req_que *req,
	struct sa_update_28xx *pkt)
{
	const char *func = "SA_UPDATE_RESPONSE_IOCB";
	srb_t *sp;
	struct edif_sa_ctl *sa_ctl;
	int old_sa_deleted = 1;
	uint16_t nport_handle;
	struct scsi_qla_host *vha;

	sp = qla2x00_get_sp_from_handle(v, func, req, pkt);

	if (!sp) {
		ql_dbg(ql_dbg_edif, v, 0x3063,
			"%s: no sp found for pkt\n", __func__);
		return;
	}
	/* use sp->vha due to npiv */
	vha = sp->vha;

	/* Log which of the four update/delete x tx/rx cases completed. */
	switch (pkt->flags & (SA_FLAG_INVALIDATE | SA_FLAG_TX)) {
	case 0:
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: EDIF SA UPDATE RX IOCB  vha: 0x%p  index: %d\n",
		    __func__, vha, pkt->sa_index);
		break;
	case 1:
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: EDIF SA DELETE RX IOCB  vha: 0x%p  index: %d\n",
		    __func__, vha, pkt->sa_index);
		break;
	case 2:
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: EDIF SA UPDATE TX IOCB  vha: 0x%p  index: %d\n",
		    __func__, vha, pkt->sa_index);
		break;
	case 3:
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: EDIF SA DELETE TX IOCB  vha: 0x%p  index: %d\n",
		    __func__, vha, pkt->sa_index);
		break;
	}

	/*
	 * dig the nport handle out of the iocb, fcport->loop_id can not be trusted
	 * to be correct during cleanup sa_update iocbs.
	 *
	 * NOTE(review): despite the comment above, this reads
	 * sp->fcport->loop_id rather than pkt->u.nport_handle — confirm
	 * which source is intended.
	 */
	nport_handle = sp->fcport->loop_id;

	ql_dbg(ql_dbg_edif, vha, 0x3063,
	    "%s: %8phN comp status=%x old_sa_info=%x new_sa_info=%x lid %d, index=0x%x pkt_flags %xh hdl=%x\n",
	    __func__, sp->fcport->port_name, pkt->u.comp_sts, pkt->old_sa_info, pkt->new_sa_info,
	    nport_handle, pkt->sa_index, pkt->flags, sp->handle);

	/* if rx delete, remove the timer */
	if ((pkt->flags & (SA_FLAG_INVALIDATE | SA_FLAG_TX)) ==  SA_FLAG_INVALIDATE) {
		struct edif_list_entry *edif_entry;

		sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

		edif_entry = qla_edif_list_find_sa_index(sp->fcport, nport_handle);
		if (edif_entry) {
			ql_dbg(ql_dbg_edif, vha, 0x5033,
			    "%s: removing edif_entry %p, new sa_index: 0x%x\n",
			    __func__, edif_entry, pkt->sa_index);
			qla_edif_list_delete_sa_index(sp->fcport, edif_entry);
			del_timer(&edif_entry->timer);

			ql_dbg(ql_dbg_edif, vha, 0x5033,
			    "%s: releasing edif_entry %p, new sa_index: 0x%x\n",
			    __func__, edif_entry, pkt->sa_index);

			kfree(edif_entry);
		}
	}

	/*
	 * if this is a delete for either tx or rx, make sure it succeeded.
	 * The new_sa_info field should be 0xffff on success
	 */
	if (pkt->flags & SA_FLAG_INVALIDATE)
		old_sa_deleted = (le16_to_cpu(pkt->new_sa_info) == 0xffff) ? 1 : 0;

	/* Process update and delete the same way */

	/* If this is an sadb cleanup delete, bypass sending events to IPSEC */
	if (sp->flags & SRB_EDIF_CLEANUP_DELETE) {
		sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: nph 0x%x, sa_index %d removed from fw\n",
		    __func__, sp->fcport->loop_id, pkt->sa_index);

	} else if ((pkt->entry_status == 0) && (pkt->u.comp_sts == 0) &&
	    old_sa_deleted) {
		/*
		 * Note: We are only keeping track of latest SA,
		 * so we know when we can start enabling encryption per I/O.
		 * If all SA's get deleted, let FW reject the IOCB.

		 * TODO: edif: don't set enabled here I think
		 * TODO: edif: prli complete is where it should be set
		 */
		ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
			"SA(%x)updated for s_id %02x%02x%02x\n",
			pkt->new_sa_info,
			pkt->port_id[2], pkt->port_id[1], pkt->port_id[0]);
		sp->fcport->edif.enable = 1;
		/* record which direction's key is now in place and notify app */
		if (pkt->flags & SA_FLAG_TX) {
			sp->fcport->edif.tx_sa_set = 1;
			sp->fcport->edif.tx_sa_pending = 0;
			qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
				QL_VND_SA_STAT_SUCCESS,
				QL_VND_TX_SA_KEY, sp->fcport);
		} else {
			sp->fcport->edif.rx_sa_set = 1;
			sp->fcport->edif.rx_sa_pending = 0;
			qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
				QL_VND_SA_STAT_SUCCESS,
				QL_VND_RX_SA_KEY, sp->fcport);
		}
	} else {
		/* failure: report comp status and direction to the app */
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: %8phN SA update FAILED: sa_index: %d, new_sa_info %d, %02x%02x%02x\n",
		    __func__, sp->fcport->port_name, pkt->sa_index, pkt->new_sa_info,
		    pkt->port_id[2], pkt->port_id[1], pkt->port_id[0]);

		if (pkt->flags & SA_FLAG_TX)
			qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
				(le16_to_cpu(pkt->u.comp_sts) << 16) | QL_VND_SA_STAT_FAILED,
				QL_VND_TX_SA_KEY, sp->fcport);
		else
			qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
				(le16_to_cpu(pkt->u.comp_sts) << 16) | QL_VND_SA_STAT_FAILED,
				QL_VND_RX_SA_KEY, sp->fcport);
	}

	/* for delete, release sa_ctl, sa_index */
	if (pkt->flags & SA_FLAG_INVALIDATE) {
		/* release the sa_ctl */
		sa_ctl = qla_edif_find_sa_ctl_by_index(sp->fcport,
		    le16_to_cpu(pkt->sa_index), (pkt->flags & SA_FLAG_TX));
		if (sa_ctl &&
		    qla_edif_find_sa_ctl_by_index(sp->fcport, sa_ctl->index,
			(pkt->flags & SA_FLAG_TX)) != NULL) {
			ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
			    "%s: freeing sa_ctl for index %d\n",
			    __func__, sa_ctl->index);
			qla_edif_free_sa_ctl(sp->fcport, sa_ctl, sa_ctl->index);
		} else {
			ql_dbg(ql_dbg_edif, vha, 0x3063,
			    "%s: sa_ctl NOT freed, sa_ctl: %p\n",
			    __func__, sa_ctl);
		}
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: freeing sa_index %d, nph: 0x%x\n",
		    __func__, le16_to_cpu(pkt->sa_index), nport_handle);
		qla_edif_sadb_delete_sa_index(sp->fcport, nport_handle,
		    le16_to_cpu(pkt->sa_index));
	/*
	 * check for a failed sa_update and remove
	 * the sadb entry.
	 */
	} else if (pkt->u.comp_sts) {
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: freeing sa_index %d, nph: 0x%x\n",
		    __func__, pkt->sa_index, nport_handle);
		qla_edif_sadb_delete_sa_index(sp->fcport, nport_handle,
		    le16_to_cpu(pkt->sa_index));
		/* these statuses mean the session itself is gone */
		switch (le16_to_cpu(pkt->u.comp_sts)) {
		case CS_PORT_EDIF_UNAVAIL:
		case CS_PORT_EDIF_LOGOUT:
			qlt_schedule_sess_for_deletion(sp->fcport);
			break;
		default:
			break;
		}
	}

	sp->done(sp, 0);
}
2757
/**
 * qla28xx_start_scsi_edif() - Send a SCSI type 6 command to the ISP
 * @sp: command to send to the ISP
 *
 * Builds a Command Type 6 iocb (the variant that carries the FCP_CMND IU
 * in a separate DMA buffer) with the EDIF encryption flag set, places it
 * on the request ring and rings the doorbell.
 *
 * Return: non-zero if a failure occurred, else zero.
 */
int
qla28xx_start_scsi_edif(srb_t *sp)
{
	int		nseg;
	unsigned long	flags;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t	index, i;
	uint32_t	handle;
	uint16_t	cnt;
	int16_t	req_cnt;
	uint16_t	tot_dsds;
	__be32 *fcp_dl;
	uint8_t additional_cdb_len;
	struct ct6_dsd *ctx;
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct cmd_type_6 *cmd_pkt;
	struct dsd64	*cur_dsd;
	uint8_t		avail_dsds = 0;
	struct scatterlist *sg;
	struct req_que *req = sp->qpair->req;
	spinlock_t *lock = sp->qpair->qp_lock_ptr;

	/* Setup device pointers. */
	cmd = GET_CMD_SP(sp);

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, sp->qpair, 0, 0, MK_SYNC_ALL) !=
			QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x300c,
			    "qla2x00_marker failed for cmd=%p.\n", cmd);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(lock, flags);

	/* Check for room in outstanding command list. */
	/* handle 0 is reserved; search wraps within [1, num_outstanding_cmds) */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else {
		nseg = 0;
	}

	/* Make sure the ring has room for the iocbs, with 2 slots of slack. */
	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    rd_reg_dword(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Per-command context: holds the FCP_CMND IU buffer and DSD lists. */
	ctx = sp->u.scmd.ct6_ctx =
	    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
	if (!ctx) {
		ql_log(ql_log_fatal, vha, 0x3010,
		    "Failed to allocate ctx for cmd=%p.\n", cmd);
		goto queuing_error;
	}

	memset(ctx, 0, sizeof(struct ct6_dsd));
	ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
	    GFP_ATOMIC, &ctx->fcp_cmnd_dma);
	if (!ctx->fcp_cmnd) {
		ql_log(ql_log_fatal, vha, 0x3011,
		    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
		goto queuing_error;
	}

	/* Initialize the DSD list and dma handle */
	INIT_LIST_HEAD(&ctx->dsd_list);
	ctx->dsd_use_cnt = 0;

	/*
	 * FCP_CMND length: 12 fixed bytes + CDB + 4-byte FCP_DL; CDBs
	 * longer than 16 bytes must be padded to a multiple of 4.
	 */
	if (cmd->cmd_len > 16) {
		additional_cdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/*
			 * SCSI command bigger than 16 bytes must be
			 * multiple of 4
			 */
			ql_log(ql_log_warn, vha, 0x3012,
			    "scsi cmd len %d not multiple of 4 for cmd=%p.\n",
			    cmd->cmd_len, cmd);
			goto queuing_error_fcp_cmnd;
		}
		ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_cdb_len = 0;
		ctx->fcp_cmnd_len = 12 + 16 + 4;
	}

	cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	/*
	 * Zero out remaining portion of packet.
	 * tagged queuing modifier -- default is TSK_SIMPLE (0).
	 */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		goto no_dsds;
	}

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
		sp->fcport->edif.tx_bytes += scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
		sp->fcport->edif.rx_bytes += scsi_bufflen(cmd);
	}

	/* Request encryption for this I/O; never request a new SA here. */
	cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF);
	cmd_pkt->control_flags &= ~(cpu_to_le16(CF_NEW_SA));

	/* One DSD is available in the Command Type 6 IOCB */
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->fcp_dsd;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		put_unaligned_le64(sle_dma, &cur_dsd->address);
		cur_dsd->length = cpu_to_le32(sg_dma_len(sg));
		cur_dsd++;
		avail_dsds--;
	}

no_dsds:
	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->vha->vp_idx;

	cmd_pkt->entry_type = COMMAND_TYPE_6;

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* build FCP_CMND IU */
	int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
	ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;

	/* low two bits of additional_cdb_len carry the data direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		ctx->fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		ctx->fcp_cmnd->additional_cdb_len |= 2;

	/* Populate the FCP_PRIO. */
	if (ha->flags.fcp_prio_enabled)
		ctx->fcp_cmnd->task_attribute |=
		    sp->fcport->fcp_prio << 3;

	memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);

	/* FCP_DL (big-endian data length) sits right after the CDB */
	fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
	    additional_cdb_len);
	*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));

	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
	put_unaligned_le64(ctx->fcp_cmnd_dma, &cmd_pkt->fcp_cmnd_dseg_address);

	sp->flags |= SRB_FCP_CMND_DMA_VALID;
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
	/* Set total data segment count. */
	/* NOTE(review): entry_count was already set above; redundant but harmless. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	cmd_pkt->entry_status = 0;

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Adjust ring index. */
	wmb();
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	sp->qpair->cmd_cnt++;
	/* Set chip new ring index. */
	wrt_reg_dword(req->req_q_in, req->ring_index);

	spin_unlock_irqrestore(lock, flags);

	return QLA_SUCCESS;

queuing_error_fcp_cmnd:
	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
queuing_error:
	/* unwind in reverse order: sg mapping, then the ct6 context */
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	if (sp->u.scmd.ct6_ctx) {
		mempool_free(sp->u.scmd.ct6_ctx, ha->ctx_mempool);
		sp->u.scmd.ct6_ctx = NULL;
	}
	spin_unlock_irqrestore(lock, flags);

	return QLA_FUNCTION_FAILED;
}
3027
3028 /**********************************************
3029  * edif update/delete sa_index list functions *
3030  **********************************************/
3031
3032 /* clear the edif_indx_list for this port */
3033 void qla_edif_list_del(fc_port_t *fcport)
3034 {
3035         struct edif_list_entry *indx_lst;
3036         struct edif_list_entry *tindx_lst;
3037         struct list_head *indx_list = &fcport->edif.edif_indx_list;
3038         unsigned long flags = 0;
3039
3040         spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
3041         list_for_each_entry_safe(indx_lst, tindx_lst, indx_list, next) {
3042                 list_del(&indx_lst->next);
3043                 kfree(indx_lst);
3044         }
3045         spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
3046 }
3047
3048 /******************
3049  * SADB functions *
3050  ******************/
3051
/*
 * Allocate or retrieve an sa_index for a given SPI.
 *
 * @fcport:   port owning the security association
 * @sa_frame: SA update request; SAU_FLG_TX selects the tx list,
 *            SAU_FLG_INV marks a delete request
 *
 * Each nport_handle has one edif_sa_index_entry per direction holding
 * two spi/sa_index slots.  An existing slot matching the SPI is reused;
 * otherwise a free slot is filled from that direction's free pool.
 *
 * Returns the sa_index, RX_DELETE_NO_EDIF_SA_INDEX for an rx delete
 * with no existing entry, or INVALID_EDIF_SA_INDEX on failure.
 */
static uint16_t qla_edif_sadb_get_sa_index(fc_port_t *fcport,
		struct qla_sa_update_frame *sa_frame)
{
	struct edif_sa_index_entry *entry;
	struct list_head *sa_list;
	uint16_t sa_index;
	int dir = sa_frame->flags & SAU_FLG_TX;
	int slot = 0;
	int free_slot = -1;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;
	uint16_t nport_handle = fcport->loop_id;

	ql_dbg(ql_dbg_edif, vha, 0x3063,
	    "%s: entry  fc_port: %p, nport_handle: 0x%x\n",
	    __func__, fcport, nport_handle);

	/* tx and rx SAs are tracked on separate per-adapter lists */
	if (dir)
		sa_list = &ha->sadb_tx_index_list;
	else
		sa_list = &ha->sadb_rx_index_list;

	entry = qla_edif_sadb_find_sa_index_entry(nport_handle, sa_list);
	if (!entry) {
		/*
		 * rx delete (SAU_FLG_INV set, SAU_FLG_TX clear) with no
		 * tracked entry: nothing to invalidate.
		 */
		if ((sa_frame->flags & (SAU_FLG_TX | SAU_FLG_INV)) == SAU_FLG_INV) {
			ql_dbg(ql_dbg_edif, vha, 0x3063,
			    "%s: rx delete request with no entry\n", __func__);
			return RX_DELETE_NO_EDIF_SA_INDEX;
		}

		/* if there is no entry for this nport, add one */
		entry = kzalloc((sizeof(struct edif_sa_index_entry)), GFP_ATOMIC);
		if (!entry)
			return INVALID_EDIF_SA_INDEX;

		sa_index = qla_edif_get_sa_index_from_freepool(fcport, dir);
		if (sa_index == INVALID_EDIF_SA_INDEX) {
			kfree(entry);
			return INVALID_EDIF_SA_INDEX;
		}

		/* first slot takes the new SPI; second slot starts empty */
		INIT_LIST_HEAD(&entry->next);
		entry->handle = nport_handle;
		entry->fcport = fcport;
		entry->sa_pair[0].spi = sa_frame->spi;
		entry->sa_pair[0].sa_index = sa_index;
		entry->sa_pair[1].spi = 0;
		entry->sa_pair[1].sa_index = INVALID_EDIF_SA_INDEX;
		spin_lock_irqsave(&ha->sadb_lock, flags);
		list_add_tail(&entry->next, sa_list);
		spin_unlock_irqrestore(&ha->sadb_lock, flags);
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: Created new sadb entry for nport_handle 0x%x, spi 0x%x, returning sa_index %d\n",
		    __func__, nport_handle, sa_frame->spi, sa_index);

		return sa_index;
	}

	spin_lock_irqsave(&ha->sadb_lock, flags);

	/* see if we already have an entry for this spi */
	for (slot = 0; slot < 2; slot++) {
		if (entry->sa_pair[slot].sa_index == INVALID_EDIF_SA_INDEX) {
			free_slot = slot;
		} else {
			if (entry->sa_pair[slot].spi == sa_frame->spi) {
				spin_unlock_irqrestore(&ha->sadb_lock, flags);
				ql_dbg(ql_dbg_edif, vha, 0x3063,
				    "%s: sadb slot %d entry for lid 0x%x, spi 0x%x found, sa_index %d\n",
				    __func__, slot, entry->handle, sa_frame->spi,
				    entry->sa_pair[slot].sa_index);
				return entry->sa_pair[slot].sa_index;
			}
		}
	}
	spin_unlock_irqrestore(&ha->sadb_lock, flags);

	/* both slots are used */
	if (free_slot == -1) {
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: WARNING: No free slots in sadb for nport_handle 0x%x, spi: 0x%x\n",
		    __func__, entry->handle, sa_frame->spi);
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: Slot 0  spi: 0x%x  sa_index: %d,  Slot 1  spi: 0x%x  sa_index: %d\n",
		    __func__, entry->sa_pair[0].spi, entry->sa_pair[0].sa_index,
		    entry->sa_pair[1].spi, entry->sa_pair[1].sa_index);

		return INVALID_EDIF_SA_INDEX;
	}

	/* there is at least one free slot, use it */
	/*
	 * NOTE(review): free_slot was chosen under sadb_lock but is
	 * consumed after the lock is re-acquired below; presumably SA
	 * updates for a given nport_handle are serialized -- confirm.
	 */
	sa_index = qla_edif_get_sa_index_from_freepool(fcport, dir);
	if (sa_index == INVALID_EDIF_SA_INDEX) {
		ql_dbg(ql_dbg_edif, fcport->vha, 0x3063,
		    "%s: empty freepool!!\n", __func__);
		return INVALID_EDIF_SA_INDEX;
	}

	spin_lock_irqsave(&ha->sadb_lock, flags);
	entry->sa_pair[free_slot].spi = sa_frame->spi;
	entry->sa_pair[free_slot].sa_index = sa_index;
	spin_unlock_irqrestore(&ha->sadb_lock, flags);
	ql_dbg(ql_dbg_edif, fcport->vha, 0x3063,
	    "%s: sadb slot %d entry for nport_handle 0x%x, spi 0x%x added, returning sa_index %d\n",
	    __func__, free_slot, entry->handle, sa_frame->spi, sa_index);

	return sa_index;
}
3162
3163 /* release any sadb entries -- only done at teardown */
3164 void qla_edif_sadb_release(struct qla_hw_data *ha)
3165 {
3166         struct edif_sa_index_entry *entry, *tmp;
3167
3168         list_for_each_entry_safe(entry, tmp, &ha->sadb_rx_index_list, next) {
3169                 list_del(&entry->next);
3170                 kfree(entry);
3171         }
3172
3173         list_for_each_entry_safe(entry, tmp, &ha->sadb_tx_index_list, next) {
3174                 list_del(&entry->next);
3175                 kfree(entry);
3176         }
3177 }
3178
3179 /**************************
3180  * sadb freepool functions
3181  **************************/
3182
3183 /* build the rx and tx sa_index free pools -- only done at fcport init */
3184 int qla_edif_sadb_build_free_pool(struct qla_hw_data *ha)
3185 {
3186         ha->edif_tx_sa_id_map =
3187             kcalloc(BITS_TO_LONGS(EDIF_NUM_SA_INDEX), sizeof(long), GFP_KERNEL);
3188
3189         if (!ha->edif_tx_sa_id_map) {
3190                 ql_log_pci(ql_log_fatal, ha->pdev, 0x0009,
3191                     "Unable to allocate memory for sadb tx.\n");
3192                 return -ENOMEM;
3193         }
3194
3195         ha->edif_rx_sa_id_map =
3196             kcalloc(BITS_TO_LONGS(EDIF_NUM_SA_INDEX), sizeof(long), GFP_KERNEL);
3197         if (!ha->edif_rx_sa_id_map) {
3198                 kfree(ha->edif_tx_sa_id_map);
3199                 ha->edif_tx_sa_id_map = NULL;
3200                 ql_log_pci(ql_log_fatal, ha->pdev, 0x0009,
3201                     "Unable to allocate memory for sadb rx.\n");
3202                 return -ENOMEM;
3203         }
3204         return 0;
3205 }
3206
3207 /* release the free pool - only done during fcport teardown */
3208 void qla_edif_sadb_release_free_pool(struct qla_hw_data *ha)
3209 {
3210         kfree(ha->edif_tx_sa_id_map);
3211         ha->edif_tx_sa_id_map = NULL;
3212         kfree(ha->edif_rx_sa_id_map);
3213         ha->edif_rx_sa_id_map = NULL;
3214 }
3215
/*
 * Check whether an rx SA delete is pending for the sa_index carried by a
 * completed I/O and, if so, schedule the delete via the sa-replace work.
 *
 * The delete is deliberately deferred until EDIF_RX_DELETE_FILTER_COUNT
 * completions have been seen on the updated SA, so traffic still using
 * the old SA is not cut off prematurely.
 */
static void __chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha,
		fc_port_t *fcport, uint32_t handle, uint16_t sa_index)
{
	struct edif_list_entry *edif_entry;
	struct edif_sa_ctl *sa_ctl;
	uint16_t delete_sa_index = INVALID_EDIF_SA_INDEX;
	unsigned long flags = 0;
	uint16_t nport_handle = fcport->loop_id;
	uint16_t cached_nport_handle;

	spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
	edif_entry = qla_edif_list_find_sa_index(fcport, nport_handle);
	if (!edif_entry) {
		spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
		return;		/* no pending delete for this handle */
	}

	/*
	 * check for no pending delete for this index or iocb does not
	 * match rx sa_index
	 */
	if (edif_entry->delete_sa_index == INVALID_EDIF_SA_INDEX ||
	    edif_entry->update_sa_index != sa_index) {
		spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
		return;
	}

	/*
	 * wait until we have seen at least EDIF_DELAY_COUNT transfers before
	 * queueing RX delete
	 */
	if (edif_entry->count++ < EDIF_RX_DELETE_FILTER_COUNT) {
		spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
		return;
	}

	ql_dbg(ql_dbg_edif, vha, 0x5033,
	    "%s: invalidating delete_sa_index,  update_sa_index: 0x%x sa_index: 0x%x, delete_sa_index: 0x%x\n",
	    __func__, edif_entry->update_sa_index, sa_index, edif_entry->delete_sa_index);

	/* claim the pending delete while still under the lock */
	delete_sa_index = edif_entry->delete_sa_index;
	edif_entry->delete_sa_index = INVALID_EDIF_SA_INDEX;
	cached_nport_handle = edif_entry->handle;
	spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);

	/* sanity check on the nport handle */
	if (nport_handle != cached_nport_handle) {
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: POST SA DELETE nport_handle mismatch: lid: 0x%x, edif_entry nph: 0x%x\n",
		    __func__, nport_handle, cached_nport_handle);
	}

	/* find the sa_ctl for the delete and schedule the delete */
	sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, delete_sa_index, 0);
	if (sa_ctl) {
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: POST SA DELETE sa_ctl: %p, index recvd %d\n",
		    __func__, sa_ctl, sa_index);
		/*
		 * NOTE(review): edif_entry->update_sa_index is read here
		 * after indx_list_lock was dropped; the value is used only
		 * for this debug print but may be stale -- confirm.
		 */
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "delete index %d, update index: %d, nport handle: 0x%x, handle: 0x%x\n",
		    delete_sa_index,
		    edif_entry->update_sa_index, nport_handle, handle);

		sa_ctl->flags = EDIF_SA_CTL_FLG_DEL;
		set_bit(EDIF_SA_CTL_REPL, &sa_ctl->state);
		qla_post_sa_replace_work(fcport->vha, fcport,
		    nport_handle, sa_ctl);
	} else {
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: POST SA DELETE sa_ctl not found for delete_sa_index: %d\n",
		    __func__, delete_sa_index);
	}
}
3289
3290 void qla_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha,
3291                 srb_t *sp, struct sts_entry_24xx *sts24)
3292 {
3293         fc_port_t *fcport = sp->fcport;
3294         /* sa_index used by this iocb */
3295         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
3296         uint32_t handle;
3297
3298         handle = (uint32_t)LSW(sts24->handle);
3299
3300         /* find out if this status iosb is for a scsi read */
3301         if (cmd->sc_data_direction != DMA_FROM_DEVICE)
3302                 return;
3303
3304         return __chk_edif_rx_sa_delete_pending(vha, fcport, handle,
3305            le16_to_cpu(sts24->edif_sa_index));
3306 }
3307
3308 void qlt_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha, fc_port_t *fcport,
3309                 struct ctio7_from_24xx *pkt)
3310 {
3311         __chk_edif_rx_sa_delete_pending(vha, fcport,
3312             pkt->handle, le16_to_cpu(pkt->edif_sa_index));
3313 }
3314
/*
 * Populate the ELS passthrough argument block (qla_els_pt_arg) of @sp
 * from the bsg authentication-ELS request it carries.
 */
static void qla_parse_auth_els_ctl(struct srb *sp)
{
	struct qla_els_pt_arg *a = &sp->u.bsg_cmd.u.els_arg;
	struct bsg_job *bsg_job = sp->u.bsg_cmd.bsg_job;
	struct fc_bsg_request *request = bsg_job->request;
	struct qla_bsg_auth_els_request *p =
	    (struct qla_bsg_auth_els_request *)bsg_job->request;

	/* DMA bounce buffers were staged into sp->remap by the caller */
	a->tx_len = a->tx_byte_count = sp->remap.req.len;
	a->tx_addr = sp->remap.req.dma;
	a->rx_len = a->rx_byte_count = sp->remap.rsp.len;
	a->rx_addr = sp->remap.rsp.dma;

	if (p->e.sub_cmd == SEND_ELS_REPLY) {
		a->control_flags = p->e.extra_control_flags << 13;
		a->rx_xchg_address = cpu_to_le32(p->e.extra_rx_xchg_address);
		if (p->e.extra_control_flags == BSG_CTL_FLAG_LS_ACC)
			a->els_opcode = ELS_LS_ACC;
		else if (p->e.extra_control_flags == BSG_CTL_FLAG_LS_RJT)
			a->els_opcode = ELS_LS_RJT;
	}
	a->did = sp->fcport->d_id;
	/*
	 * NOTE(review): this unconditionally overwrites any els_opcode
	 * selected in the SEND_ELS_REPLY branch above; presumably the
	 * application sets command_code consistently -- confirm.
	 */
	a->els_opcode =  request->rqst_data.h_els.command_code;
	a->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	a->vp_idx = sp->vha->vp_idx;
}
3341
/*
 * qla_edif_process_els - run an authentication ELS passthrough request
 * from the eDIF application (bsg).
 * @vha:     host adapter
 * @bsg_job: bsg request carrying the ELS payload and destination port id
 *
 * Resolves the destination fcport from the request's port_id, validates
 * host/adapter state, copies the bsg payload into DMA bounce buffers
 * from the purex pool and starts an SRB_ELS_CMD_HST_NOLOGIN srb.
 * Completion is delivered through qla2x00_bsg_job_done().
 *
 * Returns 0 or a negative errno; bsg_reply->result carries the DID_*
 * status reported back to the application.
 */
int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	fc_port_t *fcport = NULL;
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	int rval =  (DID_ERROR << 16);
	port_id_t d_id;
	struct qla_bsg_auth_els_request *p =
	    (struct qla_bsg_auth_els_request *)bsg_job->request;

	/* destination port id is carried big-endian in the h_els request */
	d_id.b.al_pa = bsg_request->rqst_data.h_els.port_id[2];
	d_id.b.area = bsg_request->rqst_data.h_els.port_id[1];
	d_id.b.domain = bsg_request->rqst_data.h_els.port_id[0];

	/* find matching d_id in fcport list */
	fcport = qla2x00_find_fcport_by_pid(vha, &d_id);
	if (!fcport) {
		ql_dbg(ql_dbg_edif, vha, 0x911a,
		    "%s fcport not find online portid=%06x.\n",
		    __func__, d_id.b24);
		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
		return -EIO;
	}

	if (qla_bsg_check(vha, bsg_job, fcport))
		return 0;

	if (fcport->loop_id == FC_NO_LOOP_ID) {
		ql_dbg(ql_dbg_edif, vha, 0x910d,
		    "%s ELS code %x, no loop id.\n", __func__,
		    bsg_request->rqst_data.r_els.els_code);
		SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET);
		return -ENXIO;
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
		SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET);
		rval = -EIO;
		goto done;
	}

	/* pass through is supported only for ISP 4Gb or higher */
	if (!IS_FWI2_CAPABLE(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7001,
		    "ELS passthru not supported for ISP23xx based adapters.\n");
		SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET);
		rval = -EPERM;
		goto done;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_dbg(ql_dbg_user, vha, 0x7004,
		    "Failed get sp pid=%06x\n", fcport->d_id.b24);
		rval = -ENOMEM;
		SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
		goto done;
	}

	/*
	 * NOTE(review): payload_len is not checked against the purex DMA
	 * pool element size before the allocations below -- confirm the
	 * bsg layer bounds it.
	 */
	sp->remap.req.len = bsg_job->request_payload.payload_len;
	sp->remap.req.buf = dma_pool_alloc(ha->purex_dma_pool,
	    GFP_KERNEL, &sp->remap.req.dma);
	if (!sp->remap.req.buf) {
		ql_dbg(ql_dbg_user, vha, 0x7005,
		    "Failed allocate request dma len=%x\n",
		    bsg_job->request_payload.payload_len);
		rval = -ENOMEM;
		SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
		goto done_free_sp;
	}

	sp->remap.rsp.len = bsg_job->reply_payload.payload_len;
	sp->remap.rsp.buf = dma_pool_alloc(ha->purex_dma_pool,
	    GFP_KERNEL, &sp->remap.rsp.dma);
	if (!sp->remap.rsp.buf) {
		ql_dbg(ql_dbg_user, vha, 0x7006,
		    "Failed allocate response dma len=%x\n",
		    bsg_job->reply_payload.payload_len);
		rval = -ENOMEM;
		SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
		goto done_free_remap_req;
	}
	/* stage the bsg scatter/gather payload into the bounce buffer */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sp->remap.req.buf,
	    sp->remap.req.len);
	sp->remap.remapped = true;

	sp->type = SRB_ELS_CMD_HST_NOLOGIN;
	sp->name = "SPCN_BSG_HST_NOLOGIN";
	sp->u.bsg_cmd.bsg_job = bsg_job;
	qla_parse_auth_els_ctl(sp);

	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	rval = qla2x00_start_sp(sp);

	ql_dbg(ql_dbg_edif, vha, 0x700a,
	    "%s %s %8phN xchg %x ctlflag %x hdl %x reqlen %xh bsg ptr %p\n",
	    __func__, sc_to_str(p->e.sub_cmd), fcport->port_name,
	    p->e.extra_rx_xchg_address, p->e.extra_control_flags,
	    sp->handle, sp->remap.req.len, bsg_job);

	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
		rval = -EIO;
		goto done_free_remap_rsp;
	}
	return rval;

done_free_remap_rsp:
	dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf,
	    sp->remap.rsp.dma);
done_free_remap_req:
	dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf,
	    sp->remap.req.dma);
done_free_sp:
	qla2x00_rel_sp(sp);

done:
	return rval;
}
3469
3470 void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess)
3471 {
3472         if (sess->edif.app_sess_online && vha->e_dbell.db_flags & EDB_ACTIVE) {
3473                 ql_dbg(ql_dbg_disc, vha, 0xf09c,
3474                         "%s: sess %8phN send port_offline event\n",
3475                         __func__, sess->port_name);
3476                 sess->edif.app_sess_online = 0;
3477                 qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SESSION_SHUTDOWN,
3478                     sess->d_id.b24, 0, sess);
3479                 qla2x00_post_aen_work(vha, FCH_EVT_PORT_OFFLINE, sess->d_id.b24);
3480         }
3481 }
3482
3483 void qla_edif_clear_appdata(struct scsi_qla_host *vha, struct fc_port *fcport)
3484 {
3485         if (!(fcport->flags & FCF_FCSP_DEVICE))
3486                 return;
3487
3488         qla_edb_clear(vha, fcport->d_id);
3489         qla_enode_clear(vha, fcport->d_id);
3490 }