1 // SPDX-License-Identifier: GPL-2.0-only
/*
3 * Marvell Fibre Channel HBA Driver
4 * Copyright (c) 2021 Marvell
 */
9 #include <linux/kthread.h>
10 #include <linux/vmalloc.h>
11 #include <linux/delay.h>
12 #include <scsi/scsi_tcq.h>
/*
 * Forward declarations for the SADB (security-association database)
 * helpers defined later in this file.
 *
 * NOTE(review): the embedded original line numbering has gaps here (line 19
 * is missing), so the qla_edif_sadb_delete_sa_index() prototype below is
 * visibly truncated in this excerpt — confirm against the full source.
 */
14 static struct edif_sa_index_entry *qla_edif_sadb_find_sa_index_entry(uint16_t nport_handle,
15 struct list_head *sa_list);
16 static uint16_t qla_edif_sadb_get_sa_index(fc_port_t *fcport,
17 struct qla_sa_update_frame *sa_frame);
18 static int qla_edif_sadb_delete_sa_index(fc_port_t *fcport, uint16_t nport_handle,
20 static int qla_pur_get_pending(scsi_qla_host_t *, fc_port_t *, struct bsg_job *);
/*
 * NOTE(review): these two members belong to a struct whose opening and
 * closing lines are missing from this excerpt (original lines 21-22 and
 * 24-28, 30+ are absent). Presumably a work/event item carrying an
 * edif_sa_update_aen payload — verify against the full source.
 */
23 struct list_head list;
29 struct edif_sa_update_aen sa_aen;
/*
 * Lookup table mapping ELS BSG sub-command codes to human-readable
 * strings; consumed by sc_to_str() below for debug logging.
 * NOTE(review): the struct's member declarations (original lines 34-36)
 * are missing from this excerpt.
 */
33 static struct els_sub_cmd {
37 {SEND_ELS, "send ELS"},
38 {SEND_ELS_REPLY, "send ELS Reply"},
39 {PULL_ELS, "retrieve ELS"},
/*
 * sc_to_str() - translate an ELS sub-command code to its string name by
 * scanning the sc_str table above.
 * NOTE(review): the loop body and return statements (original lines
 * 43-54, excluding those shown) are missing from this excerpt.
 */
42 const char *sc_to_str(uint16_t cmd)
45 struct els_sub_cmd *e;
47 for (i = 0; i < ARRAY_SIZE(sc_str); i++) {
/*
 * qla_edif_list_find_sa_index() - walk the fcport's edif_indx_list and
 * return the entry whose handle matches @handle (NULL semantics for the
 * not-found case are presumed; the tail of the function is missing from
 * this excerpt).
 */
55 static struct edif_list_entry *qla_edif_list_find_sa_index(fc_port_t *fcport,
58 struct edif_list_entry *entry;
59 struct edif_list_entry *tentry;
60 struct list_head *indx_list = &fcport->edif.edif_indx_list;
62 list_for_each_entry_safe(entry, tentry, indx_list, next) {
63 if (entry->handle == handle)
/*
 * qla2x00_sa_replace_iocb_timeout() - timer callback (armed via
 * timer_setup() in qla_edif_list_add_sa_update_index()) that forces a
 * delayed RX sa_index delete when no traffic arrived to trigger it.
 *
 * Under indx_list_lock: if delete_sa_index is still valid (i.e. nobody
 * serviced the delayed delete), invalidate it, look up the matching
 * sa_ctl and, when found, flag it EDIF_SA_CTL_FLG_DEL and queue
 * sa-replace work; otherwise just log that no sa_ctl was found.
 */
69 /* timeout called when no traffic and delayed rx sa_index delete */
70 static void qla2x00_sa_replace_iocb_timeout(struct timer_list *t)
72 struct edif_list_entry *edif_entry = from_timer(edif_entry, t, timer);
73 fc_port_t *fcport = edif_entry->fcport;
74 struct scsi_qla_host *vha = fcport->vha;
75 struct edif_sa_ctl *sa_ctl;
76 uint16_t nport_handle;
77 unsigned long flags = 0;
79 ql_dbg(ql_dbg_edif, vha, 0x3069,
80 "%s: nport_handle 0x%x, SA REPL Delay Timeout, %8phC portid=%06x\n",
81 __func__, edif_entry->handle, fcport->port_name, fcport->d_id.b24);
84 * if delete_sa_index is valid then no one has serviced this
87 spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
90 * delete_sa_index is invalidated when we find the new sa_index in
91 * the incoming data stream. If it is not invalidated then we are
92 * still looking for the new sa_index because there is no I/O and we
93 * need to just force the rx delete and move on. Otherwise
94 * we could get another rekey which will result in an error 66.
96 if (edif_entry->delete_sa_index != INVALID_EDIF_SA_INDEX) {
97 uint16_t delete_sa_index = edif_entry->delete_sa_index;
/* mark serviced before dropping the lock so no one else handles it */
99 edif_entry->delete_sa_index = INVALID_EDIF_SA_INDEX;
100 nport_handle = edif_entry->handle;
101 spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
103 sa_ctl = qla_edif_find_sa_ctl_by_index(fcport,
107 ql_dbg(ql_dbg_edif, vha, 0x3063,
108 "%s: sa_ctl: %p, delete index %d, update index: %d, lid: 0x%x\n",
109 __func__, sa_ctl, delete_sa_index, edif_entry->update_sa_index,
112 sa_ctl->flags = EDIF_SA_CTL_FLG_DEL;
113 set_bit(EDIF_SA_CTL_REPL, &sa_ctl->state);
114 qla_post_sa_replace_work(fcport->vha, fcport,
115 nport_handle, sa_ctl);
118 ql_dbg(ql_dbg_edif, vha, 0x3063,
119 "%s: sa_ctl not found for delete_sa_index: %d\n",
120 __func__, edif_entry->delete_sa_index);
123 spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
/*
 * qla_edif_list_add_sa_update_index() - record a pending sa_update for
 * @handle. Reuses an existing edif_indx_list entry when present;
 * otherwise allocates one (GFP_ATOMIC — may be called from atomic
 * context), initializes its replace timer, and appends it to the
 * fcport's list under indx_list_lock.
 * NOTE(review): return statements are among the lines missing from this
 * excerpt; presumably 0 on success and an error on allocation failure.
 */
128 * create a new list entry for this nport handle and
129 * add an sa_update index to the list - called for sa_update
131 static int qla_edif_list_add_sa_update_index(fc_port_t *fcport,
132 uint16_t sa_index, uint16_t handle)
134 struct edif_list_entry *entry;
135 unsigned long flags = 0;
137 /* if the entry exists, then just update the sa_index */
138 entry = qla_edif_list_find_sa_index(fcport, handle);
140 entry->update_sa_index = sa_index;
146 * This is the normal path - there should be no existing entry
147 * when update is called. The exception is at startup
148 * when update is called for the first two sa_indexes
149 * followed by a delete of the first sa_index
151 entry = kzalloc((sizeof(struct edif_list_entry)), GFP_ATOMIC);
155 INIT_LIST_HEAD(&entry->next);
156 entry->handle = handle;
157 entry->update_sa_index = sa_index;
/* no delete pending yet; set when the matching delete arrives */
158 entry->delete_sa_index = INVALID_EDIF_SA_INDEX;
161 timer_setup(&entry->timer, qla2x00_sa_replace_iocb_timeout, 0);
162 spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
163 list_add_tail(&entry->next, &fcport->edif.edif_indx_list);
164 spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
/*
 * qla_edif_list_delete_sa_index() - unlink @entry from the fcport's
 * edif_indx_list under indx_list_lock. Freeing of the entry is the
 * caller's responsibility (not visible in this excerpt).
 */
168 /* remove an entry from the list */
169 static void qla_edif_list_delete_sa_index(fc_port_t *fcport, struct edif_list_entry *entry)
171 unsigned long flags = 0;
173 spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
174 list_del(&entry->next);
175 spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
/*
 * qla_post_sa_replace_work() - queue a QLA_EVT_SA_REPLACE work item for
 * the DPC thread carrying the fcport, sa_ctl and nport_handle.
 * Returns QLA_FUNCTION_FAILED when the work event cannot be allocated;
 * otherwise the result of qla2x00_post_work(). Sets FCF_ASYNC_ACTIVE on
 * the fcport before posting.
 */
178 int qla_post_sa_replace_work(struct scsi_qla_host *vha,
179 fc_port_t *fcport, uint16_t nport_handle, struct edif_sa_ctl *sa_ctl)
181 struct qla_work_evt *e;
183 e = qla2x00_alloc_work(vha, QLA_EVT_SA_REPLACE);
185 return QLA_FUNCTION_FAILED;
187 e->u.sa_update.fcport = fcport;
188 e->u.sa_update.sa_ctl = sa_ctl;
189 e->u.sa_update.nport_handle = nport_handle;
190 fcport->flags |= FCF_ASYNC_ACTIVE;
191 return qla2x00_post_work(vha, e);
/*
 * qla_edif_sa_ctl_init() - reset the per-fcport eDIF statistics
 * (rekey counters and tx/rx byte counters) when a session (re)starts.
 */
195 qla_edif_sa_ctl_init(scsi_qla_host_t *vha, struct fc_port *fcport)
197 ql_dbg(ql_dbg_edif, vha, 0x2058,
198 "Init SA_CTL List for fcport - nn %8phN pn %8phN portid=%06x.\n",
199 fcport->node_name, fcport->port_name, fcport->d_id.b24);
201 fcport->edif.tx_rekey_cnt = 0;
202 fcport->edif.rx_rekey_cnt = 0;
204 fcport->edif.tx_bytes = 0;
205 fcport->edif.rx_bytes = 0;
/*
 * qla_bsg_check() - vet an auth-ELS BSG request before processing:
 * rejects it when eDIF is not enabled in firmware or the app doorbell
 * is not active. For the PULL_ELS sub-command it retrieves the pending
 * ELS via qla_pur_get_pending() and completes the bsg_job directly.
 * NOTE(review): the return values and the assignment of 'p'/'fcport'
 * are among the lines missing from this excerpt — confirm against the
 * full source before relying on the exact control flow.
 */
208 static int qla_bsg_check(scsi_qla_host_t *vha, struct bsg_job *bsg_job,
211 struct extra_auth_els *p;
212 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
213 struct qla_bsg_auth_els_request *req =
214 (struct qla_bsg_auth_els_request *)bsg_job->request;
216 if (!vha->hw->flags.edif_enabled) {
217 ql_dbg(ql_dbg_edif, vha, 0x9105,
218 "%s edif not enabled\n", __func__);
221 if (vha->e_dbell.db_flags != EDB_ACTIVE) {
222 ql_dbg(ql_dbg_edif, vha, 0x09102,
223 "%s doorbell not enabled\n", __func__);
230 if (p->sub_cmd == PULL_ELS) {
231 struct qla_bsg_auth_els_reply *rpl =
232 (struct qla_bsg_auth_els_reply *)bsg_job->reply;
234 qla_pur_get_pending(vha, fcport, bsg_job);
236 ql_dbg(ql_dbg_edif, vha, 0x911d,
237 "%s %s %8phN sid=%x. xchg %x, nb=%xh bsg ptr %p\n",
238 __func__, sc_to_str(p->sub_cmd), fcport->port_name,
239 fcport->d_id.b24, rpl->rx_xchg_address,
240 rpl->r.reply_payload_rcv_len, bsg_job);
248 bsg_job_done(bsg_job, bsg_reply->result,
249 bsg_reply->reply_payload_rcv_len);
/*
 * qla2x00_find_fcport_by_pid() - scan vha->vp_fcports for a secure
 * (FCF_FCSP_DEVICE) port whose d_id matches @id.
 * NOTE(review): the return statements are missing from this excerpt;
 * presumably returns the matching fc_port or NULL.
 */
254 qla2x00_find_fcport_by_pid(scsi_qla_host_t *vha, port_id_t *id)
259 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
260 if ((f->flags & FCF_FCSP_DEVICE)) {
261 ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x2058,
262 "Found secure fcport - nn %8phN pn %8phN portid=0x%x, 0x%x.\n",
263 f->node_name, f->port_name,
264 f->d_id.b24, id->b24);
265 if (f->d_id.b24 == id->b24)
/*
 * Accepts only the well-known EDIF_APP_ID; everything else is logged
 * and rejected. (Kernel-doc header below is from the original source.)
 */
273 * qla_edif_app_check(): check for valid application id.
274 * @vha: host adapter pointer
275 * @appid: application id
276 * Return: false = fail, true = pass
279 qla_edif_app_check(scsi_qla_host_t *vha, struct app_id appid)
281 /* check that the app is allow/known to the driver */
283 if (appid.app_vid == EDIF_APP_ID) {
284 ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x911d, "%s app id ok\n", __func__);
287 ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app id not ok (%x)",
288 __func__, appid.app_vid);
/*
 * qla_edif_free_sa_ctl() - unlink an sa_ctl from its per-fcport list
 * (under sa_list_lock) and decrement the matching rekey counter.
 * NOTE(review): the freeing of the sa_ctl itself and the tx/rx branch
 * condition are among the lines missing from this excerpt.
 */
294 qla_edif_free_sa_ctl(fc_port_t *fcport, struct edif_sa_ctl *sa_ctl,
297 unsigned long flags = 0;
299 spin_lock_irqsave(&fcport->edif.sa_list_lock, flags);
300 list_del(&sa_ctl->next);
301 spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags);
303 fcport->edif.tx_rekey_cnt--;
305 fcport->edif.rx_rekey_cnt--;
/*
 * qla_edif_add_sa_index_to_freepool() - return an sa_index to the
 * adapter-wide free pool by clearing its bit in the tx or rx bitmap
 * (ha->edif_tx_sa_id_map / ha->edif_rx_sa_id_map) under sadb_fp_lock.
 * TX indices are rebased by EDIF_TX_SA_INDEX_BASE before the bit op.
 */
309 /* return an index to the freepool */
310 static void qla_edif_add_sa_index_to_freepool(fc_port_t *fcport, int dir,
314 struct scsi_qla_host *vha = fcport->vha;
315 struct qla_hw_data *ha = vha->hw;
316 unsigned long flags = 0;
317 u16 lsa_index = sa_index;
319 ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
320 "%s: entry\n", __func__);
323 sa_id_map = ha->edif_tx_sa_id_map;
324 lsa_index -= EDIF_TX_SA_INDEX_BASE;
326 sa_id_map = ha->edif_rx_sa_id_map;
329 spin_lock_irqsave(&ha->sadb_fp_lock, flags);
330 clear_bit(lsa_index, sa_id_map);
331 spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
332 ql_dbg(ql_dbg_edif, vha, 0x3063,
333 "%s: index %d added to free pool\n", __func__, sa_index);
/*
 * __qla2x00_release_all_sadb() - release both sa_pair slots of one SADB
 * entry for @fcport: free the matching sa_ctl (if still present), return
 * the sa_index to the free pool, and — for the RX direction — tear down
 * any delayed-delete edif_indx_list entry, cancel its timer, and emit a
 * SAUPDATE_COMPL doorbell event so the app is not left waiting.
 * Warns when the entry's nport handle disagrees with fcport->loop_id.
 * NOTE(review): several condition/brace lines are missing from this
 * excerpt; the exact nesting must be confirmed against the full source.
 */
336 static void __qla2x00_release_all_sadb(struct scsi_qla_host *vha,
337 struct fc_port *fcport, struct edif_sa_index_entry *entry,
340 struct edif_list_entry *edif_entry;
341 struct edif_sa_ctl *sa_ctl;
/* an SADB entry holds at most two sa_indexes (the sa_pair) */
345 for (i = 0; i < 2; i++) {
346 if (entry->sa_pair[i].sa_index == INVALID_EDIF_SA_INDEX)
349 if (fcport->loop_id != entry->handle) {
350 ql_dbg(ql_dbg_edif, vha, 0x3063,
351 "%s: ** WARNING %d** entry handle: 0x%x, lid: 0x%x, sa_index: %d\n",
352 __func__, i, entry->handle, fcport->loop_id,
353 entry->sa_pair[i].sa_index);
356 /* release the sa_ctl */
357 sa_ctl = qla_edif_find_sa_ctl_by_index(fcport,
358 entry->sa_pair[i].sa_index, pdir);
/* re-lookup guards against the sa_ctl having been freed meanwhile */
360 qla_edif_find_sa_ctl_by_index(fcport, sa_ctl->index, pdir)) {
361 ql_dbg(ql_dbg_edif, vha, 0x3063,
362 "%s: freeing sa_ctl for index %d\n", __func__, sa_ctl->index);
363 qla_edif_free_sa_ctl(fcport, sa_ctl, sa_ctl->index);
365 ql_dbg(ql_dbg_edif, vha, 0x3063,
366 "%s: sa_ctl NOT freed, sa_ctl: %p\n", __func__, sa_ctl);
369 /* Release the index */
370 ql_dbg(ql_dbg_edif, vha, 0x3063,
371 "%s: freeing sa_index %d, nph: 0x%x\n",
372 __func__, entry->sa_pair[i].sa_index, entry->handle);
/* direction inferred from the index range: tx indexes sit above base */
374 dir = (entry->sa_pair[i].sa_index <
375 EDIF_TX_SA_INDEX_BASE) ? 0 : 1;
376 qla_edif_add_sa_index_to_freepool(fcport, dir,
377 entry->sa_pair[i].sa_index);
379 /* Delete timer on RX */
380 if (pdir != SAU_FLG_TX) {
382 qla_edif_list_find_sa_index(fcport, entry->handle);
384 ql_dbg(ql_dbg_edif, vha, 0x5033,
385 "%s: remove edif_entry %p, update_sa_index: 0x%x, delete_sa_index: 0x%x\n",
386 __func__, edif_entry, edif_entry->update_sa_index,
387 edif_entry->delete_sa_index);
388 qla_edif_list_delete_sa_index(fcport, edif_entry);
390 * valid delete_sa_index indicates there is a rx
391 * delayed delete queued
393 if (edif_entry->delete_sa_index !=
394 INVALID_EDIF_SA_INDEX) {
395 del_timer(&edif_entry->timer);
397 /* build and send the aen */
398 fcport->edif.rx_sa_set = 1;
399 fcport->edif.rx_sa_pending = 0;
400 qla_edb_eventcreate(vha,
401 VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
402 QL_VND_SA_STAT_SUCCESS,
403 QL_VND_RX_SA_KEY, fcport);
405 ql_dbg(ql_dbg_edif, vha, 0x5033,
406 "%s: release edif_entry %p, update_sa_index: 0x%x, delete_sa_index: 0x%x\n",
407 __func__, edif_entry, edif_entry->update_sa_index,
408 edif_entry->delete_sa_index);
415 ql_dbg(ql_dbg_edif, vha, 0x3063,
416 "%s: %d %s keys released\n",
417 __func__, key_cnt, pdir ? "tx" : "rx");
/*
 * qla2x00_release_all_sadb() - release every RX and TX SADB entry
 * belonging to @fcport. Walks ha->sadb_rx_index_list then
 * ha->sadb_tx_index_list under sadb_lock; for each match the entry is
 * unlinked, the lock is dropped while __qla2x00_release_all_sadb()
 * frees its resources, and then re-acquired to continue the walk
 * (safe with list_for_each_entry_safe since the entry was unlinked).
 */
420 /* find an release all outstanding sadb sa_indicies */
421 void qla2x00_release_all_sadb(struct scsi_qla_host *vha, struct fc_port *fcport)
423 struct edif_sa_index_entry *entry, *tmp;
424 struct qla_hw_data *ha = vha->hw;
427 ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
428 "%s: Starting...\n", __func__);
430 spin_lock_irqsave(&ha->sadb_lock, flags);
432 list_for_each_entry_safe(entry, tmp, &ha->sadb_rx_index_list, next) {
433 if (entry->fcport == fcport) {
434 list_del(&entry->next);
435 spin_unlock_irqrestore(&ha->sadb_lock, flags);
/* 0 == RX direction */
436 __qla2x00_release_all_sadb(vha, fcport, entry, 0);
438 spin_lock_irqsave(&ha->sadb_lock, flags);
443 list_for_each_entry_safe(entry, tmp, &ha->sadb_tx_index_list, next) {
444 if (entry->fcport == fcport) {
445 list_del(&entry->next);
446 spin_unlock_irqrestore(&ha->sadb_lock, flags);
448 __qla2x00_release_all_sadb(vha, fcport, entry, SAU_FLG_TX);
451 spin_lock_irqsave(&ha->sadb_lock, flags);
455 spin_unlock_irqrestore(&ha->sadb_lock, flags);
/*
 * Handles QL_VND_SC_APP_START: activates the doorbell and enode queues,
 * schedules session deletion/re-login for every FC-SP capable fcport
 * (or a link/ISP reset on N2N topologies), and returns an
 * app_start_reply describing edif/enode/doorbell state.
 * NOTE(review): several condition and return lines are missing from
 * this excerpt; confirm control flow against the full source.
 */
459 * qla_edif_app_start: application has announce its present
460 * @vha: host adapter pointer
461 * @bsg_job: user request
463 * Set/activate doorbell. Reset current sessions and re-login with
467 qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
470 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
471 struct app_start appstart;
472 struct app_start_reply appreply;
473 struct fc_port *fcport, *tf;
475 ql_log(ql_log_info, vha, 0x1313,
476 "EDIF application registration with driver, FC device connections will be re-established.\n");
/* pull the app_start payload out of the BSG scatterlist */
478 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
479 bsg_job->request_payload.sg_cnt, &appstart,
480 sizeof(struct app_start));
482 ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app_vid=%x app_start_flags %x\n",
483 __func__, appstart.app_info.app_vid, appstart.app_start_flags);
485 if (vha->e_dbell.db_flags != EDB_ACTIVE) {
486 /* mark doorbell as active since an app is now present */
487 vha->e_dbell.db_flags = EDB_ACTIVE;
489 ql_dbg(ql_dbg_edif, vha, 0x911e, "%s doorbell already active\n",
493 if (N2N_TOPO(vha->hw)) {
494 if (vha->hw->flags.n2n_fw_acc_sec)
495 set_bit(N2N_LINK_RESET, &vha->dpc_flags);
497 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
498 qla2xxx_wake_dpc(vha);
500 list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
501 ql_dbg(ql_dbg_edif, vha, 0x2058,
502 "FCSP - nn %8phN pn %8phN portid=%06x.\n",
503 fcport->node_name, fcport->port_name,
505 ql_dbg(ql_dbg_edif, vha, 0xf084,
506 "%s: se_sess %p / sess %p from port %8phC "
507 "loop_id %#04x s_id %06x logout %d "
508 "keep %d els_logo %d disc state %d auth state %d"
510 __func__, fcport->se_sess, fcport,
511 fcport->port_name, fcport->loop_id,
512 fcport->d_id.b24, fcport->logout_on_delete,
513 fcport->keep_nport_handle, fcport->send_els_logo,
514 fcport->disc_state, fcport->edif.auth_state,
515 fcport->edif.app_stop);
517 if (atomic_read(&vha->loop_state) == LOOP_DOWN)
520 fcport->edif.app_started = 1;
521 fcport->login_retry = vha->hw->login_retry_count;
524 fcport->edif.app_stop = 0;
526 ql_dbg(ql_dbg_edif, vha, 0x911e,
527 "%s wwpn %8phC calling qla_edif_reset_auth_wait\n",
528 __func__, fcport->port_name);
529 fcport->edif.app_sess_online = 0;
/* force a fresh login so authentication runs under the new app */
530 qlt_schedule_sess_for_deletion(fcport);
531 qla_edif_sa_ctl_init(vha, fcport);
535 if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) {
536 /* mark as active since an app is now present */
537 vha->pur_cinfo.enode_flags = ENODE_ACTIVE;
539 ql_dbg(ql_dbg_edif, vha, 0x911f, "%s enode already active\n",
543 appreply.host_support_edif = vha->hw->flags.edif_enabled;
544 appreply.edif_enode_active = vha->pur_cinfo.enode_flags;
545 appreply.edif_edb_active = vha->e_dbell.db_flags;
547 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
549 SET_DID_STATUS(bsg_reply->result, DID_OK);
551 bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
552 bsg_job->reply_payload.sg_cnt,
554 sizeof(struct app_start_reply));
556 ql_dbg(ql_dbg_edif, vha, 0x911d,
557 "%s app start completed with 0x%x\n",
/*
 * Handles QL_VND_SC_APP_STOP: stops the enode and doorbell queues and
 * schedules deletion (with ELS LOGO) for every FC-SP session so the
 * fabric sees the application as gone. Always reports DID_OK — the app
 * assumes cleanup succeeded.
 */
564 * qla_edif_app_stop - app has announced it's exiting.
565 * @vha: host adapter pointer
566 * @bsg_job: user space command pointer
568 * Free any in flight messages, clear all doorbell events
569 * to application. Reject any message relate to security.
572 qla_edif_app_stop(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
574 struct app_stop appstop;
575 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
576 struct fc_port *fcport, *tf;
578 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
579 bsg_job->request_payload.sg_cnt, &appstop,
580 sizeof(struct app_stop));
582 ql_dbg(ql_dbg_edif, vha, 0x911d, "%s Stopping APP: app_vid=%x\n",
583 __func__, appstop.app_info.app_vid);
585 /* Call db stop and enode stop functions */
587 /* if we leave this running short waits are operational < 16 secs */
588 qla_enode_stop(vha); /* stop enode */
589 qla_edb_stop(vha); /* stop db */
591 list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
592 if (!(fcport->flags & FCF_FCSP_DEVICE))
595 if (fcport->flags & FCF_FCSP_DEVICE) {
596 ql_dbg(ql_dbg_edif, vha, 0xf084,
597 "%s: sess %p from port %8phC lid %#04x s_id %06x logout %d keep %d els_logo %d\n",
599 fcport->port_name, fcport->loop_id, fcport->d_id.b24,
600 fcport->logout_on_delete, fcport->keep_nport_handle,
601 fcport->send_els_logo);
603 if (atomic_read(&vha->loop_state) == LOOP_DOWN)
606 fcport->edif.app_stop = 1;
607 ql_dbg(ql_dbg_edif, vha, 0x911e,
608 "%s wwpn %8phC calling qla_edif_reset_auth_wait\n",
609 __func__, fcport->port_name);
611 fcport->send_els_logo = 1;
612 qlt_schedule_sess_for_deletion(fcport);
614 /* qla_edif_flush_sa_ctl_lists(fcport); */
615 fcport->edif.app_started = 0;
619 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
620 SET_DID_STATUS(bsg_reply->result, DID_OK);
622 /* no return interface to app - it assumes we cleaned up ok */
/*
 * qla_edif_app_chk_sa_update() - report whether both the RX and TX
 * security associations have been programmed for @fcport. Sets
 * appplogireply->prli_status (0 = not yet, 1 = both updated) and, when
 * both are set, clears the sa_set/sa_pending flags for the next rekey.
 */
628 qla_edif_app_chk_sa_update(scsi_qla_host_t *vha, fc_port_t *fcport,
629 struct app_plogi_reply *appplogireply)
633 if (!(fcport->edif.rx_sa_set && fcport->edif.tx_sa_set)) {
634 ql_dbg(ql_dbg_edif, vha, 0x911e,
635 "%s: wwpn %8phC Both SA indexes has not been SET TX %d, RX %d.\n",
636 __func__, fcport->port_name, fcport->edif.tx_sa_set,
637 fcport->edif.rx_sa_set);
638 appplogireply->prli_status = 0;
641 ql_dbg(ql_dbg_edif, vha, 0x911e,
642 "%s wwpn %8phC Both SA(s) updated.\n", __func__,
644 fcport->edif.rx_sa_set = fcport->edif.tx_sa_set = 0;
645 fcport->edif.rx_sa_pending = fcport->edif.tx_sa_pending = 0;
646 appplogireply->prli_status = 1;
/*
 * Handles QL_VND_SC_AUTH_OK: looks up the fcport by WWPN or port id
 * (copying d_id fields one at a time to avoid an unaligned access on
 * the packed BSG structure). An online port means this is a rekey, so
 * only the SA-update state is checked; otherwise the port must be in
 * DSC_LOGIN_AUTH_PEND, after which authok is recorded and — in
 * initiator mode — PRLI work is posted to resume login.
 * NOTE(review): the case labels, gotos and brace lines are missing
 * from this excerpt; confirm the switch structure in the full source.
 */
652 * qla_edif_app_authok - authentication by app succeeded. Driver can proceed
654 * @vha: host adapter pointer
655 * @bsg_job: user request
658 qla_edif_app_authok(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
661 struct auth_complete_cmd appplogiok;
662 struct app_plogi_reply appplogireply = {0};
663 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
664 fc_port_t *fcport = NULL;
665 port_id_t portid = {0};
667 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
668 bsg_job->request_payload.sg_cnt, &appplogiok,
669 sizeof(struct auth_complete_cmd));
671 /* silent unaligned access warning */
672 portid.b.domain = appplogiok.u.d_id.b.domain;
673 portid.b.area = appplogiok.u.d_id.b.area;
674 portid.b.al_pa = appplogiok.u.d_id.b.al_pa;
676 switch (appplogiok.type) {
678 fcport = qla2x00_find_fcport_by_wwpn(vha,
679 appplogiok.u.wwpn, 0);
681 ql_dbg(ql_dbg_edif, vha, 0x911d,
682 "%s wwpn lookup failed: %8phC\n",
683 __func__, appplogiok.u.wwpn);
686 fcport = qla2x00_find_fcport_by_pid(vha, &portid);
688 ql_dbg(ql_dbg_edif, vha, 0x911d,
689 "%s d_id lookup failed: %x\n", __func__,
693 ql_dbg(ql_dbg_edif, vha, 0x911d,
694 "%s undefined type: %x\n", __func__,
700 SET_DID_STATUS(bsg_reply->result, DID_ERROR);
705 * if port is online then this is a REKEY operation
706 * Only do sa update checking
708 if (atomic_read(&fcport->state) == FCS_ONLINE) {
709 ql_dbg(ql_dbg_edif, vha, 0x911d,
710 "%s Skipping PRLI complete based on rekey\n", __func__);
711 appplogireply.prli_status = 1;
712 SET_DID_STATUS(bsg_reply->result, DID_OK);
713 qla_edif_app_chk_sa_update(vha, fcport, &appplogireply);
717 /* make sure in AUTH_PENDING or else reject */
718 if (fcport->disc_state != DSC_LOGIN_AUTH_PEND) {
719 ql_dbg(ql_dbg_edif, vha, 0x911e,
720 "%s wwpn %8phC is not in auth pending state (%x)\n",
721 __func__, fcport->port_name, fcport->disc_state);
722 SET_DID_STATUS(bsg_reply->result, DID_OK);
723 appplogireply.prli_status = 0;
727 SET_DID_STATUS(bsg_reply->result, DID_OK);
728 appplogireply.prli_status = 1;
729 fcport->edif.authok = 1;
730 if (!(fcport->edif.rx_sa_set && fcport->edif.tx_sa_set)) {
731 ql_dbg(ql_dbg_edif, vha, 0x911e,
732 "%s: wwpn %8phC Both SA indexes has not been SET TX %d, RX %d.\n",
733 __func__, fcport->port_name, fcport->edif.tx_sa_set,
734 fcport->edif.rx_sa_set);
735 SET_DID_STATUS(bsg_reply->result, DID_OK);
736 appplogireply.prli_status = 0;
740 ql_dbg(ql_dbg_edif, vha, 0x911e,
741 "%s wwpn %8phC Both SA(s) updated.\n", __func__,
743 fcport->edif.rx_sa_set = fcport->edif.tx_sa_set = 0;
744 fcport->edif.rx_sa_pending = fcport->edif.tx_sa_pending = 0;
747 if (qla_ini_mode_enabled(vha)) {
748 ql_dbg(ql_dbg_edif, vha, 0x911e,
749 "%s AUTH complete - RESUME with prli for wwpn %8phC\n",
750 __func__, fcport->port_name);
751 qla24xx_post_prli_work(vha, fcport);
755 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
756 bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
757 bsg_job->reply_payload.sg_cnt,
759 sizeof(struct app_plogi_reply));
/*
 * Handles QL_VND_SC_AUTH_FAIL: finds the fcport by WWPN or port id and,
 * in initiator mode, schedules the session for deletion with an ELS
 * LOGO so the failed authentication tears the session down.
 */
765 * qla_edif_app_authfail - authentication by app has failed. Driver is given
766 * notice to tear down current session.
767 * @vha: host adapter pointer
768 * @bsg_job: user request
771 qla_edif_app_authfail(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
774 struct auth_complete_cmd appplogifail;
775 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
776 fc_port_t *fcport = NULL;
777 port_id_t portid = {0};
779 ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app auth fail\n", __func__);
781 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
782 bsg_job->request_payload.sg_cnt, &appplogifail,
783 sizeof(struct auth_complete_cmd));
785 /* silent unaligned access warning */
786 portid.b.domain = appplogifail.u.d_id.b.domain;
787 portid.b.area = appplogifail.u.d_id.b.area;
788 portid.b.al_pa = appplogifail.u.d_id.b.al_pa;
791 * TODO: edif: app has failed this plogi. Inform driver to
792 * take any action (if any).
794 switch (appplogifail.type) {
796 fcport = qla2x00_find_fcport_by_wwpn(vha,
797 appplogifail.u.wwpn, 0);
798 SET_DID_STATUS(bsg_reply->result, DID_OK);
801 fcport = qla2x00_find_fcport_by_pid(vha, &portid);
803 ql_dbg(ql_dbg_edif, vha, 0x911d,
804 "%s d_id lookup failed: %x\n", __func__,
806 SET_DID_STATUS(bsg_reply->result, DID_OK);
809 ql_dbg(ql_dbg_edif, vha, 0x911e,
810 "%s undefined type: %x\n", __func__,
812 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
813 SET_DID_STATUS(bsg_reply->result, DID_ERROR);
818 ql_dbg(ql_dbg_edif, vha, 0x911d,
819 "%s fcport is 0x%p\n", __func__, fcport);
822 /* set/reset edif values and flags */
823 ql_dbg(ql_dbg_edif, vha, 0x911e,
824 "%s reset the auth process - %8phC, loopid=%x portid=%06x.\n",
825 __func__, fcport->port_name, fcport->loop_id, fcport->d_id.b24);
827 if (qla_ini_mode_enabled(fcport->vha)) {
828 fcport->send_els_logo = 1;
829 qlt_schedule_sess_for_deletion(fcport);
837 * qla_edif_app_getfcinfo - app would like to read session info (wwpn, nportid,
838 * [initiator|target] mode. It can specific session with specific nport id or
840 * @vha: host adapter pointer
841 * @bsg_job: user request pointer
844 qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
848 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
849 struct app_pinfo_req app_req;
850 struct app_pinfo_reply *app_reply;
853 ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app get fcinfo\n", __func__);
855 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
856 bsg_job->request_payload.sg_cnt, &app_req,
857 sizeof(struct app_pinfo_req));
859 app_reply = kzalloc((sizeof(struct app_pinfo_reply) +
860 sizeof(struct app_pinfo) * app_req.num_ports), GFP_KERNEL);
863 SET_DID_STATUS(bsg_reply->result, DID_ERROR);
866 struct fc_port *fcport = NULL, *tf;
868 list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
869 if (!(fcport->flags & FCF_FCSP_DEVICE))
872 tdid = app_req.remote_pid;
874 ql_dbg(ql_dbg_edif, vha, 0x2058,
875 "APP request entry - portid=%06x.\n", tdid.b24);
877 /* Ran out of space */
878 if (pcnt >= app_req.num_ports)
881 if (tdid.b24 != 0 && tdid.b24 != fcport->d_id.b24)
884 app_reply->ports[pcnt].rekey_count =
885 fcport->edif.rekey_cnt;
887 app_reply->ports[pcnt].remote_type =
888 VND_CMD_RTYPE_UNKNOWN;
889 if (fcport->port_type & (FCT_NVME_TARGET | FCT_TARGET))
890 app_reply->ports[pcnt].remote_type |=
891 VND_CMD_RTYPE_TARGET;
892 if (fcport->port_type & (FCT_NVME_INITIATOR | FCT_INITIATOR))
893 app_reply->ports[pcnt].remote_type |=
894 VND_CMD_RTYPE_INITIATOR;
896 app_reply->ports[pcnt].remote_pid = fcport->d_id;
898 ql_dbg(ql_dbg_edif, vha, 0x2058,
899 "Found FC_SP fcport - nn %8phN pn %8phN pcnt %d portid=%06x secure %d.\n",
900 fcport->node_name, fcport->port_name, pcnt,
901 fcport->d_id.b24, fcport->flags & FCF_FCSP_DEVICE);
903 switch (fcport->edif.auth_state) {
904 case VND_CMD_AUTH_STATE_ELS_RCVD:
905 if (fcport->disc_state == DSC_LOGIN_AUTH_PEND) {
906 fcport->edif.auth_state = VND_CMD_AUTH_STATE_NEEDED;
907 app_reply->ports[pcnt].auth_state =
908 VND_CMD_AUTH_STATE_NEEDED;
910 app_reply->ports[pcnt].auth_state =
911 VND_CMD_AUTH_STATE_ELS_RCVD;
915 app_reply->ports[pcnt].auth_state = fcport->edif.auth_state;
919 memcpy(app_reply->ports[pcnt].remote_wwpn,
920 fcport->port_name, 8);
922 app_reply->ports[pcnt].remote_state =
923 (atomic_read(&fcport->state) ==
931 app_reply->port_count = pcnt;
932 SET_DID_STATUS(bsg_reply->result, DID_OK);
935 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
936 bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
937 bsg_job->reply_payload.sg_cnt,
939 sizeof(struct app_pinfo_reply) + sizeof(struct app_pinfo) * pcnt);
/*
 * Handles QL_VND_SC_GET_STATS: returns per-port eDIF statistics (rekey
 * count, tx/rx byte counters, WWPN) for up to app_req.num_ports
 * edif-enabled fcports. Rejects a request for zero ports with
 * DID_ERROR.
 * NOTE(review): kfree of app_reply (original ~line 1011) is not visible
 * in this excerpt.
 */
947 * qla_edif_app_getstats - app would like to read various statistics info
948 * @vha: host adapter pointer
949 * @bsg_job: user request
952 qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
955 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
958 struct app_sinfo_req app_req;
959 struct app_stats_reply *app_reply;
962 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
963 bsg_job->request_payload.sg_cnt, &app_req,
964 sizeof(struct app_sinfo_req));
965 if (app_req.num_ports == 0) {
966 ql_dbg(ql_dbg_async, vha, 0x911d,
967 "%s app did not indicate number of ports to return\n",
969 SET_DID_STATUS(bsg_reply->result, DID_ERROR);
973 size = sizeof(struct app_stats_reply) +
974 (sizeof(struct app_sinfo) * app_req.num_ports);
976 app_reply = kzalloc(size, GFP_KERNEL);
978 SET_DID_STATUS(bsg_reply->result, DID_ERROR);
981 struct fc_port *fcport = NULL, *tf;
983 list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
984 if (fcport->edif.enable) {
985 if (pcnt > app_req.num_ports)
988 app_reply->elem[pcnt].rekey_count =
989 fcport->edif.rekey_cnt;
990 app_reply->elem[pcnt].tx_bytes =
991 fcport->edif.tx_bytes;
992 app_reply->elem[pcnt].rx_bytes =
993 fcport->edif.rx_bytes;
995 memcpy(app_reply->elem[pcnt].remote_wwpn,
996 fcport->port_name, 8);
1001 app_reply->elem_count = pcnt;
1002 SET_DID_STATUS(bsg_reply->result, DID_OK);
1005 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1006 bsg_reply->reply_payload_rcv_len =
1007 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1008 bsg_job->reply_payload.sg_cnt, app_reply,
1009 sizeof(struct app_stats_reply) + (sizeof(struct app_sinfo) * pcnt));
/*
 * qla_edif_app_mgmt() - top-level dispatcher for the eDIF vendor BSG
 * interface. Validates that eDIF is enabled, no vport delete is in
 * progress, and the caller's app id is known, then routes by vendor
 * sub-command (vendor_cmd[1]) to the sa-update / app start / stop /
 * auth-ok / auth-fail / fcinfo / stats handlers. Completes the bsg_job
 * on the common exit path.
 */
1017 qla_edif_app_mgmt(struct bsg_job *bsg_job)
1019 struct fc_bsg_request *bsg_request = bsg_job->request;
1020 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1021 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1022 scsi_qla_host_t *vha = shost_priv(host);
1023 struct app_id appcheck;
1026 uint32_t vnd_sc = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1028 ql_dbg(ql_dbg_edif, vha, 0x911d, "%s vnd subcmd=%x\n",
1031 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1032 bsg_job->request_payload.sg_cnt, &appcheck,
1033 sizeof(struct app_id));
1035 if (!vha->hw->flags.edif_enabled ||
1036 test_bit(VPORT_DELETE, &vha->dpc_flags)) {
1037 ql_dbg(ql_dbg_edif, vha, 0x911d,
1038 "%s edif not enabled or vp delete. bsg ptr done %p. dpc_flags %lx\n",
1039 __func__, bsg_job, vha->dpc_flags);
1041 SET_DID_STATUS(bsg_reply->result, DID_ERROR);
1045 if (!qla_edif_app_check(vha, appcheck)) {
1046 ql_dbg(ql_dbg_edif, vha, 0x911d,
1047 "%s app checked failed.\n",
1050 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1051 SET_DID_STATUS(bsg_reply->result, DID_ERROR);
1056 case QL_VND_SC_SA_UPDATE:
1058 rval = qla24xx_sadb_update(bsg_job);
1060 case QL_VND_SC_APP_START:
1061 rval = qla_edif_app_start(vha, bsg_job);
1063 case QL_VND_SC_APP_STOP:
1064 rval = qla_edif_app_stop(vha, bsg_job);
1066 case QL_VND_SC_AUTH_OK:
1067 rval = qla_edif_app_authok(vha, bsg_job);
1069 case QL_VND_SC_AUTH_FAIL:
1070 rval = qla_edif_app_authfail(vha, bsg_job);
1072 case QL_VND_SC_GET_FCINFO:
1073 rval = qla_edif_app_getfcinfo(vha, bsg_job);
1075 case QL_VND_SC_GET_STATS:
1076 rval = qla_edif_app_getstats(vha, bsg_job);
1079 ql_dbg(ql_dbg_edif, vha, 0x911d, "%s unknown cmd=%x\n",
1081 bsg_request->rqst_data.h_vendor.vendor_cmd[1]);
1082 rval = EXT_STATUS_INVALID_PARAM;
1089 ql_dbg(ql_dbg_user, vha, 0x7009,
1090 "%s: %d bsg ptr done %p\n", __func__, __LINE__, bsg_job);
1091 bsg_job_done(bsg_job, bsg_reply->result,
1092 bsg_reply->reply_payload_rcv_len);
/*
 * qla_edif_add_sa_ctl() - allocate and initialize an edif_sa_ctl for
 * the sa_update frame's fast_sa_index and link it onto the fcport's
 * tx_sa_list or rx_sa_list (per @dir) under sa_list_lock.
 * Returns the new sa_ctl, or NULL when allocation fails.
 * NOTE(review): the copy of *sa_frame into sap (original ~line 1122)
 * is among the lines missing from this excerpt.
 */
1098 static struct edif_sa_ctl *
1099 qla_edif_add_sa_ctl(fc_port_t *fcport, struct qla_sa_update_frame *sa_frame,
1102 struct edif_sa_ctl *sa_ctl;
1103 struct qla_sa_update_frame *sap;
1104 int index = sa_frame->fast_sa_index;
1105 unsigned long flags = 0;
1107 sa_ctl = kzalloc(sizeof(*sa_ctl), GFP_KERNEL);
1109 /* couldn't get space */
1110 ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
1111 "unable to allocate SA CTL\n");
1116 * need to allocate sa_index here and save it
1117 * in both sa_ctl->index and sa_frame->fast_sa_index;
1118 * If alloc fails then delete sa_ctl and return NULL
1120 INIT_LIST_HEAD(&sa_ctl->next);
1121 sap = &sa_ctl->sa_frame;
1123 sa_ctl->index = index;
1124 sa_ctl->fcport = fcport;
1127 ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
1128 "%s: Added sa_ctl %p, index %d, state 0x%lx\n",
1129 __func__, sa_ctl, sa_ctl->index, sa_ctl->state);
1130 spin_lock_irqsave(&fcport->edif.sa_list_lock, flags);
1131 if (dir == SAU_FLG_TX)
1132 list_add_tail(&sa_ctl->next, &fcport->edif.tx_sa_list);
1134 list_add_tail(&sa_ctl->next, &fcport->edif.rx_sa_list);
1135 spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags);
/*
 * qla_edif_flush_sa_ctl_lists() - unlink every sa_ctl from both the
 * tx and rx lists of @fcport under sa_list_lock.
 * NOTE(review): the kfree of each unlinked sa_ctl (original lines
 * 1151/1157) is not visible in this excerpt.
 */
1141 qla_edif_flush_sa_ctl_lists(fc_port_t *fcport)
1143 struct edif_sa_ctl *sa_ctl, *tsa_ctl;
1144 unsigned long flags = 0;
1146 spin_lock_irqsave(&fcport->edif.sa_list_lock, flags);
1148 list_for_each_entry_safe(sa_ctl, tsa_ctl, &fcport->edif.tx_sa_list,
1150 list_del(&sa_ctl->next);
1154 list_for_each_entry_safe(sa_ctl, tsa_ctl, &fcport->edif.rx_sa_list,
1156 list_del(&sa_ctl->next);
1160 spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags);
/*
 * qla_edif_find_sa_ctl_by_index() - search the fcport's tx or rx
 * sa_ctl list (per @dir) for an in-use (EDIF_SA_CTL_USED) entry whose
 * index matches. Presumably returns NULL when not found (the final
 * return is not visible in this excerpt).
 */
1163 struct edif_sa_ctl *
1164 qla_edif_find_sa_ctl_by_index(fc_port_t *fcport, int index, int dir)
1166 struct edif_sa_ctl *sa_ctl, *tsa_ctl;
1167 struct list_head *sa_list;
1169 if (dir == SAU_FLG_TX)
1170 sa_list = &fcport->edif.tx_sa_list;
1172 sa_list = &fcport->edif.rx_sa_list;
1174 list_for_each_entry_safe(sa_ctl, tsa_ctl, sa_list, next) {
1175 if (test_bit(EDIF_SA_CTL_USED, &sa_ctl->state) &&
1176 sa_ctl->index == index)
/*
 * qla24xx_check_sadb_avail_slot() - map an SPI to an sa_index and create
 * the sa_ctl for a pending SA update.
 *
 * Special returns from the index lookup:
 *  - RX_DELETE_NO_EDIF_SA_INDEX: rx delete with no matching entry —
 *    signal SAUPDATE_COMPL success to the app and report good status;
 *  - INVALID_EDIF_SA_INDEX: allocation failed, propagated to caller.
 * Otherwise the sa_index is stored in the local sa_frame copy, an
 * sa_ctl is created and marked EDIF_SA_CTL_USED, and the tx or rx
 * rekey counter is bumped.
 */
1182 /* add the sa to the correct list */
1184 qla24xx_check_sadb_avail_slot(struct bsg_job *bsg_job, fc_port_t *fcport,
1185 struct qla_sa_update_frame *sa_frame)
1187 struct edif_sa_ctl *sa_ctl = NULL;
1191 dir = (sa_frame->flags & SAU_FLG_TX);
1193 /* map the spi to an sa_index */
1194 sa_index = qla_edif_sadb_get_sa_index(fcport, sa_frame);
1195 if (sa_index == RX_DELETE_NO_EDIF_SA_INDEX) {
1196 /* process rx delete */
1197 ql_dbg(ql_dbg_edif, fcport->vha, 0x3063,
1198 "%s: rx delete for lid 0x%x, spi 0x%x, no entry found\n",
1199 __func__, fcport->loop_id, sa_frame->spi);
1201 /* build and send the aen */
1202 fcport->edif.rx_sa_set = 1;
1203 fcport->edif.rx_sa_pending = 0;
1204 qla_edb_eventcreate(fcport->vha,
1205 VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
1206 QL_VND_SA_STAT_SUCCESS,
1207 QL_VND_RX_SA_KEY, fcport);
1209 /* force a return of good bsg status; */
1210 return RX_DELETE_NO_EDIF_SA_INDEX;
1211 } else if (sa_index == INVALID_EDIF_SA_INDEX) {
1212 ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
1213 "%s: Failed to get sa_index for spi 0x%x, dir: %d\n",
1214 __func__, sa_frame->spi, dir);
1215 return INVALID_EDIF_SA_INDEX;
1218 ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
1219 "%s: index %d allocated to spi 0x%x, dir: %d, nport_handle: 0x%x\n",
1220 __func__, sa_index, sa_frame->spi, dir, fcport->loop_id);
1222 /* This is a local copy of sa_frame. */
1223 sa_frame->fast_sa_index = sa_index;
1224 /* create the sa_ctl */
1225 sa_ctl = qla_edif_add_sa_ctl(fcport, sa_frame, dir);
1227 ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
1228 "%s: Failed to add sa_ctl for spi 0x%x, dir: %d, sa_index: %d\n",
1229 __func__, sa_frame->spi, dir, sa_index);
1233 set_bit(EDIF_SA_CTL_USED, &sa_ctl->state);
1235 if (dir == SAU_FLG_TX)
1236 fcport->edif.tx_rekey_cnt++;
1238 fcport->edif.rx_rekey_cnt++;
1240 ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
1241 "%s: Found sa_ctl %p, index %d, state 0x%lx, tx_cnt %d, rx_cnt %d, nport_handle: 0x%x\n",
1242 __func__, sa_ctl, sa_ctl->index, sa_ctl->state,
1243 fcport->edif.tx_rekey_cnt,
1244 fcport->edif.rx_rekey_cnt, fcport->loop_id);
1249 #define QLA_SA_UPDATE_FLAGS_RX_KEY 0x0
1250 #define QLA_SA_UPDATE_FLAGS_TX_KEY 0x2
/*
 * qla24xx_sadb_update - bsg entry point for an SA (security association)
 * update/delete request from the authentication application.
 *
 * Copies the qla_sa_update_frame from the bsg request, validates the host,
 * app and target session, allocates an sa_index/sa_ctl slot, then:
 *  - rx delete:  defers the sa_index removal behind an activity timer
 *    (waits for traffic on the new sa_index before invalidating the old),
 *  - rx update:  records the pending sa_index so the rsp path can spot it,
 *  - tx update/delete: issues the SRB_SA_UPDATE iocb immediately.
 * Completes the bsg job with DID_* status on all paths.
 */
1253 qla24xx_sadb_update(struct bsg_job *bsg_job)
1255 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1256 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1257 scsi_qla_host_t *vha = shost_priv(host);
1258 fc_port_t *fcport = NULL;
1260 struct edif_list_entry *edif_entry = NULL;
1264 struct qla_sa_update_frame sa_frame;
1265 struct srb_iocb *iocb_cmd;
1268 ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x911d,
1269 "%s entered, vha: 0x%p\n", __func__, vha);
/* pull the user's sa_update frame out of the bsg scatter list */
1271 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1272 bsg_job->request_payload.sg_cnt, &sa_frame,
1273 sizeof(struct qla_sa_update_frame));
1275 /* Check if host is online */
1276 if (!vha->flags.online) {
1277 ql_log(ql_log_warn, vha, 0x70a1, "Host is not online\n");
1279 SET_DID_STATUS(bsg_reply->result, DID_ERROR);
/* the authentication app must have the doorbell active */
1283 if (vha->e_dbell.db_flags != EDB_ACTIVE) {
1284 ql_log(ql_log_warn, vha, 0x70a1, "App not started\n");
1286 SET_DID_STATUS(bsg_reply->result, DID_ERROR);
1290 /* silent unaligned access warning */
1291 portid.b.domain = sa_frame.port_id.b.domain;
1292 portid.b.area = sa_frame.port_id.b.area;
1293 portid.b.al_pa = sa_frame.port_id.b.al_pa;
1295 fcport = qla2x00_find_fcport_by_pid(vha, &portid);
/* reset the per-direction byte counter on a fresh key */
1298 if (sa_frame.flags == QLA_SA_UPDATE_FLAGS_TX_KEY)
1299 fcport->edif.tx_bytes = 0;
1300 if (sa_frame.flags == QLA_SA_UPDATE_FLAGS_RX_KEY)
1301 fcport->edif.rx_bytes = 0;
1305 ql_dbg(ql_dbg_edif, vha, 0x70a3, "Failed to find port= %06x\n",
1306 sa_frame.port_id.b24);
1308 SET_DID_STATUS(bsg_reply->result, DID_TARGET_FAILURE);
1312 /* make sure the nport_handle is valid */
1313 if (fcport->loop_id == FC_NO_LOOP_ID) {
1314 ql_dbg(ql_dbg_edif, vha, 0x70e1,
1315 "%s: %8phN lid=FC_NO_LOOP_ID, spi: 0x%x, DS %d, returning NO_CONNECT\n",
1316 __func__, fcport->port_name, sa_frame.spi,
1317 fcport->disc_state);
1319 SET_DID_STATUS(bsg_reply->result, DID_NO_CONNECT);
1323 /* allocate and queue an sa_ctl */
1324 result = qla24xx_check_sadb_avail_slot(bsg_job, fcport, &sa_frame);
1326 /* failure of bsg */
1327 if (result == INVALID_EDIF_SA_INDEX) {
1328 ql_dbg(ql_dbg_edif, vha, 0x70e1,
1329 "%s: %8phN, skipping update.\n",
1330 __func__, fcport->port_name);
1332 SET_DID_STATUS(bsg_reply->result, DID_ERROR);
1335 /* rx delete failure */
1336 } else if (result == RX_DELETE_NO_EDIF_SA_INDEX) {
1337 ql_dbg(ql_dbg_edif, vha, 0x70e1,
1338 "%s: %8phN, skipping rx delete.\n",
1339 __func__, fcport->port_name);
1340 SET_DID_STATUS(bsg_reply->result, DID_OK);
1344 ql_dbg(ql_dbg_edif, vha, 0x70e1,
1345 "%s: %8phN, sa_index in sa_frame: %d flags %xh\n",
1346 __func__, fcport->port_name, sa_frame.fast_sa_index,
1349 /* looking for rx index and delete */
1350 if (((sa_frame.flags & SAU_FLG_TX) == 0) &&
1351 (sa_frame.flags & SAU_FLG_INV)) {
1352 uint16_t nport_handle = fcport->loop_id;
1353 uint16_t sa_index = sa_frame.fast_sa_index;
1356 * make sure we have an existing rx key, otherwise just process
1357 * this as a straight delete just like TX
1358 * This is NOT a normal case, it indicates an error recovery or key cleanup
1359 * by the ipsec code above us.
1361 edif_entry = qla_edif_list_find_sa_index(fcport, fcport->loop_id);
1363 ql_dbg(ql_dbg_edif, vha, 0x911d,
1364 "%s: WARNING: no active sa_index for nport_handle 0x%x, forcing delete for sa_index 0x%x\n",
1365 __func__, fcport->loop_id, sa_index);
1366 goto force_rx_delete;
1370 * if we have a forced delete for rx, remove the sa_index from the edif list
1371 * and proceed with normal delete. The rx delay timer should not be running
1373 if ((sa_frame.flags & SAU_FLG_FORCE_DELETE) == SAU_FLG_FORCE_DELETE) {
1374 qla_edif_list_delete_sa_index(fcport, edif_entry);
1375 ql_dbg(ql_dbg_edif, vha, 0x911d,
1376 "%s: FORCE DELETE flag found for nport_handle 0x%x, sa_index 0x%x, forcing DELETE\n",
1377 __func__, fcport->loop_id, sa_index);
1379 goto force_rx_delete;
1385 * if delete_sa_index is not invalid then there is already
1386 * a delayed index in progress, return bsg bad status
1388 if (edif_entry->delete_sa_index != INVALID_EDIF_SA_INDEX) {
1389 struct edif_sa_ctl *sa_ctl;
1391 ql_dbg(ql_dbg_edif, vha, 0x911d,
1392 "%s: delete for lid 0x%x, delete_sa_index %d is pending\n",
1393 __func__, edif_entry->handle, edif_entry->delete_sa_index);
1395 /* free up the sa_ctl that was allocated with the sa_index */
1396 sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, sa_index,
1397 (sa_frame.flags & SAU_FLG_TX));
1399 ql_dbg(ql_dbg_edif, vha, 0x3063,
1400 "%s: freeing sa_ctl for index %d\n",
1401 __func__, sa_ctl->index);
1402 qla_edif_free_sa_ctl(fcport, sa_ctl, sa_ctl->index);
1405 /* release the sa_index */
1406 ql_dbg(ql_dbg_edif, vha, 0x3063,
1407 "%s: freeing sa_index %d, nph: 0x%x\n",
1408 __func__, sa_index, nport_handle);
1409 qla_edif_sadb_delete_sa_index(fcport, nport_handle, sa_index);
1412 SET_DID_STATUS(bsg_reply->result, DID_ERROR);
1416 fcport->edif.rekey_cnt++;
1418 /* configure and start the rx delay timer */
1419 edif_entry->fcport = fcport;
1420 edif_entry->timer.expires = jiffies + RX_DELAY_DELETE_TIMEOUT * HZ;
1422 ql_dbg(ql_dbg_edif, vha, 0x911d,
1423 "%s: adding timer, entry: %p, delete sa_index %d, lid 0x%x to edif_list\n",
1424 __func__, edif_entry, sa_index, nport_handle);
1427 * Start the timer when we queue the delayed rx delete.
1428 * This is an activity timer that goes off if we have not
1429 * received packets with the new sa_index
1431 add_timer(&edif_entry->timer);
1434 * sa_delete for rx key with an active rx key including this one
1435 * add the delete rx sa index to the hash so we can look for it
1436 * in the rsp queue. Do this after making any changes to the
1437 * edif_entry as part of the rx delete.
1440 ql_dbg(ql_dbg_edif, vha, 0x911d,
1441 "%s: delete sa_index %d, lid 0x%x to edif_list. bsg done ptr %p\n",
1442 __func__, sa_index, nport_handle, bsg_job);
1444 edif_entry->delete_sa_index = sa_index;
/* delayed delete queued: complete the bsg job now with success */
1446 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1447 bsg_reply->result = DID_OK << 16;
1452 * rx index and update
1453 * add the index to the list and continue with normal update
1455 } else if (((sa_frame.flags & SAU_FLG_TX) == 0) &&
1456 ((sa_frame.flags & SAU_FLG_INV) == 0)) {
1457 /* sa_update for rx key */
1458 uint32_t nport_handle = fcport->loop_id;
1459 uint16_t sa_index = sa_frame.fast_sa_index;
1463 * add the update rx sa index to the hash so we can look for it
1464 * in the rsp queue and continue normally
1467 ql_dbg(ql_dbg_edif, vha, 0x911d,
1468 "%s: adding update sa_index %d, lid 0x%x to edif_list\n",
1469 __func__, sa_index, nport_handle);
1471 result = qla_edif_list_add_sa_update_index(fcport, sa_index,
1474 ql_dbg(ql_dbg_edif, vha, 0x911d,
1475 "%s: SA_UPDATE failed to add new sa index %d to list for lid 0x%x\n",
1476 __func__, sa_index, nport_handle);
/* latch GMAC mode for this session from the request flags */
1479 if (sa_frame.flags & SAU_FLG_GMAC_MODE)
1480 fcport->edif.aes_gmac = 1;
1482 fcport->edif.aes_gmac = 0;
1486 * sa_update for both rx and tx keys, sa_delete for tx key
1487 * immediately process the request
1489 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1492 SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
/* build the SA_UPDATE srb and hand it to the firmware */
1496 sp->type = SRB_SA_UPDATE;
1497 sp->name = "bsg_sa_update";
1498 sp->u.bsg_job = bsg_job;
1499 /* sp->free = qla2x00_bsg_sp_free; */
1500 sp->free = qla2x00_rel_sp;
1501 sp->done = qla2x00_bsg_job_done;
1502 iocb_cmd = &sp->u.iocb_cmd;
1503 iocb_cmd->u.sa_update.sa_frame = sa_frame;
1505 rval = qla2x00_start_sp(sp);
1506 if (rval != QLA_SUCCESS) {
1507 ql_log(ql_dbg_edif, vha, 0x70e3,
1508 "qla2x00_start_sp failed=%d.\n", rval);
1512 SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
1516 ql_dbg(ql_dbg_edif, vha, 0x911d,
1517 "%s: %s sent, hdl=%x, portid=%06x.\n",
1518 __func__, sp->name, sp->handle, fcport->d_id.b24);
1520 fcport->edif.rekey_cnt++;
1521 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1522 SET_DID_STATUS(bsg_reply->result, DID_OK);
1527 * send back error status
1530 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1531 ql_dbg(ql_dbg_edif, vha, 0x911d,
1532 "%s:status: FAIL, result: 0x%x, bsg ptr done %p\n",
1533 __func__, bsg_reply->result, bsg_job);
1534 bsg_job_done(bsg_job, bsg_reply->result,
1535 bsg_reply->reply_payload_rcv_len);
/*
 * qla_enode_free - release an enode.
 * Marks the node type undefined before releasing it; the node must
 * already be off any list.
 */
1541 qla_enode_free(scsi_qla_host_t *vha, struct enode *node)
1543 node->ntype = N_UNDEF;
1548 * qla_enode_init - initialize enode structs & lock
1549 * @vha: host adapter pointer
1551 * should only be called when driver attaching
1554 qla_enode_init(scsi_qla_host_t *vha)
1556 struct qla_hw_data *ha = vha->hw;
/* refuse to reinit while the purex list is still marked active */
1559 if (vha->pur_cinfo.enode_flags == ENODE_ACTIVE) {
1560 /* list still active - error */
1561 ql_dbg(ql_dbg_edif, vha, 0x09102, "%s enode still active\n",
1566 /* initialize lock which protects pur_core & init list */
1567 spin_lock_init(&vha->pur_cinfo.pur_lock);
1568 INIT_LIST_HEAD(&vha->pur_cinfo.head);
1570 snprintf(name, sizeof(name), "%s_%d_purex", QLA2XXX_DRIVER_NAME,
1575 * qla_enode_stop - stop and clear and enode data
1576 * @vha: host adapter pointer
1578 * called when app notified it is exiting
1581 qla_enode_stop(scsi_qla_host_t *vha)
1583 unsigned long flags;
1584 struct enode *node, *q;
1586 if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) {
1587 /* doorbell list not enabled */
1588 ql_dbg(ql_dbg_edif, vha, 0x09102,
1589 "%s enode not active\n", __func__);
1593 /* grab lock so list doesn't move */
1594 spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
1596 vha->pur_cinfo.enode_flags &= ~ENODE_ACTIVE; /* mark it not active */
/* drain any enodes still queued at shutdown, freeing each one */
1598 /* hopefully this is a null list at this point */
1599 list_for_each_entry_safe(node, q, &vha->pur_cinfo.head, list) {
1600 ql_dbg(ql_dbg_edif, vha, 0x910f,
1601 "%s freeing enode type=%x, cnt=%x\n", __func__, node->ntype,
1602 node->dinfo.nodecnt);
1603 list_del_init(&node->list);
1604 qla_enode_free(vha, node);
1606 spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
/*
 * qla_enode_clear - drop all queued purex enodes that came from @portid.
 * Matching nodes are moved to a private list under the lock, then freed
 * outside the lock (avoids freeing while holding pur_lock).
 */
1609 static void qla_enode_clear(scsi_qla_host_t *vha, port_id_t portid)
1611 unsigned long flags;
1612 struct enode *e, *tmp;
1613 struct purexevent *purex;
1614 LIST_HEAD(enode_list);
1616 if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) {
1617 ql_dbg(ql_dbg_edif, vha, 0x09102,
1618 "%s enode not active\n", __func__);
1621 spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
1622 list_for_each_entry_safe(e, tmp, &vha->pur_cinfo.head, list) {
1623 purex = &e->u.purexinfo;
/* match on the source port id of the received ELS */
1624 if (purex->pur_info.pur_sid.b24 == portid.b24) {
1625 ql_dbg(ql_dbg_edif, vha, 0x911d,
1626 "%s free ELS sid=%06x. xchg %x, nb=%xh\n",
1627 __func__, portid.b24,
1628 purex->pur_info.pur_rx_xchg_address,
1629 purex->pur_info.pur_bytes_rcvd);
1631 list_del_init(&e->list);
1632 list_add_tail(&e->list, &enode_list);
1635 spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
/* free the collected nodes after dropping the lock */
1637 list_for_each_entry_safe(e, tmp, &enode_list, list) {
1638 list_del_init(&e->list);
1639 qla_enode_free(vha, e);
1644 * allocate enode struct and populate buffer
1645 * returns: enode pointer with buffers
1648 static struct enode *
1649 qla_enode_alloc(scsi_qla_host_t *vha, uint32_t ntype)
1652 struct purexevent *purex;
/* single allocation: enode header followed by the ELS payload buffer */
1654 node = kzalloc(RX_ELS_SIZE, GFP_ATOMIC);
1658 purex = &node->u.purexinfo;
1659 purex->msgp = (u8 *)(node + 1);
1660 purex->msgp_len = ELS_MAX_PAYLOAD;
1662 node->ntype = ntype;
1663 INIT_LIST_HEAD(&node->list);
/*
 * qla_enode_add - append an already-allocated enode to the purex list,
 * under pur_lock.
 */
1668 qla_enode_add(scsi_qla_host_t *vha, struct enode *ptr)
1670 unsigned long flags;
1672 ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x9109,
1673 "%s add enode for type=%x, cnt=%x\n",
1674 __func__, ptr->ntype, ptr->dinfo.nodecnt);
1676 spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
1677 list_add_tail(&ptr->list, &vha->pur_cinfo.head);
1678 spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
/*
 * qla_enode_find - find, unlink and return the first queued enode
 * matching the caller's criteria (p1/p2 meaning depends on @ntype;
 * for N_PUREX the visible match is on the source port id).
 * Returns NULL when nothing matches.
 */
1683 static struct enode *
1684 qla_enode_find(scsi_qla_host_t *vha, uint32_t ntype, uint32_t p1, uint32_t p2)
1686 struct enode *node_rtn = NULL;
1687 struct enode *list_node, *q;
1688 unsigned long flags;
1690 struct purexevent *purex;
1692 /* secure the list from moving under us */
1693 spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
1695 list_for_each_entry_safe(list_node, q, &vha->pur_cinfo.head, list) {
1697 /* node type determines what p1 and p2 are */
1698 purex = &list_node->u.purexinfo;
1701 if (purex->pur_info.pur_sid.b24 == sid) {
1702 /* found it and its complete */
1703 node_rtn = list_node;
1704 list_del(&list_node->list);
1709 spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
1715 * qla_pur_get_pending - read/return authentication message sent
1717 * @vha: host adapter pointer
1718 * @fcport: session pointer
1719 * @bsg_job: user request where the message is copy to.
1722 qla_pur_get_pending(scsi_qla_host_t *vha, fc_port_t *fcport,
1723 struct bsg_job *bsg_job)
1726 struct purexevent *purex;
1727 struct qla_bsg_auth_els_reply *rpl =
1728 (struct qla_bsg_auth_els_reply *)bsg_job->reply;
1730 bsg_job->reply_len = sizeof(*rpl);
/* pull the queued purex enode for this initiator off the list */
1732 ptr = qla_enode_find(vha, N_PUREX, fcport->d_id.b24, PUR_GET);
1734 ql_dbg(ql_dbg_edif, vha, 0x9111,
1735 "%s no enode data found for %8phN sid=%06x\n",
1736 __func__, fcport->port_name, fcport->d_id.b24);
/* nothing pending yet - tell the app to retry */
1737 SET_DID_STATUS(rpl->r.result, DID_IMM_RETRY);
1742 * enode is now off the linked list and is ours to deal with
1744 purex = &ptr->u.purexinfo;
1746 /* Copy info back to caller */
1747 rpl->rx_xchg_address = purex->pur_info.pur_rx_xchg_address;
1749 SET_DID_STATUS(rpl->r.result, DID_OK);
/* copy the received ELS payload into the caller's reply buffer */
1750 rpl->r.reply_payload_rcv_len =
1751 sg_pcopy_from_buffer(bsg_job->reply_payload.sg_list,
1752 bsg_job->reply_payload.sg_cnt, purex->msgp,
1753 purex->pur_info.pur_bytes_rcvd, 0);
1755 /* data copy / passback completed - destroy enode */
1756 qla_enode_free(vha, ptr);
/*
 * qla_els_reject_iocb - build and post an ELS reject (LS_RJT) iocb.
 * Caller must hold the qpair lock (see note below).
 * Returns QLA_FUNCTION_FAILED when no iocb slot is available.
 */
1761 /* it is assume qpair lock is held */
1763 qla_els_reject_iocb(scsi_qla_host_t *vha, struct qla_qpair *qp,
1764 struct qla_els_pt_arg *a)
1766 struct els_entry_24xx *els_iocb;
1768 els_iocb = __qla2x00_alloc_iocbs(qp, NULL);
1770 ql_log(ql_log_warn, vha, 0x700c,
1771 "qla2x00_alloc_iocbs failed.\n");
1772 return QLA_FUNCTION_FAILED;
/* fill the ELS pass-through iocb from the caller-supplied args */
1775 qla_els_pt_iocb(vha, els_iocb, a);
1777 ql_dbg(ql_dbg_edif, vha, 0x0183,
1778 "Sending ELS reject...\n");
1779 ql_dump_buffer(ql_dbg_edif + ql_dbg_verbose, vha, 0x0185,
1780 vha->hw->elsrej.c, sizeof(*vha->hw->elsrej.c));
1781 /* flush iocb to mem before notifying hw doorbell */
1783 qla2x00_start_iocbs(vha, qp->req);
/*
 * qla_edb_init - initialize the edif doorbell list, its lock and the
 * completion the app waits on. Refuses to reinit while still active.
 */
1788 qla_edb_init(scsi_qla_host_t *vha)
1790 if (vha->e_dbell.db_flags == EDB_ACTIVE) {
1791 /* list already init'd - error */
1792 ql_dbg(ql_dbg_edif, vha, 0x09102,
1793 "edif db already initialized, cannot reinit\n");
1797 /* initialize lock which protects doorbell & init list */
1798 spin_lock_init(&vha->e_dbell.db_lock);
1799 INIT_LIST_HEAD(&vha->e_dbell.head);
1801 /* create and initialize doorbell */
1802 init_completion(&vha->e_dbell.dbell);
1806 qla_edb_node_free(scsi_qla_host_t *vha, struct edb_node *node)
1809 * releases the space held by this edb node entry
1810 * this function does _not_ free the edb node itself
1811 * NB: the edb node entry passed should not be on any list
1813 * currently for doorbell there's no additional cleanup
1814 * needed, but here as a placeholder for furture use.
/* guard against a NULL node pointer */
1818 ql_dbg(ql_dbg_edif, vha, 0x09122,
1819 "%s error - no valid node passed\n", __func__);
1823 node->ntype = N_UNDEF;
/*
 * qla_edb_clear - remove all doorbell events belonging to @portid.
 * The matching port id is extracted per event type; matches are moved
 * to a private list under db_lock and released after the lock drops.
 */
1826 static void qla_edb_clear(scsi_qla_host_t *vha, port_id_t portid)
1828 unsigned long flags;
1829 struct edb_node *e, *tmp;
1831 LIST_HEAD(edb_list);
1833 if (vha->e_dbell.db_flags != EDB_ACTIVE) {
1834 /* doorbell list not enabled */
1835 ql_dbg(ql_dbg_edif, vha, 0x09102,
1836 "%s doorbell not enabled\n", __func__);
1840 /* grab lock so list doesn't move */
1841 spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
1842 list_for_each_entry_safe(e, tmp, &vha->e_dbell.head, list) {
/* pick the port id field that matches this event's type */
1844 case VND_CMD_AUTH_STATE_NEEDED:
1845 case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
1846 sid = e->u.plogi_did;
1848 case VND_CMD_AUTH_STATE_ELS_RCVD:
1851 case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
1852 /* app wants to see this */
1855 ql_log(ql_log_warn, vha, 0x09102,
1856 "%s unknown node type: %x\n", __func__, e->ntype);
1860 if (sid.b24 == portid.b24) {
1861 ql_dbg(ql_dbg_edif, vha, 0x910f,
1862 "%s free doorbell event : node type = %x %p\n",
1863 __func__, e->ntype, e);
1864 list_del_init(&e->list);
1865 list_add_tail(&e->list, &edb_list);
1868 spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
/* release the collected nodes outside the lock */
1870 list_for_each_entry_safe(e, tmp, &edb_list, list) {
1871 qla_edb_node_free(vha, e);
1872 list_del_init(&e->list);
1877 /* function called when app is stopping */
/*
 * qla_edb_stop - deactivate the doorbell, free any queued events and
 * wake every waiter (they see the inactive state and bail out).
 */
1880 qla_edb_stop(scsi_qla_host_t *vha)
1882 unsigned long flags;
1883 struct edb_node *node, *q;
1885 if (vha->e_dbell.db_flags != EDB_ACTIVE) {
1886 /* doorbell list not enabled */
1887 ql_dbg(ql_dbg_edif, vha, 0x09102,
1888 "%s doorbell not enabled\n", __func__);
1892 /* grab lock so list doesn't move */
1893 spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
1895 vha->e_dbell.db_flags &= ~EDB_ACTIVE; /* mark it not active */
1896 /* hopefully this is a null list at this point */
1897 list_for_each_entry_safe(node, q, &vha->e_dbell.head, list) {
1898 ql_dbg(ql_dbg_edif, vha, 0x910f,
1899 "%s freeing edb_node type=%x\n",
1900 __func__, node->ntype);
1901 qla_edb_node_free(vha, node);
1902 list_del(&node->list);
1906 spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
1908 /* wake up doorbell waiters - they'll be dismissed with error code */
1909 complete_all(&vha->e_dbell.dbell);
/*
 * qla_edb_node_alloc - allocate and type-tag a doorbell event node.
 * GFP_ATOMIC because callers may run in non-sleepable context.
 * Returns NULL on allocation failure.
 */
1912 static struct edb_node *
1913 qla_edb_node_alloc(scsi_qla_host_t *vha, uint32_t ntype)
1915 struct edb_node *node;
1917 node = kzalloc(sizeof(*node), GFP_ATOMIC);
1919 /* couldn't get space */
1920 ql_dbg(ql_dbg_edif, vha, 0x9100,
1921 "edb node unable to be allocated\n");
1925 node->ntype = ntype;
1926 INIT_LIST_HEAD(&node->list);
1930 /* adds a already allocated enode to the linked list */
/*
 * qla_edb_node_add - queue a doorbell event and ring the doorbell
 * (completes e_dbell.dbell so a waiting app thread wakes up).
 * Fails silently (with a debug message) when the doorbell is inactive.
 */
1932 qla_edb_node_add(scsi_qla_host_t *vha, struct edb_node *ptr)
1934 unsigned long flags;
1936 if (vha->e_dbell.db_flags != EDB_ACTIVE) {
1937 /* doorbell list not enabled */
1938 ql_dbg(ql_dbg_edif, vha, 0x09102,
1939 "%s doorbell not enabled\n", __func__);
1943 spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
1944 list_add_tail(&ptr->list, &vha->e_dbell.head);
1945 spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
1947 /* ring doorbell for waiters */
1948 complete(&vha->e_dbell.dbell);
1953 /* adds event to doorbell list */
/*
 * qla_edb_eventcreate - create a doorbell event of @dbtype for @sfcport
 * (or, when no fcport is supplied, the port resolved from the d_id packed
 * in @data) and queue it for the authentication app.
 * data/data2 meaning depends on the event type; for SAUPDATE_COMPL they
 * carry the status and key type.
 */
1955 qla_edb_eventcreate(scsi_qla_host_t *vha, uint32_t dbtype,
1956 uint32_t data, uint32_t data2, fc_port_t *sfcport)
1958 struct edb_node *edbnode;
1959 fc_port_t *fcport = sfcport;
1962 if (!vha->hw->flags.edif_enabled) {
1963 /* edif not enabled */
/* app not listening: just record the auth state on the session */
1967 if (vha->e_dbell.db_flags != EDB_ACTIVE) {
1969 fcport->edif.auth_state = dbtype;
1970 /* doorbell list not enabled */
1971 ql_dbg(ql_dbg_edif, vha, 0x09102,
1972 "%s doorbell not enabled (type=%d\n", __func__, dbtype);
1976 edbnode = qla_edb_node_alloc(vha, dbtype);
1978 ql_dbg(ql_dbg_edif, vha, 0x09102,
1979 "%s unable to alloc db node\n", __func__);
/* no fcport given: unpack the 24-bit port id from @data and look it up */
1984 id.b.domain = (data >> 16) & 0xff;
1985 id.b.area = (data >> 8) & 0xff;
1986 id.b.al_pa = data & 0xff;
1987 ql_dbg(ql_dbg_edif, vha, 0x09222,
1988 "%s: Arrived s_id: %06x\n", __func__,
1990 fcport = qla2x00_find_fcport_by_pid(vha, &id);
1992 ql_dbg(ql_dbg_edif, vha, 0x09102,
1993 "%s can't find fcport for sid= 0x%x - ignoring\n",
2000 /* populate the edb node */
2002 case VND_CMD_AUTH_STATE_NEEDED:
2003 case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
2004 edbnode->u.plogi_did.b24 = fcport->d_id.b24;
2006 case VND_CMD_AUTH_STATE_ELS_RCVD:
2007 edbnode->u.els_sid.b24 = fcport->d_id.b24;
2009 case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
2010 edbnode->u.sa_aen.port_id = fcport->d_id;
2011 edbnode->u.sa_aen.status = data;
2012 edbnode->u.sa_aen.key_type = data2;
2015 ql_dbg(ql_dbg_edif, vha, 0x09102,
2016 "%s unknown type: %x\n", __func__, dbtype);
2017 qla_edb_node_free(vha, edbnode);
2023 if (edbnode && (!qla_edb_node_add(vha, edbnode))) {
2024 ql_dbg(ql_dbg_edif, vha, 0x09102,
2025 "%s unable to add dbnode\n", __func__);
2026 qla_edb_node_free(vha, edbnode);
/* event queued: remember the last auth state on the session */
2030 if (edbnode && fcport)
2031 fcport->edif.auth_state = dbtype;
2032 ql_dbg(ql_dbg_edif, vha, 0x09102,
2033 "%s Doorbell produced : type=%d %p\n", __func__, dbtype, edbnode);
/*
 * qla_edb_getnext - dequeue the oldest doorbell event (FIFO order).
 * Returns NULL when the list is empty.
 */
2036 static struct edb_node *
2037 qla_edb_getnext(scsi_qla_host_t *vha)
2039 unsigned long flags;
2040 struct edb_node *edbnode = NULL;
2042 spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
2044 /* db nodes are fifo - no qualifications done */
2045 if (!list_empty(&vha->e_dbell.head)) {
2046 edbnode = list_first_entry(&vha->e_dbell.head,
2047 struct edb_node, list);
2048 list_del(&edbnode->list);
2051 spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
/*
 * qla_edif_timer - periodic edif housekeeping (called from the driver's
 * timer path). On N2N topologies with firmware-accelerated security,
 * counts down after the app stops and then schedules a chip reset to
 * turn off the auto "PLOGI ACC + secure" firmware feature.
 */
2057 qla_edif_timer(scsi_qla_host_t *vha)
2059 struct qla_hw_data *ha = vha->hw;
2061 if (!vha->vp_idx && N2N_TOPO(ha) && ha->flags.n2n_fw_acc_sec) {
2062 if (vha->e_dbell.db_flags != EDB_ACTIVE &&
2063 ha->edif_post_stop_cnt_down) {
2064 ha->edif_post_stop_cnt_down--;
2067 * turn off auto 'Plogi Acc + secure=1' feature
2068 * Set Add FW option[3]
2071 if (ha->edif_post_stop_cnt_down == 0) {
2072 ql_dbg(ql_dbg_async, vha, 0x911d,
2073 "%s chip reset to turn off PLOGI ACC + secure\n",
2075 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
/* re-arm the countdown for the next app stop */
2078 ha->edif_post_stop_cnt_down = 60;
2084 * app uses separate thread to read this. It'll wait until the doorbell
2085 * is rung by the driver or the max wait time has expired
/*
 * edif_doorbell_show - sysfs consumer side of the doorbell: drains queued
 * edb events into @buf as a sequence of (event_code, event_data_size,
 * event_data) records until the buffer budget is exhausted.
 */
2088 edif_doorbell_show(struct device *dev, struct device_attribute *attr,
2091 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2092 struct edb_node *dbnode = NULL;
2093 struct edif_app_dbell *ap = (struct edif_app_dbell *)buf;
2094 uint32_t dat_siz, buf_size, sz;
2096 /* TODO: app currently hardcoded to 256. Will transition to bsg */
2099 /* stop new threads from waiting if we're not init'd */
2100 if (vha->e_dbell.db_flags != EDB_ACTIVE) {
2101 ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x09122,
2102 "%s error - edif db not enabled\n", __func__);
2106 if (!vha->hw->flags.edif_enabled) {
2107 /* edif not enabled */
2108 ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x09122,
2109 "%s error - edif not enabled\n", __func__);
/* keep consuming events while another full record still fits */
2114 while ((sz - buf_size) >= sizeof(struct edb_node)) {
2115 /* remove the next item from the doorbell list */
2117 dbnode = qla_edb_getnext(vha);
2119 ap->event_code = dbnode->ntype;
2120 switch (dbnode->ntype) {
2121 case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
2122 case VND_CMD_AUTH_STATE_NEEDED:
2123 ap->port_id = dbnode->u.plogi_did;
2124 dat_siz += sizeof(ap->port_id);
2126 case VND_CMD_AUTH_STATE_ELS_RCVD:
2127 ap->port_id = dbnode->u.els_sid;
2128 dat_siz += sizeof(ap->port_id);
2130 case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
2131 ap->port_id = dbnode->u.sa_aen.port_id;
2132 memcpy(ap->event_data, &dbnode->u,
2133 sizeof(struct edif_sa_update_aen));
2134 dat_siz += sizeof(struct edif_sa_update_aen);
2137 /* unknown node type, rtn unknown ntype */
2138 ap->event_code = VND_CMD_AUTH_STATE_UNDEF;
2139 memcpy(ap->event_data, &dbnode->ntype, 4);
2144 ql_dbg(ql_dbg_edif, vha, 0x09102,
2145 "%s Doorbell consumed : type=%d %p\n",
2146 __func__, dbnode->ntype, dbnode);
2147 /* we're done with the db node, so free it up */
2148 qla_edb_node_free(vha, dbnode);
/* advance the output cursor past this record's header + payload */
2154 ap->event_data_size = dat_siz;
2155 /* 8bytes = ap->event_code + ap->event_data_size */
2156 buf_size += dat_siz + 8;
2157 ap = (struct edif_app_dbell *)(buf + buf_size);
/* srb completion that only drops the srb's reference - no other work */
2162 static void qla_noop_sp_done(srb_t *sp, int res)
2165 kref_put(&sp->cmd_kref, qla2x00_sp_release);
2169 * Called from work queue
2170 * build and send the sa_update iocb to delete an rx sa_index
/*
 * qla24xx_issue_sa_replace_iocb - allocate an SRB_SA_REPLACE srb for the
 * sa_ctl carried in the work event and start it, flagging cleanup
 * deletes so the isr can tell them apart.
 */
2173 qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha, struct qla_work_evt *e)
2176 fc_port_t *fcport = NULL;
2177 struct srb_iocb *iocb_cmd = NULL;
2178 int rval = QLA_SUCCESS;
2179 struct edif_sa_ctl *sa_ctl = e->u.sa_update.sa_ctl;
2180 uint16_t nport_handle = e->u.sa_update.nport_handle;
2182 ql_dbg(ql_dbg_edif, vha, 0x70e6,
2183 "%s: starting, sa_ctl: %p\n", __func__, sa_ctl);
2186 ql_dbg(ql_dbg_edif, vha, 0x70e6,
2187 "sa_ctl allocation failed\n");
2191 fcport = sa_ctl->fcport;
2193 /* Alloc SRB structure */
2194 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2196 ql_dbg(ql_dbg_edif, vha, 0x70e6,
2197 "SRB allocation failed\n");
2201 fcport->flags |= FCF_ASYNC_SENT;
2202 iocb_cmd = &sp->u.iocb_cmd;
2203 iocb_cmd->u.sa_update.sa_ctl = sa_ctl;
2205 ql_dbg(ql_dbg_edif, vha, 0x3073,
2206 "Enter: SA REPL portid=%06x, sa_ctl %p, index %x, nport_handle: 0x%x\n",
2207 fcport->d_id.b24, sa_ctl, sa_ctl->index, nport_handle);
2209 * if this is a sadb cleanup delete, mark it so the isr can
2210 * take the correct action
2212 if (sa_ctl->flags & EDIF_SA_CTL_FLG_CLEANUP_DEL) {
2213 /* mark this srb as a cleanup delete */
2214 sp->flags |= SRB_EDIF_CLEANUP_DELETE;
2215 ql_dbg(ql_dbg_edif, vha, 0x70e6,
2216 "%s: sp 0x%p flagged as cleanup delete\n", __func__, sp);
2219 sp->type = SRB_SA_REPLACE;
2220 sp->name = "SA_REPLACE";
2221 sp->fcport = fcport;
2222 sp->free = qla2x00_rel_sp;
2223 sp->done = qla_noop_sp_done;
2225 rval = qla2x00_start_sp(sp);
/* normalize any start failure to the generic driver error code */
2227 if (rval != QLA_SUCCESS)
2228 rval = QLA_FUNCTION_FAILED;
/*
 * qla24xx_sa_update_iocb - fill a SA_UPDATE iocb from the sa_frame stored
 * in the srb: direction/invalidate flags, nport handle, port id, salt,
 * spi, sa_index, control bits (FC-SP, optional GMAC, key size) and the
 * 128- or 256-bit key material. Also marks the matching tx/rx key as
 * pending on the session.
 */
2233 void qla24xx_sa_update_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb)
2236 struct scsi_qla_host *vha = sp->vha;
2237 struct qla_sa_update_frame *sa_frame =
2238 &sp->u.iocb_cmd.u.sa_update.sa_frame;
/* derive iocb flag bits from the frame's TX/INV flag combination */
2241 switch (sa_frame->flags & (SAU_FLG_INV | SAU_FLG_TX)) {
2243 ql_dbg(ql_dbg_edif, vha, 0x911d,
2244 "%s: EDIF SA UPDATE RX IOCB vha: 0x%p index: %d\n",
2245 __func__, vha, sa_frame->fast_sa_index);
2248 ql_dbg(ql_dbg_edif, vha, 0x911d,
2249 "%s: EDIF SA DELETE RX IOCB vha: 0x%p index: %d\n",
2250 __func__, vha, sa_frame->fast_sa_index);
2251 flags |= SA_FLAG_INVALIDATE;
2254 ql_dbg(ql_dbg_edif, vha, 0x911d,
2255 "%s: EDIF SA UPDATE TX IOCB vha: 0x%p index: %d\n",
2256 __func__, vha, sa_frame->fast_sa_index);
2257 flags |= SA_FLAG_TX;
2260 ql_dbg(ql_dbg_edif, vha, 0x911d,
2261 "%s: EDIF SA DELETE TX IOCB vha: 0x%p index: %d\n",
2262 __func__, vha, sa_frame->fast_sa_index);
2263 flags |= SA_FLAG_TX | SA_FLAG_INVALIDATE;
/* common iocb header and addressing fields */
2267 sa_update_iocb->entry_type = SA_UPDATE_IOCB_TYPE;
2268 sa_update_iocb->entry_count = 1;
2269 sa_update_iocb->sys_define = 0;
2270 sa_update_iocb->entry_status = 0;
2271 sa_update_iocb->handle = sp->handle;
2272 sa_update_iocb->u.nport_handle = cpu_to_le16(sp->fcport->loop_id);
2273 sa_update_iocb->vp_index = sp->fcport->vha->vp_idx;
2274 sa_update_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2275 sa_update_iocb->port_id[1] = sp->fcport->d_id.b.area;
2276 sa_update_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2278 sa_update_iocb->flags = flags;
2279 sa_update_iocb->salt = cpu_to_le32(sa_frame->salt);
2280 sa_update_iocb->spi = cpu_to_le32(sa_frame->spi);
2281 sa_update_iocb->sa_index = cpu_to_le16(sa_frame->fast_sa_index);
2283 sa_update_iocb->sa_control |= SA_CNTL_ENC_FCSP;
2284 if (sp->fcport->edif.aes_gmac)
2285 sa_update_iocb->sa_control |= SA_CNTL_AES_GMAC;
/* copy key material: 32 bytes for 256-bit keys, else 16 bytes */
2287 if (sa_frame->flags & SAU_FLG_KEY256) {
2288 sa_update_iocb->sa_control |= SA_CNTL_KEY256;
2289 for (itr = 0; itr < 32; itr++)
2290 sa_update_iocb->sa_key[itr] = sa_frame->sa_key[itr];
2292 sa_update_iocb->sa_control |= SA_CNTL_KEY128;
2293 for (itr = 0; itr < 16; itr++)
2294 sa_update_iocb->sa_key[itr] = sa_frame->sa_key[itr];
2297 ql_dbg(ql_dbg_edif, vha, 0x921d,
2298 "%s SAU Port ID = %02x%02x%02x, flags=%xh, index=%u, ctl=%xh, SPI 0x%x flags 0x%x hdl=%x gmac %d\n",
2299 __func__, sa_update_iocb->port_id[2], sa_update_iocb->port_id[1],
2300 sa_update_iocb->port_id[0], sa_update_iocb->flags, sa_update_iocb->sa_index,
2301 sa_update_iocb->sa_control, sa_update_iocb->spi, sa_frame->flags, sp->handle,
2302 sp->fcport->edif.aes_gmac);
/* flag which direction's key is now pending completion */
2304 if (sa_frame->flags & SAU_FLG_TX)
2305 sp->fcport->edif.tx_sa_pending = 1;
2307 sp->fcport->edif.rx_sa_pending = 1;
2309 sp->fcport->vha->qla_stats.control_requests++;
/*
 * qla24xx_sa_replace_iocb - fill a SA_UPDATE iocb that only invalidates
 * the sa_index taken from the srb's sa_ctl; salt, spi, control and key
 * fields are zeroed since the firmware ignores them on invalidate.
 */
2313 qla24xx_sa_replace_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb)
2315 struct scsi_qla_host *vha = sp->vha;
2316 struct srb_iocb *srb_iocb = &sp->u.iocb_cmd;
2317 struct edif_sa_ctl *sa_ctl = srb_iocb->u.sa_update.sa_ctl;
2318 uint16_t nport_handle = sp->fcport->loop_id;
2320 sa_update_iocb->entry_type = SA_UPDATE_IOCB_TYPE;
2321 sa_update_iocb->entry_count = 1;
2322 sa_update_iocb->sys_define = 0;
2323 sa_update_iocb->entry_status = 0;
2324 sa_update_iocb->handle = sp->handle;
2326 sa_update_iocb->u.nport_handle = cpu_to_le16(nport_handle);
2328 sa_update_iocb->vp_index = sp->fcport->vha->vp_idx;
2329 sa_update_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2330 sa_update_iocb->port_id[1] = sp->fcport->d_id.b.area;
2331 sa_update_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2333 /* Invalidate the index. salt, spi, control & key are ignore */
2334 sa_update_iocb->flags = SA_FLAG_INVALIDATE;
2335 sa_update_iocb->salt = 0;
2336 sa_update_iocb->spi = 0;
2337 sa_update_iocb->sa_index = cpu_to_le16(sa_ctl->index);
2338 sa_update_iocb->sa_control = 0;
2340 ql_dbg(ql_dbg_edif, vha, 0x921d,
2341 "%s SAU DELETE RX Port ID = %02x:%02x:%02x, lid %d flags=%xh, index=%u, hdl=%x\n",
2342 __func__, sa_update_iocb->port_id[2], sa_update_iocb->port_id[1],
2343 sa_update_iocb->port_id[0], nport_handle, sa_update_iocb->flags,
2344 sa_update_iocb->sa_index, sp->handle);
2346 sp->fcport->vha->qla_stats.control_requests++;
/*
 * qla24xx_auth_els - handle a received authentication ELS (purex).
 * Validates the frame (truncation, oversize payload, edif enabled),
 * copies it into a freshly allocated enode, resolves the owning host
 * and session, queues the enode for the app and rings the doorbell.
 * Any validation failure answers the sender with an ELS reject built
 * from the pre-filled qla_els_pt_arg.
 */
2349 void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp)
2351 struct purex_entry_24xx *p = *pkt;
2355 struct purexevent *purex;
2356 struct scsi_qla_host *host = NULL;
2358 struct fc_port *fcport;
2359 struct qla_els_pt_arg a;
/* pre-build the reject args in case any check below fails */
2362 memset(&a, 0, sizeof(a));
2364 a.els_opcode = ELS_AUTH_ELS;
2365 a.nport_handle = p->nport_handle;
2366 a.rx_xchg_address = p->rx_xchg_addr;
2367 a.did.b.domain = p->s_id[2];
2368 a.did.b.area = p->s_id[1];
2369 a.did.b.al_pa = p->s_id[0];
2370 a.tx_byte_count = a.tx_len = sizeof(struct fc_els_ls_rjt);
2371 a.tx_addr = vha->hw->elsrej.cdma;
2372 a.vp_idx = vha->vp_idx;
2373 a.control_flags = EPD_ELS_RJT;
2375 sid = p->s_id[0] | (p->s_id[1] << 8) | (p->s_id[2] << 16);
/* payload length excludes the purex ELS header */
2377 totlen = (le16_to_cpu(p->frame_size) & 0x0fff) - PURX_ELS_HEADER_SIZE;
/* truncated frame (status bit 15): reject and drop */
2378 if (le16_to_cpu(p->status_flags) & 0x8000) {
2379 totlen = le16_to_cpu(p->trunc_frame_size);
2380 qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
2381 __qla_consume_iocb(vha, pkt, rsp);
2385 if (totlen > ELS_MAX_PAYLOAD) {
2386 ql_dbg(ql_dbg_edif, vha, 0x0910d,
2387 "%s WARNING: verbose ELS frame received (totlen=%x)\n",
2389 qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
2390 __qla_consume_iocb(vha, pkt, rsp);
2394 if (!vha->hw->flags.edif_enabled) {
2395 /* edif support not enabled */
2396 ql_dbg(ql_dbg_edif, vha, 0x910e, "%s edif not enabled\n",
2398 qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
2399 __qla_consume_iocb(vha, pkt, rsp);
2403 ptr = qla_enode_alloc(vha, N_PUREX);
2405 ql_dbg(ql_dbg_edif, vha, 0x09109,
2406 "WARNING: enode alloc failed for sid=%x\n",
2408 qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
2409 __qla_consume_iocb(vha, pkt, rsp);
/* record frame metadata in the enode for later retrieval by the app */
2413 purex = &ptr->u.purexinfo;
2414 purex->pur_info.pur_sid = a.did;
2415 purex->pur_info.pur_bytes_rcvd = totlen;
2416 purex->pur_info.pur_rx_xchg_address = le32_to_cpu(p->rx_xchg_addr);
2417 purex->pur_info.pur_nphdl = le16_to_cpu(p->nport_handle);
2418 purex->pur_info.pur_did.b.domain = p->d_id[2];
2419 purex->pur_info.pur_did.b.area = p->d_id[1];
2420 purex->pur_info.pur_did.b.al_pa = p->d_id[0];
2421 purex->pur_info.vp_idx = p->vp_idx;
2423 rc = __qla_copy_purex_to_buffer(vha, pkt, rsp, purex->msgp,
2426 qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
2427 qla_enode_free(vha, ptr);
/* resolve which (possibly virtual) host owns the destination id */
2430 beid.al_pa = purex->pur_info.pur_did.b.al_pa;
2431 beid.area = purex->pur_info.pur_did.b.area;
2432 beid.domain = purex->pur_info.pur_did.b.domain;
2433 host = qla_find_host_by_d_id(vha, beid);
2435 ql_log(ql_log_fatal, vha, 0x508b,
2436 "%s Drop ELS due to unable to find host %06x\n",
2437 __func__, purex->pur_info.pur_did.b24);
2439 qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
2440 qla_enode_free(vha, ptr);
2444 fcport = qla2x00_find_fcport_by_pid(host, &purex->pur_info.pur_sid);
/* reject when the app is not listening or the session is down */
2446 if (host->e_dbell.db_flags != EDB_ACTIVE ||
2447 (fcport && EDIF_SESSION_DOWN(fcport))) {
2448 ql_dbg(ql_dbg_edif, host, 0x0910c, "%s e_dbell.db_flags =%x %06x\n",
2449 __func__, host->e_dbell.db_flags,
2450 fcport ? fcport->d_id.b24 : 0);
2452 qla_els_reject_iocb(host, (*rsp)->qpair, &a);
2453 qla_enode_free(host, ptr);
2457 /* add the local enode to the list */
2458 qla_enode_add(host, ptr);
2460 ql_dbg(ql_dbg_edif, host, 0x0910c,
2461 "%s COMPLETE purex->pur_info.pur_bytes_rcvd =%xh s:%06x -> d:%06x xchg=%xh\n",
2462 __func__, purex->pur_info.pur_bytes_rcvd, purex->pur_info.pur_sid.b24,
2463 purex->pur_info.pur_did.b24, purex->pur_info.pur_rx_xchg_address);
/* notify the app that an auth ELS is waiting to be pulled */
2465 qla_edb_eventcreate(host, VND_CMD_AUTH_STATE_ELS_RCVD, sid, 0, NULL);
/*
 * qla_edif_get_sa_index_from_freepool - allocate the next free SA index
 * from the per-direction eDIF free-pool bitmap.
 * @fcport: remote port the SA index is being allocated for
 * @dir: direction selector (selects the tx bitmap vs the rx bitmap)
 *
 * Claims the first clear bit in the chosen bitmap under sadb_fp_lock.
 * Returns the allocated sa_index (tx indexes are rebased by
 * EDIF_TX_SA_INDEX_BASE) or INVALID_EDIF_SA_INDEX when the pool is empty.
 */
2468 static uint16_t qla_edif_get_sa_index_from_freepool(fc_port_t *fcport, int dir)
2470 struct scsi_qla_host *vha = fcport->vha;
2471 struct qla_hw_data *ha = vha->hw;
2473 unsigned long flags = 0;
2476 ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
2477 "%s: entry\n", __func__);
/* select the bitmap for the requested direction (tx map or rx map) */
2480 sa_id_map = ha->edif_tx_sa_id_map;
2482 sa_id_map = ha->edif_rx_sa_id_map;
/* scan-and-claim must be atomic w.r.t. concurrent allocators */
2484 spin_lock_irqsave(&ha->sadb_fp_lock, flags);
2485 sa_index = find_first_zero_bit(sa_id_map, EDIF_NUM_SA_INDEX);
2486 if (sa_index >= EDIF_NUM_SA_INDEX) {
2487 spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
2488 return INVALID_EDIF_SA_INDEX;
2490 set_bit(sa_index, sa_id_map);
2491 spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
/* tx SA indexes occupy a separate, offset range */
2494 sa_index += EDIF_TX_SA_INDEX_BASE;
2496 ql_dbg(ql_dbg_edif, vha, 0x3063,
2497 "%s: index retrieved from free pool %d\n", __func__, sa_index);
2502 /* find an sadb entry for an nport_handle */
/*
 * Linear search of @sa_list (a tx or rx sadb index list) for the entry
 * whose handle matches @nport_handle.  The _safe iterator is used so a
 * caller may unlink the returned entry while walking.
 */
2503 static struct edif_sa_index_entry *
2504 qla_edif_sadb_find_sa_index_entry(uint16_t nport_handle,
2505 struct list_head *sa_list)
2507 struct edif_sa_index_entry *entry;
2508 struct edif_sa_index_entry *tentry;
2509 struct list_head *indx_list = sa_list;
2511 list_for_each_entry_safe(entry, tentry, indx_list, next) {
2512 if (entry->handle == nport_handle)
2518 /* remove an sa_index from the nport_handle and return it to the free pool */
/*
 * qla_edif_sadb_delete_sa_index - invalidate one SA index slot for an
 * nport_handle and return the index to the per-direction free pool.
 * @fcport: remote port owning the SA
 * @nport_handle: handle keying the sadb entry
 * @sa_index: firmware SA table index to release
 *
 * Direction is inferred from the index range (>= EDIF_TX_SA_INDEX_BASE
 * means tx).  When both slots of the entry become invalid the entry is
 * unlinked from the list.  Slot bookkeeping runs under ha->sadb_lock.
 */
2519 static int qla_edif_sadb_delete_sa_index(fc_port_t *fcport, uint16_t nport_handle,
2522 struct edif_sa_index_entry *entry;
2523 struct list_head *sa_list;
/* tx indexes are rebased above EDIF_TX_SA_INDEX_BASE; see the allocator */
2524 int dir = (sa_index < EDIF_TX_SA_INDEX_BASE) ? 0 : 1;
2526 int free_slot_count = 0;
2527 scsi_qla_host_t *vha = fcport->vha;
2528 struct qla_hw_data *ha = vha->hw;
2529 unsigned long flags = 0;
2531 ql_dbg(ql_dbg_edif, vha, 0x3063,
2532 "%s: entry\n", __func__);
/* pick the matching sadb list for the inferred direction */
2535 sa_list = &ha->sadb_tx_index_list;
2537 sa_list = &ha->sadb_rx_index_list;
2539 entry = qla_edif_sadb_find_sa_index_entry(nport_handle, sa_list);
2541 ql_dbg(ql_dbg_edif, vha, 0x3063,
2542 "%s: no entry found for nport_handle 0x%x\n",
2543 __func__, nport_handle);
2547 spin_lock_irqsave(&ha->sadb_lock, flags);
2549 * each tx/rx direction has up to 2 sa indexes/slots. 1 slot for in flight traffic
2550 * the other is use at re-key time.
2552 for (slot = 0; slot < 2; slot++) {
2553 if (entry->sa_pair[slot].sa_index == sa_index) {
/* matched: clear the slot and hand the index back to the pool */
2554 entry->sa_pair[slot].sa_index = INVALID_EDIF_SA_INDEX;
2555 entry->sa_pair[slot].spi = 0;
2557 qla_edif_add_sa_index_to_freepool(fcport, dir, sa_index);
2558 } else if (entry->sa_pair[slot].sa_index == INVALID_EDIF_SA_INDEX) {
/* entry with no live slots is no longer needed */
2563 if (free_slot_count == 2) {
2564 list_del(&entry->next);
2567 spin_unlock_irqrestore(&ha->sadb_lock, flags);
2569 ql_dbg(ql_dbg_edif, vha, 0x3063,
2570 "%s: sa_index %d removed, free_slot_count: %d\n",
2571 __func__, sa_index, free_slot_count);
/*
 * qla28xx_sa_update_iocb_entry - completion handler for an SA_UPDATE IOCB
 * returned by the ISP28xx firmware.
 * @v: host the IOCB arrived on (npiv: real port resolved via sp->vha)
 * @req: request queue used to look up the originating srb
 * @pkt: the SA update response packet from firmware
 *
 * Matches the response back to its srb, logs the update/delete per
 * direction, reports success/failure of the update to the authentication
 * application via qla_edb_eventcreate(), and for deletes (or failed
 * updates) releases the associated sa_ctl and sa_index.  Certain
 * completion statuses schedule the session for deletion.
 */
2577 qla28xx_sa_update_iocb_entry(scsi_qla_host_t *v, struct req_que *req,
2578 struct sa_update_28xx *pkt)
2580 const char *func = "SA_UPDATE_RESPONSE_IOCB";
2582 struct edif_sa_ctl *sa_ctl;
2583 int old_sa_deleted = 1;
2584 uint16_t nport_handle;
2585 struct scsi_qla_host *vha;
2587 sp = qla2x00_get_sp_from_handle(v, func, req, pkt);
2590 ql_dbg(ql_dbg_edif, v, 0x3063,
2591 "%s: no sp found for pkt\n", __func__);
2594 /* use sp->vha due to npiv */
/* log which of the four update/delete x tx/rx cases completed */
2597 switch (pkt->flags & (SA_FLAG_INVALIDATE | SA_FLAG_TX)) {
2599 ql_dbg(ql_dbg_edif, vha, 0x3063,
2600 "%s: EDIF SA UPDATE RX IOCB vha: 0x%p index: %d\n",
2601 __func__, vha, pkt->sa_index);
2604 ql_dbg(ql_dbg_edif, vha, 0x3063,
2605 "%s: EDIF SA DELETE RX IOCB vha: 0x%p index: %d\n",
2606 __func__, vha, pkt->sa_index);
2609 ql_dbg(ql_dbg_edif, vha, 0x3063,
2610 "%s: EDIF SA UPDATE TX IOCB vha: 0x%p index: %d\n",
2611 __func__, vha, pkt->sa_index);
2614 ql_dbg(ql_dbg_edif, vha, 0x3063,
2615 "%s: EDIF SA DELETE TX IOCB vha: 0x%p index: %d\n",
2616 __func__, vha, pkt->sa_index);
2621 * dig the nport handle out of the iocb, fcport->loop_id can not be trusted
2622 * to be correct during cleanup sa_update iocbs.
2624 nport_handle = sp->fcport->loop_id;
2626 ql_dbg(ql_dbg_edif, vha, 0x3063,
2627 "%s: %8phN comp status=%x old_sa_info=%x new_sa_info=%x lid %d, index=0x%x pkt_flags %xh hdl=%x\n",
2628 __func__, sp->fcport->port_name, pkt->u.comp_sts, pkt->old_sa_info, pkt->new_sa_info,
2629 nport_handle, pkt->sa_index, pkt->flags, sp->handle);
2631 /* if rx delete, remove the timer */
2632 if ((pkt->flags & (SA_FLAG_INVALIDATE | SA_FLAG_TX)) == SA_FLAG_INVALIDATE) {
2633 struct edif_list_entry *edif_entry;
2635 sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
2637 edif_entry = qla_edif_list_find_sa_index(sp->fcport, nport_handle);
2639 ql_dbg(ql_dbg_edif, vha, 0x5033,
2640 "%s: removing edif_entry %p, new sa_index: 0x%x\n",
2641 __func__, edif_entry, pkt->sa_index);
2642 qla_edif_list_delete_sa_index(sp->fcport, edif_entry);
/* the delayed rx sa delete timer is no longer needed */
2643 del_timer(&edif_entry->timer);
2645 ql_dbg(ql_dbg_edif, vha, 0x5033,
2646 "%s: releasing edif_entry %p, new sa_index: 0x%x\n",
2647 __func__, edif_entry, pkt->sa_index);
2654 * if this is a delete for either tx or rx, make sure it succeeded.
2655 * The new_sa_info field should be 0xffff on success
2657 if (pkt->flags & SA_FLAG_INVALIDATE)
2658 old_sa_deleted = (le16_to_cpu(pkt->new_sa_info) == 0xffff) ? 1 : 0;
2660 /* Process update and delete the same way */
2662 /* If this is an sadb cleanup delete, bypass sending events to IPSEC */
2663 if (sp->flags & SRB_EDIF_CLEANUP_DELETE) {
2664 sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
2665 ql_dbg(ql_dbg_edif, vha, 0x3063,
2666 "%s: nph 0x%x, sa_index %d removed from fw\n",
2667 __func__, sp->fcport->loop_id, pkt->sa_index);
/* success path: record new SA state and notify the app */
2669 } else if ((pkt->entry_status == 0) && (pkt->u.comp_sts == 0) &&
2672 * Note: Wa are only keeping track of latest SA,
2673 * so we know when we can start enableing encryption per I/O.
2674 * If all SA's get deleted, let FW reject the IOCB.
2676 * TODO: edif: don't set enabled here I think
2677 * TODO: edif: prli complete is where it should be set
2679 ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
2680 "SA(%x)updated for s_id %02x%02x%02x\n",
2682 pkt->port_id[2], pkt->port_id[1], pkt->port_id[0]);
2683 sp->fcport->edif.enable = 1;
2684 if (pkt->flags & SA_FLAG_TX) {
2685 sp->fcport->edif.tx_sa_set = 1;
2686 sp->fcport->edif.tx_sa_pending = 0;
2687 qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
2688 QL_VND_SA_STAT_SUCCESS,
2689 QL_VND_TX_SA_KEY, sp->fcport);
2691 sp->fcport->edif.rx_sa_set = 1;
2692 sp->fcport->edif.rx_sa_pending = 0;
2693 qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
2694 QL_VND_SA_STAT_SUCCESS,
2695 QL_VND_RX_SA_KEY, sp->fcport);
/* failure path: log and report failure (comp status in high 16 bits) */
2698 ql_dbg(ql_dbg_edif, vha, 0x3063,
2699 "%s: %8phN SA update FAILED: sa_index: %d, new_sa_info %d, %02x%02x%02x\n",
2700 __func__, sp->fcport->port_name, pkt->sa_index, pkt->new_sa_info,
2701 pkt->port_id[2], pkt->port_id[1], pkt->port_id[0]);
2703 if (pkt->flags & SA_FLAG_TX)
2704 qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
2705 (le16_to_cpu(pkt->u.comp_sts) << 16) | QL_VND_SA_STAT_FAILED,
2706 QL_VND_TX_SA_KEY, sp->fcport);
2708 qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
2709 (le16_to_cpu(pkt->u.comp_sts) << 16) | QL_VND_SA_STAT_FAILED,
2710 QL_VND_RX_SA_KEY, sp->fcport);
2713 /* for delete, release sa_ctl, sa_index */
2714 if (pkt->flags & SA_FLAG_INVALIDATE) {
2715 /* release the sa_ctl */
2716 sa_ctl = qla_edif_find_sa_ctl_by_index(sp->fcport,
2717 le16_to_cpu(pkt->sa_index), (pkt->flags & SA_FLAG_TX));
/* re-lookup guards against the sa_ctl having been freed already */
2719 qla_edif_find_sa_ctl_by_index(sp->fcport, sa_ctl->index,
2720 (pkt->flags & SA_FLAG_TX)) != NULL) {
2721 ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
2722 "%s: freeing sa_ctl for index %d\n",
2723 __func__, sa_ctl->index);
2724 qla_edif_free_sa_ctl(sp->fcport, sa_ctl, sa_ctl->index);
2726 ql_dbg(ql_dbg_edif, vha, 0x3063,
2727 "%s: sa_ctl NOT freed, sa_ctl: %p\n",
2730 ql_dbg(ql_dbg_edif, vha, 0x3063,
2731 "%s: freeing sa_index %d, nph: 0x%x\n",
2732 __func__, le16_to_cpu(pkt->sa_index), nport_handle);
2733 qla_edif_sadb_delete_sa_index(sp->fcport, nport_handle,
2734 le16_to_cpu(pkt->sa_index));
2736 * check for a failed sa_update and remove
2739 } else if (pkt->u.comp_sts) {
2740 ql_dbg(ql_dbg_edif, vha, 0x3063,
2741 "%s: freeing sa_index %d, nph: 0x%x\n",
2742 __func__, pkt->sa_index, nport_handle);
2743 qla_edif_sadb_delete_sa_index(sp->fcport, nport_handle,
2744 le16_to_cpu(pkt->sa_index));
/* some statuses mean the session itself is gone; tear it down */
2745 switch (le16_to_cpu(pkt->u.comp_sts)) {
2746 case CS_PORT_EDIF_UNAVAIL:
2747 case CS_PORT_EDIF_LOGOUT:
2748 qlt_schedule_sess_for_deletion(sp->fcport);
2759 * qla28xx_start_scsi_edif() - Send a SCSI type 6 command to the ISP
2760 * @sp: command to send to the ISP
2762 * Return: non-zero if a failure occurred, else zero.
/*
 * Builds and queues a Command Type 6 IOCB with eDIF encryption enabled
 * (CF_EN_EDIF).  The FCP_CMND IU is placed in a separately DMA-mapped
 * buffer referenced from the IOCB.  Runs under the qpair's qp_lock; on
 * any queuing error the sg map, fcp_cmnd pool buffer, and ctx mempool
 * allocation are unwound via the goto labels at the bottom.
 */
2765 qla28xx_start_scsi_edif(srb_t *sp)
2768 unsigned long flags;
2769 struct scsi_cmnd *cmd;
2777 uint8_t additional_cdb_len;
2778 struct ct6_dsd *ctx;
2779 struct scsi_qla_host *vha = sp->vha;
2780 struct qla_hw_data *ha = vha->hw;
2781 struct cmd_type_6 *cmd_pkt;
2782 struct dsd64 *cur_dsd;
2783 uint8_t avail_dsds = 0;
2784 struct scatterlist *sg;
2785 struct req_que *req = sp->qpair->req;
2786 spinlock_t *lock = sp->qpair->qp_lock_ptr;
2788 /* Setup device pointers. */
2789 cmd = GET_CMD_SP(sp);
2791 /* So we know we haven't pci_map'ed anything yet */
2794 /* Send marker if required */
2795 if (vha->marker_needed != 0) {
2796 if (qla2x00_marker(vha, sp->qpair, 0, 0, MK_SYNC_ALL) !=
2798 ql_log(ql_log_warn, vha, 0x300c,
2799 "qla2x00_marker failed for cmd=%p.\n", cmd);
2800 return QLA_FUNCTION_FAILED;
2802 vha->marker_needed = 0;
2805 /* Acquire ring specific lock */
2806 spin_lock_irqsave(lock, flags);
2808 /* Check for room in outstanding command list. */
2809 handle = req->current_outstanding_cmd;
2810 for (index = 1; index < req->num_outstanding_cmds; index++) {
/* wrap the circular handle search back to the start */
2812 if (handle == req->num_outstanding_cmds)
2814 if (!req->outstanding_cmds[handle])
2817 if (index == req->num_outstanding_cmds)
2820 /* Map the sg table so we have an accurate count of sg entries needed */
2821 if (scsi_sg_count(cmd)) {
2822 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2823 scsi_sg_count(cmd), cmd->sc_data_direction);
2824 if (unlikely(!nseg))
/* verify the request ring has room for all required IOCB entries */
2831 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2832 if (req->cnt < (req_cnt + 2)) {
2833 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2834 rd_reg_dword(req->req_q_out);
2835 if (req->ring_index < cnt)
2836 req->cnt = cnt - req->ring_index;
2838 req->cnt = req->length -
2839 (req->ring_index - cnt);
2840 if (req->cnt < (req_cnt + 2))
2844 ctx = sp->u.scmd.ct6_ctx =
2845 mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2847 ql_log(ql_log_fatal, vha, 0x3010,
2848 "Failed to allocate ctx for cmd=%p.\n", cmd);
2852 memset(ctx, 0, sizeof(struct ct6_dsd));
/* FCP_CMND IU lives in its own DMA buffer referenced by the IOCB */
2853 ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
2854 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2855 if (!ctx->fcp_cmnd) {
2856 ql_log(ql_log_fatal, vha, 0x3011,
2857 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2861 /* Initialize the DSD list and dma handle */
2862 INIT_LIST_HEAD(&ctx->dsd_list);
2863 ctx->dsd_use_cnt = 0;
2865 if (cmd->cmd_len > 16) {
/* CDBs longer than 16 bytes go in the additional_cdb area */
2866 additional_cdb_len = cmd->cmd_len - 16;
2867 if ((cmd->cmd_len % 4) != 0) {
2869 * SCSI command bigger than 16 bytes must be
2872 ql_log(ql_log_warn, vha, 0x3012,
2873 "scsi cmd len %d not multiple of 4 for cmd=%p.\n",
2875 goto queuing_error_fcp_cmnd;
2877 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2879 additional_cdb_len = 0;
2880 ctx->fcp_cmnd_len = 12 + 16 + 4;
2883 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2884 cmd_pkt->handle = make_handle(req->id, handle);
2887 * Zero out remaining portion of packet.
2888 * tagged queuing modifier -- default is TSK_SIMPLE (0).
2890 clr_ptr = (uint32_t *)cmd_pkt + 2;
2891 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2892 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2894 /* No data transfer */
2895 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
2896 cmd_pkt->byte_count = cpu_to_le32(0);
2900 /* Set transfer direction */
2901 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
2902 cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
2903 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
2904 vha->qla_stats.output_requests++;
2905 sp->fcport->edif.tx_bytes += scsi_bufflen(cmd);
2906 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
2907 cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
2908 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
2909 vha->qla_stats.input_requests++;
2910 sp->fcport->edif.rx_bytes += scsi_bufflen(cmd);
/* mark the I/O as eDIF-encrypted, using the current (not new) SA */
2913 cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF);
2914 cmd_pkt->control_flags &= ~(cpu_to_le16(CF_NEW_SA));
2916 /* One DSD is available in the Command Type 6 IOCB */
2918 cur_dsd = &cmd_pkt->fcp_dsd;
2920 /* Load data segments */
2921 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
2923 cont_a64_entry_t *cont_pkt;
2925 /* Allocate additional continuation packets? */
2926 if (avail_dsds == 0) {
2928 * Five DSDs are available in the Continuation
2931 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
2932 cur_dsd = cont_pkt->dsd;
2936 sle_dma = sg_dma_address(sg);
2937 put_unaligned_le64(sle_dma, &cur_dsd->address);
2938 cur_dsd->length = cpu_to_le32(sg_dma_len(sg));
2944 /* Set NPORT-ID and LUN number*/
2945 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2946 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2947 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2948 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2949 cmd_pkt->vp_index = sp->vha->vp_idx;
2951 cmd_pkt->entry_type = COMMAND_TYPE_6;
2953 /* Set total data segment count. */
2954 cmd_pkt->entry_count = (uint8_t)req_cnt;
2956 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2957 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2959 /* build FCP_CMND IU */
2960 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
2961 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
/* low bits of additional_cdb_len carry the rd/wr data direction */
2963 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2964 ctx->fcp_cmnd->additional_cdb_len |= 1;
2965 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2966 ctx->fcp_cmnd->additional_cdb_len |= 2;
2968 /* Populate the FCP_PRIO. */
2969 if (ha->flags.fcp_prio_enabled)
2970 ctx->fcp_cmnd->task_attribute |=
2971 sp->fcport->fcp_prio << 3;
2973 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
/* fcp_dl (data length) sits right after the CDB, big-endian */
2975 fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
2976 additional_cdb_len);
2977 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2979 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2980 put_unaligned_le64(ctx->fcp_cmnd_dma, &cmd_pkt->fcp_cmnd_dseg_address);
2982 sp->flags |= SRB_FCP_CMND_DMA_VALID;
2983 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2984 /* Set total data segment count. */
2985 cmd_pkt->entry_count = (uint8_t)req_cnt;
2986 cmd_pkt->entry_status = 0;
2988 /* Build command packet. */
2989 req->current_outstanding_cmd = handle;
2990 req->outstanding_cmds[handle] = sp;
2991 sp->handle = handle;
2992 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2993 req->cnt -= req_cnt;
2995 /* Adjust ring index. */
2998 if (req->ring_index == req->length) {
2999 req->ring_index = 0;
3000 req->ring_ptr = req->ring;
3005 sp->qpair->cmd_cnt++;
3006 /* Set chip new ring index. */
3007 wrt_reg_dword(req->req_q_in, req->ring_index);
3009 spin_unlock_irqrestore(lock, flags);
/* error unwind: free in reverse order of acquisition */
3013 queuing_error_fcp_cmnd:
3014 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3017 scsi_dma_unmap(cmd);
3019 if (sp->u.scmd.ct6_ctx) {
3020 mempool_free(sp->u.scmd.ct6_ctx, ha->ctx_mempool);
3021 sp->u.scmd.ct6_ctx = NULL;
3023 spin_unlock_irqrestore(lock, flags);
3025 return QLA_FUNCTION_FAILED;
3028 /**********************************************
3029 * edif update/delete sa_index list functions *
3030 **********************************************/
3032 /* clear the edif_indx_list for this port */
/*
 * Unlinks every edif_list_entry on the port's edif_indx_list under
 * indx_list_lock (entry freeing happens on lines elided here).
 */
3033 void qla_edif_list_del(fc_port_t *fcport)
3035 struct edif_list_entry *indx_lst;
3036 struct edif_list_entry *tindx_lst;
3037 struct list_head *indx_list = &fcport->edif.edif_indx_list;
3038 unsigned long flags = 0;
3040 spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
3041 list_for_each_entry_safe(indx_lst, tindx_lst, indx_list, next) {
3042 list_del(&indx_lst->next);
3045 spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
3052 /* allocate/retrieve an sa_index for a given spi */
/*
 * qla_edif_sadb_get_sa_index - look up (or allocate) the SA index for the
 * SPI carried in @sa_frame.
 * @fcport: remote port the SA belongs to
 * @sa_frame: SA update request from the authentication application
 *
 * If the port has no sadb entry yet, a new entry is created with a fresh
 * index from the free pool.  If an entry exists, its two slots are checked
 * for a matching SPI first; otherwise a free slot is filled with a newly
 * allocated index.  Returns the sa_index, RX_DELETE_NO_EDIF_SA_INDEX for
 * an rx delete with no entry, or INVALID_EDIF_SA_INDEX on failure.
 */
3053 static uint16_t qla_edif_sadb_get_sa_index(fc_port_t *fcport,
3054 struct qla_sa_update_frame *sa_frame)
3056 struct edif_sa_index_entry *entry;
3057 struct list_head *sa_list;
3059 int dir = sa_frame->flags & SAU_FLG_TX;
3062 scsi_qla_host_t *vha = fcport->vha;
3063 struct qla_hw_data *ha = vha->hw;
3064 unsigned long flags = 0;
3065 uint16_t nport_handle = fcport->loop_id;
3067 ql_dbg(ql_dbg_edif, vha, 0x3063,
3068 "%s: entry fc_port: %p, nport_handle: 0x%x\n",
3069 __func__, fcport, nport_handle);
/* choose the tx or rx sadb list based on the request direction */
3072 sa_list = &ha->sadb_tx_index_list;
3074 sa_list = &ha->sadb_rx_index_list;
3076 entry = qla_edif_sadb_find_sa_index_entry(nport_handle, sa_list);
/* rx delete with nothing to delete is reported distinctly */
3078 if ((sa_frame->flags & (SAU_FLG_TX | SAU_FLG_INV)) == SAU_FLG_INV) {
3079 ql_dbg(ql_dbg_edif, vha, 0x3063,
3080 "%s: rx delete request with no entry\n", __func__);
3081 return RX_DELETE_NO_EDIF_SA_INDEX;
3084 /* if there is no entry for this nport, add one */
3085 entry = kzalloc((sizeof(struct edif_sa_index_entry)), GFP_ATOMIC);
3087 return INVALID_EDIF_SA_INDEX;
3089 sa_index = qla_edif_get_sa_index_from_freepool(fcport, dir);
3090 if (sa_index == INVALID_EDIF_SA_INDEX) {
3092 return INVALID_EDIF_SA_INDEX;
/* slot 0 gets the new SA; slot 1 stays free for a future re-key */
3095 INIT_LIST_HEAD(&entry->next);
3096 entry->handle = nport_handle;
3097 entry->fcport = fcport;
3098 entry->sa_pair[0].spi = sa_frame->spi;
3099 entry->sa_pair[0].sa_index = sa_index;
3100 entry->sa_pair[1].spi = 0;
3101 entry->sa_pair[1].sa_index = INVALID_EDIF_SA_INDEX;
3102 spin_lock_irqsave(&ha->sadb_lock, flags);
3103 list_add_tail(&entry->next, sa_list);
3104 spin_unlock_irqrestore(&ha->sadb_lock, flags);
3105 ql_dbg(ql_dbg_edif, vha, 0x3063,
3106 "%s: Created new sadb entry for nport_handle 0x%x, spi 0x%x, returning sa_index %d\n",
3107 __func__, nport_handle, sa_frame->spi, sa_index);
3112 spin_lock_irqsave(&ha->sadb_lock, flags);
3114 /* see if we already have an entry for this spi */
3115 for (slot = 0; slot < 2; slot++) {
3116 if (entry->sa_pair[slot].sa_index == INVALID_EDIF_SA_INDEX) {
3119 if (entry->sa_pair[slot].spi == sa_frame->spi) {
3120 spin_unlock_irqrestore(&ha->sadb_lock, flags);
3121 ql_dbg(ql_dbg_edif, vha, 0x3063,
3122 "%s: sadb slot %d entry for lid 0x%x, spi 0x%x found, sa_index %d\n",
3123 __func__, slot, entry->handle, sa_frame->spi,
3124 entry->sa_pair[slot].sa_index);
3125 return entry->sa_pair[slot].sa_index;
3129 spin_unlock_irqrestore(&ha->sadb_lock, flags);
3131 /* both slots are used */
3132 if (free_slot == -1) {
3133 ql_dbg(ql_dbg_edif, vha, 0x3063,
3134 "%s: WARNING: No free slots in sadb for nport_handle 0x%x, spi: 0x%x\n",
3135 __func__, entry->handle, sa_frame->spi);
3136 ql_dbg(ql_dbg_edif, vha, 0x3063,
3137 "%s: Slot 0 spi: 0x%x sa_index: %d, Slot 1 spi: 0x%x sa_index: %d\n",
3138 __func__, entry->sa_pair[0].spi, entry->sa_pair[0].sa_index,
3139 entry->sa_pair[1].spi, entry->sa_pair[1].sa_index);
3141 return INVALID_EDIF_SA_INDEX;
3144 /* there is at least one free slot, use it */
3145 sa_index = qla_edif_get_sa_index_from_freepool(fcport, dir);
3146 if (sa_index == INVALID_EDIF_SA_INDEX) {
3147 ql_dbg(ql_dbg_edif, fcport->vha, 0x3063,
3148 "%s: empty freepool!!\n", __func__);
3149 return INVALID_EDIF_SA_INDEX;
3152 spin_lock_irqsave(&ha->sadb_lock, flags);
3153 entry->sa_pair[free_slot].spi = sa_frame->spi;
3154 entry->sa_pair[free_slot].sa_index = sa_index;
3155 spin_unlock_irqrestore(&ha->sadb_lock, flags);
3156 ql_dbg(ql_dbg_edif, fcport->vha, 0x3063,
3157 "%s: sadb slot %d entry for nport_handle 0x%x, spi 0x%x added, returning sa_index %d\n",
3158 __func__, free_slot, entry->handle, sa_frame->spi, sa_index);
3163 /* release any sadb entries -- only done at teardown */
/*
 * Drains both the rx and tx sadb index lists, unlinking each entry
 * (entry freeing is on lines elided here).  No locking: teardown-only.
 */
3164 void qla_edif_sadb_release(struct qla_hw_data *ha)
3166 struct edif_sa_index_entry *entry, *tmp;
3168 list_for_each_entry_safe(entry, tmp, &ha->sadb_rx_index_list, next) {
3169 list_del(&entry->next);
3173 list_for_each_entry_safe(entry, tmp, &ha->sadb_tx_index_list, next) {
3174 list_del(&entry->next);
3179 /**************************
3180 * sadb freepool functions
3181 **************************/
3183 /* build the rx and tx sa_index free pools -- only done at fcport init */
/*
 * Allocates the tx and rx SA-index bitmaps (EDIF_NUM_SA_INDEX bits each).
 * On rx allocation failure the tx map is freed again so the pair is
 * either fully present or fully absent.
 */
3184 int qla_edif_sadb_build_free_pool(struct qla_hw_data *ha)
3186 ha->edif_tx_sa_id_map =
3187 kcalloc(BITS_TO_LONGS(EDIF_NUM_SA_INDEX), sizeof(long), GFP_KERNEL);
3189 if (!ha->edif_tx_sa_id_map) {
3190 ql_log_pci(ql_log_fatal, ha->pdev, 0x0009,
3191 "Unable to allocate memory for sadb tx.\n");
3195 ha->edif_rx_sa_id_map =
3196 kcalloc(BITS_TO_LONGS(EDIF_NUM_SA_INDEX), sizeof(long), GFP_KERNEL);
3197 if (!ha->edif_rx_sa_id_map) {
3198 kfree(ha->edif_tx_sa_id_map);
3199 ha->edif_tx_sa_id_map = NULL;
3200 ql_log_pci(ql_log_fatal, ha->pdev, 0x0009,
3201 "Unable to allocate memory for sadb rx.\n");
3207 /* release the free pool - only done during fcport teardown */
/* Frees both SA-index bitmaps and NULLs the pointers (kfree(NULL) safe). */
3208 void qla_edif_sadb_release_free_pool(struct qla_hw_data *ha)
3210 kfree(ha->edif_tx_sa_id_map);
3211 ha->edif_tx_sa_id_map = NULL;
3212 kfree(ha->edif_rx_sa_id_map);
3213 ha->edif_rx_sa_id_map = NULL;
/*
 * __chk_edif_rx_sa_delete_pending - after a successful rx I/O on a new
 * SA, queue deletion of the superseded (old) rx SA.
 * @vha: host
 * @fcport: remote port the I/O completed on
 * @handle: completion handle (logged only)
 * @sa_index: SA index the completed I/O actually used
 *
 * If a delayed delete is pending for this port and the completed I/O used
 * the new SA, wait for EDIF_RX_DELETE_FILTER_COUNT such completions, then
 * invalidate delete_sa_index under indx_list_lock and post a sa-replace
 * work item to delete the old SA.  All state checks are done while
 * holding indx_list_lock; the work post happens after it is dropped.
 */
3216 static void __chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha,
3217 fc_port_t *fcport, uint32_t handle, uint16_t sa_index)
3219 struct edif_list_entry *edif_entry;
3220 struct edif_sa_ctl *sa_ctl;
3221 uint16_t delete_sa_index = INVALID_EDIF_SA_INDEX;
3222 unsigned long flags = 0;
3223 uint16_t nport_handle = fcport->loop_id;
3224 uint16_t cached_nport_handle;
3226 spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
3227 edif_entry = qla_edif_list_find_sa_index(fcport, nport_handle);
3229 spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
3230 return; /* no pending delete for this handle */
3234 * check for no pending delete for this index or iocb does not
3237 if (edif_entry->delete_sa_index == INVALID_EDIF_SA_INDEX ||
3238 edif_entry->update_sa_index != sa_index) {
3239 spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
3244 * wait until we have seen at least EDIF_DELAY_COUNT transfers before
3245 * queueing RX delete
3247 if (edif_entry->count++ < EDIF_RX_DELETE_FILTER_COUNT) {
3248 spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
3252 ql_dbg(ql_dbg_edif, vha, 0x5033,
3253 "%s: invalidating delete_sa_index, update_sa_index: 0x%x sa_index: 0x%x, delete_sa_index: 0x%x\n",
3254 __func__, edif_entry->update_sa_index, sa_index, edif_entry->delete_sa_index);
/* take ownership of the pending delete before dropping the lock */
3256 delete_sa_index = edif_entry->delete_sa_index;
3257 edif_entry->delete_sa_index = INVALID_EDIF_SA_INDEX;
3258 cached_nport_handle = edif_entry->handle;
3259 spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
3261 /* sanity check on the nport handle */
3262 if (nport_handle != cached_nport_handle) {
3263 ql_dbg(ql_dbg_edif, vha, 0x3063,
3264 "%s: POST SA DELETE nport_handle mismatch: lid: 0x%x, edif_entry nph: 0x%x\n",
3265 __func__, nport_handle, cached_nport_handle);
3268 /* find the sa_ctl for the delete and schedule the delete */
3269 sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, delete_sa_index, 0);
3271 ql_dbg(ql_dbg_edif, vha, 0x3063,
3272 "%s: POST SA DELETE sa_ctl: %p, index recvd %d\n",
3273 __func__, sa_ctl, sa_index);
3274 ql_dbg(ql_dbg_edif, vha, 0x3063,
3275 "delete index %d, update index: %d, nport handle: 0x%x, handle: 0x%x\n",
3277 edif_entry->update_sa_index, nport_handle, handle);
3279 sa_ctl->flags = EDIF_SA_CTL_FLG_DEL;
3280 set_bit(EDIF_SA_CTL_REPL, &sa_ctl->state);
3281 qla_post_sa_replace_work(fcport->vha, fcport,
3282 nport_handle, sa_ctl);
3284 ql_dbg(ql_dbg_edif, vha, 0x3063,
3285 "%s: POST SA DELETE sa_ctl not found for delete_sa_index: %d\n",
3286 __func__, delete_sa_index);
/*
 * Initiator-mode wrapper: extract the handle and SA index from a SCSI
 * status IOCB and run the pending-rx-SA-delete check, but only for
 * commands that moved data from the device (scsi reads).
 */
3290 void qla_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha,
3291 srb_t *sp, struct sts_entry_24xx *sts24)
3293 fc_port_t *fcport = sp->fcport;
3294 /* sa_index used by this iocb */
3295 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
3298 handle = (uint32_t)LSW(sts24->handle);
3300 /* find out if this status iosb is for a scsi read */
3301 if (cmd->sc_data_direction != DMA_FROM_DEVICE)
3304 return __chk_edif_rx_sa_delete_pending(vha, fcport, handle,
3305 le16_to_cpu(sts24->edif_sa_index));
/* Target-mode wrapper: same check, fed from a CTIO completion packet. */
3308 void qlt_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha, fc_port_t *fcport,
3309 struct ctio7_from_24xx *pkt)
3311 __chk_edif_rx_sa_delete_pending(vha, fcport,
3312 pkt->handle, le16_to_cpu(pkt->edif_sa_index));
/*
 * qla_parse_auth_els_ctl - populate the qla_els_pt_arg for an auth ELS
 * bsg request from the srb's remapped DMA buffers and the bsg payload.
 *
 * For SEND_ELS_REPLY sub-commands the control flags, rx exchange address
 * and LS_ACC/LS_RJT opcode come from the vendor-specific request extras;
 * otherwise the opcode comes from the standard bsg ELS request.
 */
3315 static void qla_parse_auth_els_ctl(struct srb *sp)
3317 struct qla_els_pt_arg *a = &sp->u.bsg_cmd.u.els_arg;
3318 struct bsg_job *bsg_job = sp->u.bsg_cmd.bsg_job;
3319 struct fc_bsg_request *request = bsg_job->request;
3320 struct qla_bsg_auth_els_request *p =
3321 (struct qla_bsg_auth_els_request *)bsg_job->request;
/* tx/rx buffers were already remapped into driver DMA memory */
3323 a->tx_len = a->tx_byte_count = sp->remap.req.len;
3324 a->tx_addr = sp->remap.req.dma;
3325 a->rx_len = a->rx_byte_count = sp->remap.rsp.len;
3326 a->rx_addr = sp->remap.rsp.dma;
3328 if (p->e.sub_cmd == SEND_ELS_REPLY) {
/* extra_control_flags occupies the IOCB control-flags bit field */
3329 a->control_flags = p->e.extra_control_flags << 13;
3330 a->rx_xchg_address = cpu_to_le32(p->e.extra_rx_xchg_address);
3331 if (p->e.extra_control_flags == BSG_CTL_FLAG_LS_ACC)
3332 a->els_opcode = ELS_LS_ACC;
3333 else if (p->e.extra_control_flags == BSG_CTL_FLAG_LS_RJT)
3334 a->els_opcode = ELS_LS_RJT;
3336 a->did = sp->fcport->d_id;
3337 a->els_opcode = request->rqst_data.h_els.command_code;
3338 a->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3339 a->vp_idx = sp->vha->vp_idx;
/*
 * qla_edif_process_els - handle an authentication ELS passthrough bsg
 * request from the application.
 * @vha: host the request targets
 * @bsg_job: the bsg job carrying the ELS payload
 *
 * Resolves the destination fcport from the request d_id, validates state
 * (logged in, host online, FWI2-capable), allocates an srb with DMA-pool
 * request/response buffers, copies the bsg payload in, and fires the ELS
 * via qla2x00_start_sp().  Failure paths unwind the DMA buffers via the
 * done_free_remap_* labels; status is reported through bsg_reply->result.
 */
3342 int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
3344 struct fc_bsg_request *bsg_request = bsg_job->request;
3345 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
3346 fc_port_t *fcport = NULL;
3347 struct qla_hw_data *ha = vha->hw;
3349 int rval = (DID_ERROR << 16);
3351 struct qla_bsg_auth_els_request *p =
3352 (struct qla_bsg_auth_els_request *)bsg_job->request;
/* bsg port_id is big-endian byte order: [0]=domain [1]=area [2]=al_pa */
3354 d_id.b.al_pa = bsg_request->rqst_data.h_els.port_id[2];
3355 d_id.b.area = bsg_request->rqst_data.h_els.port_id[1];
3356 d_id.b.domain = bsg_request->rqst_data.h_els.port_id[0];
3358 /* find matching d_id in fcport list */
3359 fcport = qla2x00_find_fcport_by_pid(vha, &d_id);
3361 ql_dbg(ql_dbg_edif, vha, 0x911a,
3362 "%s fcport not find online portid=%06x.\n",
3363 __func__, d_id.b24);
3364 SET_DID_STATUS(bsg_reply->result, DID_ERROR);
3368 if (qla_bsg_check(vha, bsg_job, fcport))
3371 if (fcport->loop_id == FC_NO_LOOP_ID) {
3372 ql_dbg(ql_dbg_edif, vha, 0x910d,
3373 "%s ELS code %x, no loop id.\n", __func__,
3374 bsg_request->rqst_data.r_els.els_code);
3375 SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET);
3379 if (!vha->flags.online) {
3380 ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
3381 SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET);
3386 /* pass through is supported only for ISP 4Gb or higher */
3387 if (!IS_FWI2_CAPABLE(ha)) {
3388 ql_dbg(ql_dbg_user, vha, 0x7001,
3389 "ELS passthru not supported for ISP23xx based adapters.\n");
3390 SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET);
3395 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3397 ql_dbg(ql_dbg_user, vha, 0x7004,
3398 "Failed get sp pid=%06x\n", fcport->d_id.b24);
3400 SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
/* stage the ELS request payload in a driver-owned DMA buffer */
3404 sp->remap.req.len = bsg_job->request_payload.payload_len;
3405 sp->remap.req.buf = dma_pool_alloc(ha->purex_dma_pool,
3406 GFP_KERNEL, &sp->remap.req.dma);
3407 if (!sp->remap.req.buf) {
3408 ql_dbg(ql_dbg_user, vha, 0x7005,
3409 "Failed allocate request dma len=%x\n",
3410 bsg_job->request_payload.payload_len);
3412 SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
3416 sp->remap.rsp.len = bsg_job->reply_payload.payload_len;
3417 sp->remap.rsp.buf = dma_pool_alloc(ha->purex_dma_pool,
3418 GFP_KERNEL, &sp->remap.rsp.dma);
3419 if (!sp->remap.rsp.buf) {
3420 ql_dbg(ql_dbg_user, vha, 0x7006,
3421 "Failed allocate response dma len=%x\n",
3422 bsg_job->reply_payload.payload_len);
3424 SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
3425 goto done_free_remap_req;
3427 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
3428 bsg_job->request_payload.sg_cnt, sp->remap.req.buf,
3430 sp->remap.remapped = true;
3432 sp->type = SRB_ELS_CMD_HST_NOLOGIN;
3433 sp->name = "SPCN_BSG_HST_NOLOGIN";
3434 sp->u.bsg_cmd.bsg_job = bsg_job;
3435 qla_parse_auth_els_ctl(sp);
3437 sp->free = qla2x00_bsg_sp_free;
3438 sp->done = qla2x00_bsg_job_done;
3440 rval = qla2x00_start_sp(sp);
3442 ql_dbg(ql_dbg_edif, vha, 0x700a,
3443 "%s %s %8phN xchg %x ctlflag %x hdl %x reqlen %xh bsg ptr %p\n",
3444 __func__, sc_to_str(p->e.sub_cmd), fcport->port_name,
3445 p->e.extra_rx_xchg_address, p->e.extra_control_flags,
3446 sp->handle, sp->remap.req.len, bsg_job);
3448 if (rval != QLA_SUCCESS) {
3449 ql_log(ql_log_warn, vha, 0x700e,
3450 "qla2x00_start_sp failed = %d\n", rval);
3451 SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
3453 goto done_free_remap_rsp;
/* error unwind: free DMA buffers in reverse allocation order */
3457 done_free_remap_rsp:
3458 dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf,
3460 done_free_remap_req:
3461 dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf,
/*
 * qla_edif_sess_down - notify the authentication application that an
 * eDIF session went offline.
 *
 * Only acts when the app considered the session online and the event
 * doorbell is active: clears app_sess_online, posts a SESSION_SHUTDOWN
 * doorbell event, and queues an FCH_EVT_PORT_OFFLINE async event.
 */
3470 void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess)
3472 if (sess->edif.app_sess_online && vha->e_dbell.db_flags & EDB_ACTIVE) {
3473 ql_dbg(ql_dbg_disc, vha, 0xf09c,
3474 "%s: sess %8phN send port_offline event\n",
3475 __func__, sess->port_name);
3476 sess->edif.app_sess_online = 0;
3477 qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SESSION_SHUTDOWN,
3478 sess->d_id.b24, 0, sess);
3479 qla2x00_post_aen_work(vha, FCH_EVT_PORT_OFFLINE, sess->d_id.b24);
3483 void qla_edif_clear_appdata(struct scsi_qla_host *vha, struct fc_port *fcport)
3485 if (!(fcport->flags & FCF_FCSP_DEVICE))
3488 qla_edb_clear(vha, fcport->d_id);
3489 qla_enode_clear(vha, fcport->d_id);