/*
 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 *  based on qla2x00t.c code:
 *
 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 *  Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "qla_def.h"
#include "qla_target.h"
static int ql2xtgt_tape_enable;
module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xtgt_tape_enable,
		"Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");

static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
	"Determines when initiator mode will be enabled. Possible values: "
	"\"exclusive\" - initiator mode will be enabled on load, "
	"disabled on enabling target mode and then on disabling target mode "
	"enabled back; "
	"\"disabled\" - initiator mode will never be enabled; "
	"\"enabled\" (default) - initiator mode will always stay enabled.");
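/*
 * Usage sketch (hypothetical command line, not driver code): both knobs
 * above are ordinary qla2xxx module parameters and can be set at load
 * time, e.g.
 *
 *	modprobe qla2xxx ql2xtgt_tape_enable=1 qlini_mode="disabled"
 *
 * ql2xtgt_tape_enable is also writable at runtime via
 * /sys/module/qla2xxx/parameters/ql2xtgt_tape_enable (S_IWUSR), while
 * qlini_mode is read-only after load (S_IRUGO).
 */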
int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

static int temp_sam_status = SAM_STAT_BUSY;
/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};

/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE	0	/* simple task attribute */
#define FCP_PTA_HEADQ	1	/* head of queue task attribute */
#define FCP_PTA_ORDERED	2	/* ordered task attribute */
#define FCP_PTA_ACA	4	/* auto. contingent allegiance */
#define FCP_PTA_MASK	7	/* mask for task attribute field */
#define FCP_PRI_SHIFT	3	/* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK	0x80	/* reserved bits in priority field */
/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation, at the time those
 * functions are called:
 *
 * - Either the context is IRQ and only the IRQ handler can modify HW data,
 *   including rings related fields,
 *
 * - Or access to target mode variables from struct qla_tgt doesn't
 *   cross those functions' boundaries, except tgt_stop, which is
 *   additionally protected by irq_cmd_count.
 */
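/*
 * Illustrative sketch of that calling convention (example only, details
 * elided; this is not driver code):
 *
 *	spin_lock_irqsave(&ha->hardware_lock, flags);
 *	...
 *	qlt_issue_marker(vha, 1);		// may unlock/relock hardware_lock
 *	pkt = qla2x00_alloc_iocbs(vha, NULL);	// ditto
 *	...
 *	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 */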
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
	struct atio_from_isp *pkt, uint8_t);
static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
	int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
	*cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
	struct qla_tgt_srr_imm *imm, int ha_lock);
static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);
static void qlt_clear_tgt_db(struct qla_tgt *tgt);
static void qlt_send_notify_ack(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *imm, int ha_locked);
/*
 * Global Variables
 */
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
static struct kmem_cache *qla_tgt_plogi_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);
/* This API intentionally takes dest as a parameter, rather than returning
 * int value, to avoid the caller forgetting to issue wmb() after the store */
void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);

	*dest = atomic_inc_return(&base_vha->generation_tick);
	/* memory barrier */
	wmb();
}
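/*
 * Usage sketch (example only): because the tick is stored through "dest"
 * inside the same call that issues the wmb(), a caller cannot forget the
 * barrier:
 *
 *	qlt_do_generation_tick(vha, &sess->generation);
 *
 * as opposed to the error-prone open-coded variant
 * "sess->generation = tick; wmb();".
 */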
/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
static struct qla_tgt_sess *qlt_find_sess_by_port_name(
	struct qla_tgt *tgt,
	const uint8_t *port_name)
{
	struct qla_tgt_sess *sess;

	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
		if (!memcmp(sess->port_name, port_name, WWN_SIZE))
			return sess;
	}

	return NULL;
}
/* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
	/* Send marker if required */
	if (unlikely(vha->marker_needed != 0)) {
		int rc = qla2x00_issue_marker(vha, vha_locked);
		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
			    "qla_target(%d): issue_marker() failed\n",
			    vha->vp_idx);
		}
		return rc;
	}
	return QLA_SUCCESS;
}
struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
	uint8_t *d_id)
{
	struct qla_hw_data *ha = vha->hw;
	uint8_t vp_idx;

	if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
		return NULL;

	if (vha->d_id.b.al_pa == d_id[2])
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}
struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
	uint16_t vp_idx)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx == vp_idx)
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}
static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);

	vha->hw->tgt.num_pend_cmds++;
	if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds)
		vha->qla_stats.stat_max_pend_cmds =
			vha->hw->tgt.num_pend_cmds;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);

	vha->hw->tgt.num_pend_cmds--;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}
static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	ql_dbg(ql_dbg_tgt, vha, 0xe072,
		"%s: qla_target(%d): type %x ox_id %04x\n",
		__func__, vha->vp_idx, atio->u.raw.entry_type,
		be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);
		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id[0],
			    atio->u.isp24.fcp_hdr.d_id[1],
			    atio->u.isp24.fcp_hdr.d_id[2]);
			break;
		}
		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != 0xFFFF)) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return false;
}
void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
{
	switch (pkt->entry_type) {
	case CTIO_CRC2:
		ql_dbg(ql_dbg_tgt, vha, 0xe073,
			"qla_target(%d):%s: CRC2 Response pkt\n",
			vha->vp_idx, __func__);
		/* fall through */
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe041,
			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)pkt;

		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe042,
			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->u.isp24.vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case NOTIFY_ACK_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

		if (0xFF != entry->u.isp24.vp_index) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe043,
				    "qla_target(%d): Response "
				    "pkt (NOTIFY_ACK_TYPE) "
				    "received, with unknown "
				    "vp_index %d\n", vha->vp_idx,
				    entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe044,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RESP_24XX:
	{
		struct abts_resp_to_24xx *entry =
		    (struct abts_resp_to_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe045,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RESP_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	default:
		qlt_response_pkt(vha, pkt);
		break;
	}
}
/*
 * All qlt_plogi_ack_t operations are protected by hardware_lock
 */

/*
 * This is a zero-based ref-counting scheme; hardware_lock guarantees that
 * ref_count is not modified concurrently.
 * Upon successful return the content of iocb is undefined.
 */
static qlt_plogi_ack_t *
qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
		       struct imm_ntfy_from_isp *iocb)
{
	qlt_plogi_ack_t *pla;

	list_for_each_entry(pla, &vha->plogi_ack_list, list) {
		if (pla->id.b24 == id->b24) {
			qlt_send_term_imm_notif(vha, &pla->iocb, 1);
			pla->iocb = *iocb;
			return pla;
		}
	}

	pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC);
	if (!pla) {
		ql_dbg(ql_dbg_async, vha, 0x5088,
		       "qla_target(%d): Allocation of plogi_ack failed\n",
		       vha->vp_idx);
		return NULL;
	}

	pla->id = *id;
	pla->iocb = *iocb;
	list_add_tail(&pla->list, &vha->plogi_ack_list);

	return pla;
}
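/*
 * Lifecycle sketch (illustrative): an entry returned by
 * qlt_plogi_ack_find_add() starts at ref_count 0; each session that still
 * depends on it takes a reference via qlt_plogi_ack_link(), and the last
 * qlt_plogi_ack_unref() sends the deferred NOTIFY ACK and frees it:
 *
 *	pla = qlt_plogi_ack_find_add(vha, &id, iocb);		   // ref 0
 *	qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN); // ref 1
 *	...
 *	qlt_plogi_ack_unref(vha, pla);	// ref 1 -> 0: ACK sent, pla freed
 */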
static void qlt_plogi_ack_unref(struct scsi_qla_host *vha, qlt_plogi_ack_t *pla)
{
	BUG_ON(!pla->ref_count);
	pla->ref_count--;

	if (pla->ref_count)
		return;

	ql_dbg(ql_dbg_async, vha, 0x5089,
	    "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x"
	    " exch %#x ox_id %#x\n", pla->iocb.u.isp24.port_name,
	    pla->iocb.u.isp24.port_id[2], pla->iocb.u.isp24.port_id[1],
	    pla->iocb.u.isp24.port_id[0],
	    le16_to_cpu(pla->iocb.u.isp24.nport_handle),
	    pla->iocb.u.isp24.exchange_address, pla->iocb.ox_id);
	qlt_send_notify_ack(vha, &pla->iocb, 0, 0, 0, 0, 0, 0);

	list_del(&pla->list);
	kmem_cache_free(qla_tgt_plogi_cachep, pla);
}
static void
qlt_plogi_ack_link(struct scsi_qla_host *vha, qlt_plogi_ack_t *pla,
    struct qla_tgt_sess *sess, qlt_plogi_link_t link)
{
	/* Inc ref_count first because link might already be pointing at pla */
	pla->ref_count++;

	if (sess->plogi_link[link])
		qlt_plogi_ack_unref(vha, sess->plogi_link[link]);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
	    "Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
	    " s_id %02x:%02x:%02x, ref=%d\n", sess, link, sess->port_name,
	    pla->iocb.u.isp24.port_name, pla->iocb.u.isp24.port_id[2],
	    pla->iocb.u.isp24.port_id[1], pla->iocb.u.isp24.port_id[0],
	    pla->ref_count);

	sess->plogi_link[link] = pla;
}
typedef struct {
	/* These fields must be initialized by the caller */
	port_id_t id;
	/*
	 * Number of cmds dropped while we were waiting for the initiator
	 * to ack LOGO; initialize to 1 if the LOGO is triggered by a
	 * command, otherwise to 0.
	 */
	int cmd_count;

	/* These fields are used by the callee */
	struct list_head list;
} qlt_port_logo_t;
static void
qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
{
	qlt_port_logo_t *tmp;
	int res;

	mutex_lock(&vha->vha_tgt.tgt_mutex);

	list_for_each_entry(tmp, &vha->logo_list, list) {
		if (tmp->id.b24 == logo->id.b24) {
			tmp->cmd_count += logo->cmd_count;
			mutex_unlock(&vha->vha_tgt.tgt_mutex);
			return;
		}
	}

	list_add_tail(&logo->list, &vha->logo_list);

	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id);

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	list_del(&logo->list);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
	    "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n",
	    logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa,
	    logo->cmd_count, res);
}
static void qlt_free_session_done(struct work_struct *work)
{
	struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	bool logout_started = false;
	fc_port_t fcport;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
		"%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
		" s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
		__func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
		sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
		sess->logout_on_delete, sess->keep_nport_handle,
		sess->send_els_logo);

	BUG_ON(!tgt);

	if (sess->send_els_logo) {
		qlt_port_logo_t logo;
		logo.id = sess->s_id;
		logo.cmd_count = 0;
		qlt_send_first_logo(vha, &logo);
	}

	if (sess->logout_on_delete) {
		int rc;

		memset(&fcport, 0, sizeof(fcport));
		fcport.loop_id = sess->loop_id;
		fcport.d_id = sess->s_id;
		memcpy(fcport.port_name, sess->port_name, WWN_SIZE);
		fcport.vha = vha;
		fcport.tgt_session = sess;

		rc = qla2x00_post_async_logout_work(vha, &fcport, NULL);
		if (rc != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0xf085,
			       "Schedule logo failed sess %p rc %d\n",
			       sess, rc);
		else
			logout_started = true;
	}

	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	if (logout_started) {
		bool traced = false;

		while (!ACCESS_ONCE(sess->logout_completed)) {
			if (!traced) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
					"%s: waiting for sess %p logout\n",
					__func__, sess);
				traced = true;
			}
			msleep(100);
		}

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf087,
			"%s: sess %p logout completed\n",
			__func__, sess);
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);

	{
		qlt_plogi_ack_t *own =
		    sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
		qlt_plogi_ack_t *con =
		    sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];

		if (con) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
			    "se_sess %p / sess %p port %8phC is gone,"
			    " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
			    sess->se_sess, sess, sess->port_name,
			    own ? "releasing own PLOGI" :
			    "no own PLOGI pending",
			    own ? own->ref_count : -1,
			    con->iocb.u.isp24.port_name, con->ref_count);
			qlt_plogi_ack_unref(vha, con);
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
			    "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n",
			    sess->se_sess, sess, sess->port_name,
			    own ? "releasing own PLOGI" :
			    "no own PLOGI pending",
			    own ? own->ref_count : -1);
		}

		if (own)
			qlt_plogi_ack_unref(vha, own);
	}

	list_del(&sess->sess_list_entry);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
	    "Unregistration of sess %p finished\n", sess);

	kfree(sess);

	/*
	 * We need to protect against race, when tgt is freed before or
	 * inside wake_up()
	 */
	tgt->sess_count--;
	if (tgt->sess_count == 0)
		wake_up_all(&tgt->waitQ);
}
/* ha->tgt.sess_lock supposed to be held on entry */
static void qlt_release_session(struct kref *kref)
{
	struct qla_tgt_sess *sess =
		container_of(kref, struct qla_tgt_sess, sess_kref);
	struct scsi_qla_host *vha = sess->vha;

	if (sess->se_sess)
		vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	if (!list_empty(&sess->del_list_entry))
		list_del_init(&sess->del_list_entry);
	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;

	INIT_WORK(&sess->free_work, qlt_free_session_done);
	schedule_work(&sess->free_work);
}

void qlt_put_sess(struct qla_tgt_sess *sess)
{
	if (!sess)
		return;

	assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
	kref_put(&sess->sess_kref, qlt_release_session);
}
EXPORT_SYMBOL(qlt_put_sess);
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	uint16_t loop_id;
	int res = 0;
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	unsigned long flags;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
		/* Global event */
		atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
#if 0 /* FIXME: do we need to choose a session here? */
		if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
			sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
			    typeof(*sess), sess_list_entry);
			switch (mcmd) {
			case QLA_TGT_NEXUS_LOSS_SESS:
				mcmd = QLA_TGT_NEXUS_LOSS;
				break;
			case QLA_TGT_ABORT_ALL_SESS:
				mcmd = QLA_TGT_ABORT_ALL;
				break;
			case QLA_TGT_NEXUS_LOSS:
			case QLA_TGT_ABORT_ALL:
				break;
			default:
				ql_dbg(ql_dbg_tgt, vha, 0xe046,
				    "qla_target(%d): Not allowed "
				    "command %x in %s", vha->vp_idx,
				    mcmd, __func__);
				sess = NULL;
				break;
			}
		} else
			sess = NULL;
#endif
	} else {
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess) {
		res = -ESRCH;
		return res;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
	    mcmd, loop_id);

	return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
}
/* ha->tgt.sess_lock supposed to be held on entry */
static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
	bool immediate)
{
	struct qla_tgt *tgt = sess->tgt;
	uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;

	if (sess->deleted) {
		/* Upgrade to unconditional deletion in case it was temporary */
		if (immediate && sess->deleted == QLA_SESS_DELETION_PENDING)
			list_del(&sess->del_list_entry);
		else
			return;
	}

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
	    "Scheduling sess %p for deletion\n", sess);

	if (immediate) {
		sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
		list_add(&sess->del_list_entry, &tgt->del_sess_list);
	} else {
		sess->deleted = QLA_SESS_DELETION_PENDING;
		list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
	}

	sess->expires = jiffies + dev_loss_tmo * HZ;

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
	    "qla_target(%d): session for port %8phC (loop ID %d s_id %02x:%02x:%02x)"
	    " scheduled for deletion in %u secs (expires: %lu) immed: %d, logout: %d, gen: %#x\n",
	    sess->vha->vp_idx, sess->port_name, sess->loop_id,
	    sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
	    dev_loss_tmo, sess->expires, immediate, sess->logout_on_delete,
	    sess->generation);

	if (immediate)
		mod_delayed_work(system_wq, &tgt->sess_del_work, 0);
	else
		schedule_delayed_work(&tgt->sess_del_work,
		    sess->expires - jiffies);
}
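/*
 * Worked example (hypothetical numbers): with port_down_retry_count = 30,
 * dev_loss_tmo above is 30 + 5 = 35, so a non-immediate deletion sets
 *
 *	sess->expires = jiffies + 35 * HZ;
 *
 * i.e. the session lingers for 35 seconds, giving the initiator a chance
 * to re-login before qlt_del_sess_work_fn() finally tears it down.
 */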
/* ha->tgt.sess_lock supposed to be held on entry */
static void qlt_clear_tgt_db(struct qla_tgt *tgt)
{
	struct qla_tgt_sess *sess;

	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry)
		qlt_schedule_sess_for_deletion(sess, true);

	/* At this point tgt could be already dead */
}
static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
	uint16_t *loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t gid_list_dma;
	struct gid_list_info *gid_list;
	char *id_iter;
	int res, rc, i;
	uint16_t entries;

	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
		    "qla_target(%d): DMA Alloc failed of %u\n",
		    vha->vp_idx, qla2x00_gid_list_size(ha));
		return -ENOMEM;
	}

	/* Get list of logged in devices */
	rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
		    "qla_target(%d): get_id_list() failed: %x\n",
		    vha->vp_idx, rc);
		res = -EBUSY;
		goto out_free_id_list;
	}

	id_iter = (char *)gid_list;
	res = -ENOENT;
	for (i = 0; i < entries; i++) {
		struct gid_list_info *gid = (struct gid_list_info *)id_iter;
		if ((gid->al_pa == s_id[2]) &&
		    (gid->area == s_id[1]) &&
		    (gid->domain == s_id[0])) {
			*loop_id = le16_to_cpu(gid->loop_id);
			res = 0;
			break;
		}
		id_iter += ha->gid_list_info_size;
	}

out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return res;
}
/* ha->tgt.sess_lock supposed to be held on entry */
static void qlt_undelete_sess(struct qla_tgt_sess *sess)
{
	BUG_ON(sess->deleted != QLA_SESS_DELETION_PENDING);

	list_del_init(&sess->del_list_entry);
	sess->deleted = 0;
}
static void qlt_del_sess_work_fn(struct delayed_work *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt,
	    sess_del_work);
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	unsigned long flags, elapsed;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	while (!list_empty(&tgt->del_sess_list)) {
		sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
		    del_list_entry);
		elapsed = jiffies;
		if (time_after_eq(elapsed, sess->expires)) {
			/* No turning back */
			list_del_init(&sess->del_list_entry);
			sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
			    "Timeout: sess %p about to be deleted\n",
			    sess);
			if (sess->se_sess)
				ha->tgt.tgt_ops->shutdown_sess(sess);
			qlt_put_sess(sess);
		} else {
			schedule_delayed_work(&tgt->sess_del_work,
			    sess->expires - elapsed);
			break;
		}
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
/*
 * Adds an extra ref to allow to drop hw lock after adding sess to the list.
 * Caller must put it.
 */
static struct qla_tgt_sess *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	unsigned long flags;

	/* Check to avoid double sessions */
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list,
				sess_list_entry) {
		if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
			    "Double sess %p found (s_id %x:%x:%x, "
			    "loop_id %d), updating to d_id %x:%x:%x, "
			    "loop_id %d", sess, sess->s_id.b.domain,
			    sess->s_id.b.al_pa, sess->s_id.b.area,
			    sess->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.al_pa, fcport->d_id.b.area,
			    fcport->loop_id);

			/* Cannot undelete at this point */
			if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
				spin_unlock_irqrestore(&ha->tgt.sess_lock,
				    flags);
				return NULL;
			}

			if (sess->deleted)
				qlt_undelete_sess(sess);

			if (!sess->se_sess) {
				if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
				    &sess->port_name[0], sess) < 0) {
					spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
					return NULL;
				}
			}

			kref_get(&sess->sess_kref);
			ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
						(fcport->flags & FCF_CONF_COMP_SUPPORTED));

			if (sess->local && !local)
				sess->local = 0;

			qlt_do_generation_tick(vha, &sess->generation);

			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

			return sess;
		}
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
		    "qla_target(%u): session allocation failed, all commands "
		    "from port %8phC will be refused", vha->vp_idx,
		    fcport->port_name);
		return NULL;
	}
	sess->tgt = vha->vha_tgt.qla_tgt;
	sess->vha = vha;
	sess->s_id = fcport->d_id;
	sess->loop_id = fcport->loop_id;
	sess->local = local;
	kref_init(&sess->sess_kref);
	INIT_LIST_HEAD(&sess->del_list_entry);

	/* Under normal circumstances we want to logout from firmware when
	 * session eventually ends and release corresponding nport handle.
	 * In the exception cases (e.g. when new PLOGI is waiting) corresponding
	 * code will adjust these flags as necessary. */
	sess->logout_on_delete = 1;
	sess->keep_nport_handle = 0;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
	    sess, vha->vha_tgt.qla_tgt);

	sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED);
	BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
	memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
	vha->vha_tgt.qla_tgt->sess_count++;
	qlt_do_generation_tick(vha, &sess->generation);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
	    "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
	    vha->vp_idx, local ? "local " : "", fcport->port_name,
	    fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area,
	    sess->s_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");

	/*
	 * Determine if this fc_port->port_name is allowed to access
	 * target mode using explicit NodeACLs+MappedLUNs, or using
	 * TPG demo mode. If this is successful a target mode FC nexus
	 * is created.
	 */
	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess) < 0) {
		return NULL;
	} else {
		/*
		 * Take an extra reference to ->sess_kref here to handle
		 * qla_tgt_sess access across ->tgt.sess_lock reacquire.
		 */
		kref_get(&sess->sess_kref);
	}

	return sess;
}
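/*
 * Reference-pairing sketch (example only): the extra kref_get() above is
 * taken on behalf of the caller, which must drop it when done, e.g.:
 *
 *	mutex_lock(&vha->vha_tgt.tgt_mutex);
 *	sess = qlt_create_sess(vha, fcport, false);
 *	mutex_unlock(&vha->vha_tgt.tgt_mutex);
 *	...
 *	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
 *	qlt_put_sess(sess);		// drops the creation reference
 *	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 */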
/*
 * Called from qla2x00_reg_remote_port()
 */
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_sess *sess;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	if (!tgt || (fcport->port_type != FCT_INITIATOR))
		return;

	if (qla_ini_mode_enabled(vha))
		return;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}
	sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
	if (!sess) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_create_sess(vha, fcport, false);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	} else if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
		/* Point of no return */
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	} else {
		kref_get(&sess->sess_kref);

		if (sess->deleted) {
			qlt_undelete_sess(sess);

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
			    "qla_target(%u): %ssession for port %8phC "
			    "(loop ID %d) reappeared\n", vha->vp_idx,
			    sess->local ? "local " : "", sess->port_name,
			    sess->loop_id);

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
			    "Reappeared sess %p\n", sess);
		}
		ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
					(fcport->flags & FCF_CONF_COMP_SUPPORTED));
	}

	if (sess && sess->local) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
		    "qla_target(%u): local session for "
		    "port %8phC (loop ID %d) became global\n", vha->vp_idx,
		    fcport->port_name, sess->loop_id);
		sess->local = 0;
	}
	qlt_put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
/*
 * max_gen - specifies the maximum session generation
 * at which this deletion request is still valid
 */
void
qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_sess *sess;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	if (!tgt)
		return;

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}
	sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
	if (!sess) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}

	if (max_gen - sess->generation < 0) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
		    "Ignoring stale deletion request for se_sess %p / sess %p"
		    " for port %8phC, req_gen %d, sess_gen %d\n",
		    sess->se_sess, sess, sess->port_name, max_gen,
		    sess->generation);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);

	sess->local = 1;
	qlt_schedule_sess_for_deletion(sess, false);
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}
static inline int test_tgt_sess_count(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;
	int res;
	/*
	 * We need to protect against race, when tgt is freed before or
	 * inside wake_up()
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
	    "tgt %p, empty(sess_list)=%d sess_count=%d\n",
	    tgt, list_empty(&tgt->sess_list), tgt->sess_count);
	res = (tgt->sess_count == 0);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}
/* Called by tcm_qla2xxx configfs code */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	mutex_lock(&qla_tgt_mutex);
	if (!vha->fc_vport) {
		struct Scsi_Host *sh = vha->host;
		struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
		int npiv_vports;

		spin_lock_irqsave(sh->host_lock, flags);
		npiv_vports = (fc_host->npiv_vports_inuse);
		spin_unlock_irqrestore(sh->host_lock, flags);

		if (npiv_vports) {
			mutex_unlock(&qla_tgt_mutex);
			return -EPERM;
		}
	}
	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		mutex_unlock(&qla_tgt_mutex);
		return -EPERM;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we still can get an incoming packet.
	 */
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&qla_tgt_mutex);

	flush_delayed_work(&tgt->sess_del_work);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_scheduled_work();
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: list_empty(sess_list)=%d "
	    "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
	    tgt->sess_count);

	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));

	/* Big hammer */
	if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
	return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);
/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	unsigned long flags;

	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
	    "Waiting for %d IRQ commands to complete (tgt %p)",
	    tgt->irq_cmd_count, tgt);

	mutex_lock(&tgt->ha->optrom_mutex);
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	while ((tgt->irq_cmd_count != 0) || (tgt->atio_irq_cmd_count != 0)) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		udelay(2);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&tgt->ha->optrom_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished",
	    tgt);
}
EXPORT_SYMBOL(qlt_stop_phase2);
/* Called from qlt_remove_target() -> qla2x00_remove_one() */
static void qlt_release(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;

	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
		qlt_stop_phase2(tgt);

	vha->vha_tgt.qla_tgt = NULL;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
	const void *param, unsigned int param_size)
{
	struct qla_tgt_sess_work_param *prm;
	unsigned long flags;

	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
	if (!prm) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
		    "qla_target(%d): Unable to create session "
		    "work, command will be refused", 0);
		return -ENOMEM;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
	    "Scheduling work (type %d, prm %p)"
	    " to find session for param %p (size %d, tgt %p)\n",
	    type, prm, param, param_size, tgt);

	prm->type = type;
	memcpy(&prm->tm_iocb, param, param_size);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	schedule_work(&tgt->sess_work);

	return 0;
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_send_notify_ack(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (vha->vha_tgt.qla_tgt != NULL)
		vha->vha_tgt.qla_tgt->notify_ack_expected++;

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, uint32_t status,
	bool ids_reversed)
{
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	uint32_t f_ctl;
	uint8_t *p;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x)\n",
	    ha, abts, status);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;
	if (ids_reversed) {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
	} else {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
	}
	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (status == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
	struct abts_resp_from_24xx_fw *entry)
{
	struct ctio7_to_24xx *ctio;

	ql_dbg(ql_dbg_tgt, vha, 0xe007,
	    "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	/*
	 * We have received the firmware's response to the ABTS response we
	 * generated, so the ID fields in it are reversed.
	 */
	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->nport_handle = entry->nport_handle;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
	ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
	ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
	ctio->exchange_addr = entry->exchange_addr_to_abort;
	ctio->u.status1.flags = cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
	    CTIO7_FLAGS_TERMINATE);
	ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);

	qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
	    FCP_TMF_CMPL, true);
}
static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
{
	struct qla_tgt_sess_op *op;
	struct qla_tgt_cmd *cmd;

	spin_lock(&vha->cmd_list_lock);

	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
		if (tag == op->atio.u.isp24.exchange_addr) {
			op->aborted = true;
			spin_unlock(&vha->cmd_list_lock);
			return 1;
		}
	}

	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
		if (tag == cmd->atio.u.isp24.exchange_addr) {
			cmd->aborted = 1;
			spin_unlock(&vha->cmd_list_lock);
			return 1;
		}
	}

	spin_unlock(&vha->cmd_list_lock);
	return 0;
}
/*
 * Drop cmds for the given lun.
 * XXX only looks for cmds on the port through which the lun reset was
 *     received
 * XXX does not go through the lists of other ports (which may have cmds
 *     for the same lun)
 */
static void abort_cmds_for_lun(struct scsi_qla_host *vha,
				uint32_t lun, uint8_t *s_id)
{
	struct qla_tgt_sess_op *op;
	struct qla_tgt_cmd *cmd;
	uint32_t key;

	key = sid_to_key(s_id);
	spin_lock(&vha->cmd_list_lock);
	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
		uint32_t op_key;
		uint32_t op_lun;

		op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
		op_lun = scsilun_to_int(
			(struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
		if (op_key == key && op_lun == lun)
			op->aborted = true;
	}
	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
		uint32_t cmd_key;
		uint32_t cmd_lun;

		cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
		cmd_lun = scsilun_to_int(
			(struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
		if (cmd_key == key && cmd_lun == lun)
			cmd->aborted = 1;
	}
	spin_unlock(&vha->cmd_list_lock);
}
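/*
 * Note (illustrative): sid_to_key() condenses the 3-byte FC source ID
 * into one 24-bit integer key, conceptually
 *
 *	key = (s_id[0] << 16) | (s_id[1] << 8) | s_id[2];
 *
 * so the two list walks above can test "same initiator port" with a
 * single integer compare instead of a 3-byte memcmp().
 */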
/* ha->hardware_lock supposed to be held on entry */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_session *se_sess = sess->se_sess;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct se_cmd *se_cmd;
	u32 lun = 0;
	int rc;
	bool found_lun = false;

	spin_lock(&se_sess->sess_cmd_lock);
	list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
		struct qla_tgt_cmd *cmd =
			container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
		if (se_cmd->tag == abts->exchange_addr_to_abort) {
			lun = cmd->unpacked_lun;
			found_lun = true;
			break;
		}
	}
	spin_unlock(&se_sess->sess_cmd_lock);

	/* cmd not in LIO lists, look in qla list */
	if (!found_lun) {
		if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
			/* send TASK_ABORT response immediately */
			qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_CMPL, false);
			return 0;
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf081,
			    "unable to find cmd in driver or LIO for tag 0x%x\n",
			    abts->exchange_addr_to_abort);
			return -ENOENT;
		}
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
	    "qla_target(%d): task abort (tag=%d)\n",
	    vha->vp_idx, abts->exchange_addr_to_abort);

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
	mcmd->reset_count = vha->hw->chip_reset;

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
	    abts->exchange_addr_to_abort);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
		    "qla_target(%d): tgt_ops->handle_tmr()"
		    " failed: %d", vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	uint32_t tag = abts->exchange_addr_to_abort;
	uint8_t s_id[3];
	int rc;
	unsigned long flags;

	if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
		    "qla_target(%d): ABTS: Abort Sequence not "
		    "supported\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
		    "qla_target(%d): ABTS: Unknown Exchange "
		    "Address received\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
	    "qla_target(%d): task abort (s_id=%x:%x:%x, "
	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
	    abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
	    le32_to_cpu(abts->fcp_hdr_le.parameter));

	s_id[0] = abts->fcp_hdr_le.s_id[2];
	s_id[1] = abts->fcp_hdr_le.s_id[1];
	s_id[2] = abts->fcp_hdr_le.s_id[0];

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
		    "qla_target(%d): task abort for non-existent session\n",
		    vha->vp_idx);
		rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));

		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		if (rc != 0) {
			qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
			    false);
		}
		return;
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	rc = __qlt_24xx_handle_abts(vha, abts, sess);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
		    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
		    vha->vp_idx, rc);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
	struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
{
	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
	struct ctio7_to_24xx *ctio;
	uint16_t temp;

	ql_dbg(ql_dbg_tgt, ha, 0xe008,
	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x)\n",
	    ha, atio, resp_code);

	/* Send marker if required */
	if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, ha, 0xe04c,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", ha->vp_idx, __func__);
		return;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->nport_handle = mcmd->sess->loop_id;
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = ha->vp_idx;
	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
	    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.ox_id = cpu_to_le16(temp);
	ctio->u.status1.scsi_status =
	    cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
	ctio->u.status1.response_len = cpu_to_le16(8);
	ctio->u.status1.sense_data[0] = resp_code;

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(ha, ha->req);
}
void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
EXPORT_SYMBOL(qlt_free_mcmd);
/* callback from target fabric module code */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
	    "TM response mcmd (%p) status %#x state %#x",
	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (!vha->flags.online || mcmd->reset_count != ha->chip_reset) {
		/*
		 * Either the port is not online or this request was from
		 * previous life, just abort the processing.
		 */
		ql_dbg(ql_dbg_async, vha, 0xe100,
			"RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n",
			vha->flags.online, qla2x00_reset_active(vha),
			mcmd->reset_count, ha->chip_reset);
		ha->tgt.tgt_ops->free_mcmd(mcmd);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return;
	}

	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
		qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
		    0, 0, 0, 0, 0, 0);
	else {
		if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX)
			qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
			    mcmd->fc_tm_rsp, false);
		else
			qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
			    mcmd->fc_tm_rsp);
	}
	/*
	 * Make the callback for ->free_mcmd() to queue_work() and invoke
	 * target_put_sess_cmd() to drop cmd_kref to 1. The final
	 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
	 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
	 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
	 * qlt_xmit_tm_rsp() returns here.
	 */
	ha->tgt.tgt_ops->free_mcmd(mcmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);
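/*
 * Sketch of the mcmd release chain described in the comment above
 * (illustrative; callee names are from the tcm_qla2xxx fabric module):
 *
 *	qlt_xmit_tm_rsp()
 *	  ha->tgt.tgt_ops->free_mcmd()	-> tcm_qla2xxx_free_mcmd()
 *	    queue_work()		   defer to process context
 *	      target_put_sess_cmd()	   cmd_kref drops to 1
 *	  ...
 *	  TFO->check_stop_free()	-> tcm_qla2xxx_check_stop_free()
 *	    target_put_sess_cmd()	   final put, se_cmd released
 */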
static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd = prm->cmd;

	BUG_ON(cmd->sg_cnt == 0);

	prm->sg = (struct scatterlist *)cmd->sg;
	prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
	    cmd->sg_cnt, cmd->dma_data_direction);
	if (unlikely(prm->seg_cnt == 0))
		goto out_err;

	prm->cmd->sg_mapped = 1;

	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
		/*
		 * If greater than four sg entries then we need to allocate
		 * the continuation entries
		 */
		if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
			prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
			    prm->tgt->datasegs_per_cmd,
			    prm->tgt->datasegs_per_cont);
	} else {
		/* DIF */
		if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
		    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
			prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
			prm->tot_dsds = prm->seg_cnt;
		} else
			prm->tot_dsds = prm->seg_cnt;

		if (cmd->prot_sg_cnt) {
			prm->prot_sg = cmd->prot_sg;
			prm->prot_seg_cnt = pci_map_sg(prm->tgt->ha->pdev,
			    cmd->prot_sg, cmd->prot_sg_cnt,
			    cmd->dma_data_direction);
			if (unlikely(prm->prot_seg_cnt == 0))
				goto out_err;

			if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
			    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
				/* DIF bundling is not supported here */
				prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
				    cmd->blk_sz);
				prm->tot_dsds += prm->prot_seg_cnt;
			} else
				prm->tot_dsds += prm->prot_seg_cnt;
		}
	}

	return 0;

out_err:
	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
	    0, prm->cmd->sg_cnt);
	return -1;
}
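/*
 * Worked example (illustrative numbers): with datasegs_per_cmd = 1 and
 * datasegs_per_cont = 5, a command whose S/G list maps to seg_cnt = 8
 * needs
 *
 *	DIV_ROUND_UP(8 - 1, 5) = 2
 *
 * continuation IOCBs in addition to the base CTIO, so prm->req_cnt grows
 * by 2.  (The "greater than four" comment above is historical; the real
 * threshold is tgt->datasegs_per_cmd.)
 */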
static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
{
	struct qla_hw_data *ha = vha->hw;

	if (!cmd->sg_mapped)
		return;

	pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
	cmd->sg_mapped = 0;

	if (cmd->prot_sg_cnt)
		pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
			cmd->dma_data_direction);

	if (cmd->ctx_dsd_alloced)
		qla2x00_clean_dsd_pool(ha, NULL, cmd);

	if (cmd->ctx)
		dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
}
static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
	uint32_t req_cnt)
{
	uint32_t cnt, cnt_in;

	if (vha->req->cnt < (req_cnt + 2)) {
		cnt = (uint16_t)RD_REG_DWORD(vha->req->req_q_out);
		cnt_in = (uint16_t)RD_REG_DWORD(vha->req->req_q_in);

		if (vha->req->ring_index < cnt)
			vha->req->cnt = cnt - vha->req->ring_index;
		else
			vha->req->cnt = vha->req->length -
			    (vha->req->ring_index - cnt);

		if (unlikely(vha->req->cnt < (req_cnt + 2))) {
			ql_dbg(ql_dbg_io, vha, 0x305a,
			    "qla_target(%d): There is no room in the request ring: vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d Req-out=%d Req-in=%d Req-Length=%d\n",
			    vha->vp_idx, vha->req->ring_index,
			    vha->req->cnt, req_cnt, cnt, cnt_in,
			    vha->req->length);
			return -EAGAIN;
		}
	}

	vha->req->cnt -= req_cnt;
	return 0;
}
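/*
 * Worked example (illustrative numbers): for a ring of length 2048 with
 * ring_index = 2000 and hardware out-pointer cnt = 100, the producer has
 * wrapped, so the else-branch above computes
 *
 *	vha->req->cnt = 2048 - (2000 - 100) = 148
 *
 * free slots; a request needing req_cnt = 3 fits since 148 >= 3 + 2
 * (the "+ 2" keeps a small safety gap between producer and consumer).
 */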
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
{
	/* Adjust ring index. */
	vha->req->ring_index++;
	if (vha->req->ring_index == vha->req->length) {
		vha->req->ring_index = 0;
		vha->req->ring_ptr = vha->req->ring;
	} else {
		vha->req->ring_ptr++;
	}
	return (cont_entry_t *)vha->req->ring_ptr;
}
/* ha->hardware_lock supposed to be held on entry */
static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t h;

	h = ha->tgt.current_handle;
	/* always increment cmd handle */
	do {
		++h;
		if (h > DEFAULT_OUTSTANDING_COMMANDS)
			h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
		if (h == ha->tgt.current_handle) {
			ql_dbg(ql_dbg_io, vha, 0x305b,
			    "qla_target(%d): Ran out of "
			    "empty cmd slots in ha %p\n", vha->vp_idx, ha);
			h = QLA_TGT_NULL_HANDLE;
			break;
		}
	} while ((h == QLA_TGT_NULL_HANDLE) ||
	    (h == QLA_TGT_SKIP_HANDLE) ||
	    (ha->tgt.cmds[h-1] != NULL));

	if (h != QLA_TGT_NULL_HANDLE)
		ha->tgt.current_handle = h;

	return h;
}
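/*
 * Illustrative use of the handle scan above: handles occupy
 * 1..DEFAULT_OUTSTANDING_COMMANDS and index ha->tgt.cmds[h-1].  The loop
 * starts just past current_handle and wraps at most once; returning to
 * current_handle means every slot is busy, e.g. (cf.
 * qlt_24xx_build_ctio_pkt() below):
 *
 *	h = qlt_make_handle(vha);
 *	if (unlikely(h == QLA_TGT_NULL_HANDLE))
 *		return -EAGAIN;		// all slots busy, retry later
 *	ha->tgt.cmds[h - 1] = prm->cmd;
 */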
/* ha->hardware_lock supposed to be held on entry */
static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	uint32_t h;
	struct ctio7_to_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *atio = &prm->cmd->atio;
	uint16_t temp;

	pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	pkt->entry_type = CTIO_TYPE7;
	pkt->entry_count = (uint8_t)prm->req_cnt;
	pkt->vp_index = vha->vp_idx;

	h = qlt_make_handle(vha);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and thus the command.
		 */
		return -EAGAIN;
	} else
		ha->tgt.cmds[h-1] = prm->cmd;

	pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = prm->cmd->loop_id;
	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	pkt->exchange_addr = atio->u.isp24.exchange_addr;
	pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	pkt->u.status0.ox_id = cpu_to_le16(temp);
	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);

	return 0;
}
/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is a sufficient amount of request entries to not drop it.
 */
static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;

	/* Build continuation packets */
	while (prm->seg_cnt > 0) {
		cont_a64_entry_t *cont_pkt64 =
			(cont_a64_entry_t *)qlt_get_req_pkt(vha);

		/*
		 * Make sure that none of cont_pkt64's 64-bit specific
		 * fields is used for 32-bit addressing. Cast to
		 * (cont_entry_t *) for that.
		 */

		memset(cont_pkt64, 0, sizeof(*cont_pkt64));

		cont_pkt64->entry_count = 1;
		cont_pkt64->sys_define = 0;

		if (enable_64bit_addressing) {
			cont_pkt64->entry_type = CONTINUE_A64_TYPE;
			dword_ptr =
			    (uint32_t *)&cont_pkt64->dseg_0_address;
		} else {
			cont_pkt64->entry_type = CONTINUE_TYPE;
			dword_ptr =
			    (uint32_t *)&((cont_entry_t *)
				cont_pkt64)->dseg_0_address;
		}

		/* Load continuation entry data segments */
		for (cnt = 0;
		    cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
		    cnt++, prm->seg_cnt--) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_lo32
				(sg_dma_address(prm->sg)));
			if (enable_64bit_addressing) {
				*dword_ptr++ =
				    cpu_to_le32(pci_dma_hi32
					(sg_dma_address(prm->sg)));
			}
			*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

			prm->sg = sg_next(prm->sg);
		}
	}
}
/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is a sufficient amount of request entries to not drop it.
 */
static void qlt_load_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;

	pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);

	/* Setup packet address segment pointer */
	dword_ptr = pkt24->u.status0.dseg_0_address;

	/* Set total data segment count */
	if (prm->seg_cnt)
		pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);

	if (prm->seg_cnt == 0) {
		/* No data transfer */
		*dword_ptr++ = 0;
		*dword_ptr = 0;
		return;
	}

	/* If scatter gather */

	/* Load command entry data segments */
	for (cnt = 0;
	    (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
	    cnt++, prm->seg_cnt--) {
		*dword_ptr++ =
		    cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
		if (enable_64bit_addressing) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_hi32(
				sg_dma_address(prm->sg)));
		}
		*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

		prm->sg = sg_next(prm->sg);
	}

	qlt_load_cont_data_segments(prm, vha);
}
2105 static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
2107 return cmd->bufflen > 0;
2111 * Called without ha->hardware_lock held
2113 static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
2114 struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
2115 uint32_t *full_req_cnt)
2117 struct qla_tgt *tgt = cmd->tgt;
2118 struct scsi_qla_host *vha = tgt->vha;
2119 struct qla_hw_data *ha = vha->hw;
2120 struct se_cmd *se_cmd = &cmd->se_cmd;
2124 prm->rq_result = scsi_status;
2125 prm->sense_buffer = &cmd->sense_buffer[0];
2126 prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
2130 prm->add_status_pkt = 0;
2132 /* Send marker if required */
2133 if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
2134 return -EFAULT;
2136 if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
2137 if (qlt_pci_map_calc_cnt(prm) != 0)
2138 return -EAGAIN;
2141 *full_req_cnt = prm->req_cnt;
2143 if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
2144 prm->residual = se_cmd->residual_count;
2145 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x305c,
2146 "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
2147 prm->residual, se_cmd->tag,
2148 se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
2149 cmd->bufflen, prm->rq_result);
2150 prm->rq_result |= SS_RESIDUAL_UNDER;
2151 } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
2152 prm->residual = se_cmd->residual_count;
2153 ql_dbg(ql_dbg_io, vha, 0x305d,
2154 "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
2155 prm->residual, se_cmd->tag, se_cmd->t_task_cdb ?
2156 se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result);
2157 prm->rq_result |= SS_RESIDUAL_OVER;
2160 if (xmit_type & QLA_TGT_XMIT_STATUS) {
2162 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
2163 * ignored in *xmit_response() below
2164 */
2165 if (qlt_has_data(cmd)) {
2166 if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
2167 (IS_FWI2_CAPABLE(ha) &&
2168 (prm->rq_result != 0))) {
2169 prm->add_status_pkt = 1;
2178 static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
2179 struct qla_tgt_cmd *cmd, int sending_sense)
2181 if (ha->tgt.enable_class_2)
2182 return 0;
2183
2184 if (sending_sense)
2185 return cmd->conf_compl_supported;
2186 else
2187 return ha->tgt.enable_explicit_conf &&
2188 cmd->conf_compl_supported;
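/*
 * In other words: with Class 2 service the link layer already
 * acknowledges frames, so no explicit confirmation is requested;
 * otherwise it is requested only if the initiator advertised support
 * for confirmed completions (conf_compl_supported) and, when not
 * sending sense, the driver has explicit confirmation enabled as well.
 */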
2191 #ifdef CONFIG_QLA_TGT_DEBUG_SRR
2193 * Originally taken from the XFS code.
2195 static unsigned long qlt_srr_random(void)
2198 static unsigned long RandomValue;
2199 static DEFINE_SPINLOCK(lock);
2200 /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
2204 unsigned long flags;
2206 spin_lock_irqsave(&lock, flags);
2208 RandomValue = jiffies;
2214 rv = 16807 * lo - 2836 * hi;
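/*
 * This is the Park-Miller "minimal standard" generator evaluated with
 * Schrage's method: with hi = x / 127773 and lo = x % 127773, the
 * update 16807 * lo - 2836 * hi computes 16807 * x mod (2^31 - 1)
 * without overflowing 32 bits.
 */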
2218 spin_unlock_irqrestore(&lock, flags);
2222 static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
2224 #if 0 /* This is not a real status packet loss, so it won't lead to an SRR */
2225 if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200)
2227 *xmit_type &= ~QLA_TGT_XMIT_STATUS;
2228 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
2229 "Dropping cmd %p (tag %d) status", cmd, se_cmd->tag);
2233 * It's currently not possible to simulate SRRs for FCP_WRITE without
2234 * a physical link layer failure, so don't even try here.
2236 if (cmd->dma_data_direction != DMA_FROM_DEVICE)
2239 if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) &&
2240 ((qlt_srr_random() % 100) == 20)) {
2242 unsigned int tot_len = 0;
2245 leave = qlt_srr_random() % cmd->sg_cnt;
2247 for (i = 0; i < leave; i++)
2248 tot_len += cmd->sg[i].length;
2250 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
2251 "Cutting cmd %p (tag %d) buffer"
2252 " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
2253 " cmd->sg_cnt %d)", cmd, se_cmd->tag, tot_len, leave,
2254 cmd->bufflen, cmd->sg_cnt);
2256 cmd->bufflen = tot_len;
2257 cmd->sg_cnt = leave;
2260 if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) {
2261 unsigned int offset = qlt_srr_random() % cmd->bufflen;
2263 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
2264 "Cutting cmd %p (tag %d) buffer head "
2265 "to offset %d (cmd->bufflen %d)", cmd, se_cmd->tag, offset,
2266 cmd->bufflen);
2267 if (offset == 0)
2268 *xmit_type &= ~QLA_TGT_XMIT_DATA;
2269 else if (qlt_set_data_offset(cmd, offset)) {
2270 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
2271 "qlt_set_data_offset() failed (tag %d)", se_cmd->tag);
2276 static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
2280 static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
2281 struct qla_tgt_prm *prm)
2283 prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
2284 (uint32_t)sizeof(ctio->u.status1.sense_data));
2285 ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
2286 if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
2287 ctio->u.status0.flags |= cpu_to_le16(
2288 CTIO7_FLAGS_EXPLICIT_CONFORM |
2289 CTIO7_FLAGS_CONFORM_REQ);
2291 ctio->u.status0.residual = cpu_to_le32(prm->residual);
2292 ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
2293 if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
2296 if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
2297 if (prm->cmd->se_cmd.scsi_status != 0) {
2298 ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
2299 "Skipping EXPLICIT_CONFORM and "
2300 "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
2301 "non GOOD status\n");
2302 goto skip_explict_conf;
2304 ctio->u.status1.flags |= cpu_to_le16(
2305 CTIO7_FLAGS_EXPLICIT_CONFORM |
2306 CTIO7_FLAGS_CONFORM_REQ);
2309 ctio->u.status1.flags &=
2310 ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
2311 ctio->u.status1.flags |=
2312 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
2313 ctio->u.status1.scsi_status |=
2314 cpu_to_le16(SS_SENSE_LEN_VALID);
2315 ctio->u.status1.sense_length =
2316 cpu_to_le16(prm->sense_buffer_len);
2317 for (i = 0; i < prm->sense_buffer_len/4; i++)
2318 ((uint32_t *)ctio->u.status1.sense_data)[i] =
2319 cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
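/*
 * The loop above copies the sense bytes into the CTIO as 32-bit
 * big-endian words (the layout the firmware seems to expect); any
 * trailing len % 4 bytes are not copied, which is what the warning
 * below reports as lost sense data.
 */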
2321 if (unlikely((prm->sense_buffer_len % 4) != 0)) {
2324 ql_dbg(ql_dbg_tgt, vha, 0xe04f,
2325 "qla_target(%d): %d bytes of sense "
2326 "lost", prm->tgt->ha->vp_idx,
2327 prm->sense_buffer_len % 4);
2333 ctio->u.status1.flags &=
2334 ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
2335 ctio->u.status1.flags |=
2336 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
2337 ctio->u.status1.sense_length = 0;
2338 memset(ctio->u.status1.sense_data, 0,
2339 sizeof(ctio->u.status1.sense_data));
2342 /* Sense with len > 24: is that possible? */
2349 qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
2352 * Uncomment when corresponding SCSI changes are done.
2354 if (!sp->cmd->prot_chk)
2358 switch (se_cmd->prot_op) {
2359 case TARGET_PROT_DOUT_INSERT:
2360 case TARGET_PROT_DIN_STRIP:
2361 if (ql2xenablehba_err_chk >= 1)
2364 case TARGET_PROT_DOUT_PASS:
2365 case TARGET_PROT_DIN_PASS:
2366 if (ql2xenablehba_err_chk >= 2)
2369 case TARGET_PROT_DIN_INSERT:
2370 case TARGET_PROT_DOUT_STRIP:
2379 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
2383 qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
2385 uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
2387 /* Wait until the Mode Sense/Select commands (mode page 0Ah, subpage 2)
2388 * have been implemented by TCM, before the AppTag is available.
2389 * Look for modesense_handlers[].
2390 */
2392 ctx->app_tag_mask[0] = 0x0;
2393 ctx->app_tag_mask[1] = 0x0;
2395 switch (se_cmd->prot_type) {
2396 case TARGET_DIF_TYPE0_PROT:
2398 * No check for ql2xenablehba_err_chk, as it would be an
2399 * I/O error if hba tag generation is not done.
2401 ctx->ref_tag = cpu_to_le32(lba);
2403 if (!qlt_hba_err_chk_enabled(se_cmd))
2406 /* enable ALL bytes of the ref tag */
2407 ctx->ref_tag_mask[0] = 0xff;
2408 ctx->ref_tag_mask[1] = 0xff;
2409 ctx->ref_tag_mask[2] = 0xff;
2410 ctx->ref_tag_mask[3] = 0xff;
2413 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
2414 * 16 bit APP tag.
2415 */
2416 case TARGET_DIF_TYPE1_PROT:
2417 ctx->ref_tag = cpu_to_le32(lba);
2419 if (!qlt_hba_err_chk_enabled(se_cmd))
2422 /* enable ALL bytes of the ref tag */
2423 ctx->ref_tag_mask[0] = 0xff;
2424 ctx->ref_tag_mask[1] = 0xff;
2425 ctx->ref_tag_mask[2] = 0xff;
2426 ctx->ref_tag_mask[3] = 0xff;
2429 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
2430 * match LBA in CDB + N
2431 */
2432 case TARGET_DIF_TYPE2_PROT:
2433 ctx->ref_tag = cpu_to_le32(lba);
2435 if (!qlt_hba_err_chk_enabled(se_cmd))
2438 /* enable ALL bytes of the ref tag */
2439 ctx->ref_tag_mask[0] = 0xff;
2440 ctx->ref_tag_mask[1] = 0xff;
2441 ctx->ref_tag_mask[2] = 0xff;
2442 ctx->ref_tag_mask[3] = 0xff;
2445 /* For Type 3 protection: 16 bit GUARD only */
2446 case TARGET_DIF_TYPE3_PROT:
2447 ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
2448 ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
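/*
 * Net effect of this switch: Types 0, 1 and 2 enable the full ref-tag
 * mask, Type 3 clears it (guard check only), and the app tag is never
 * checked here because app_tag_mask[] was zeroed above.
 */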
2455 qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
2458 uint32_t transfer_length = 0;
2459 uint32_t data_bytes;
2461 uint8_t bundling = 1;
2463 struct crc_context *crc_ctx_pkt = NULL;
2464 struct qla_hw_data *ha;
2465 struct ctio_crc2_to_fw *pkt;
2466 dma_addr_t crc_ctx_dma;
2467 uint16_t fw_prot_opts = 0;
2468 struct qla_tgt_cmd *cmd = prm->cmd;
2469 struct se_cmd *se_cmd = &cmd->se_cmd;
2471 struct atio_from_isp *atio = &prm->cmd->atio;
2476 pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr;
2478 memset(pkt, 0, sizeof(*pkt));
2480 ql_dbg(ql_dbg_tgt, vha, 0xe071,
2481 "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
2482 vha->vp_idx, __func__, se_cmd, se_cmd->prot_op,
2483 prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);
2485 if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
2486 (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
2489 /* Compute dif len and adjust data len to include protection */
2490 data_bytes = cmd->bufflen;
2491 dif_bytes = (data_bytes / cmd->blk_sz) * 8;
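/*
 * Worked example: a 4096-byte transfer on a 512-byte-block device spans
 * 8 blocks, so dif_bytes = (4096 / 512) * 8 = 64 bytes, one 8-byte DIF
 * tuple (guard, app tag, ref tag) per block.
 */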
2493 switch (se_cmd->prot_op) {
2494 case TARGET_PROT_DIN_INSERT:
2495 case TARGET_PROT_DOUT_STRIP:
2496 transfer_length = data_bytes;
2497 data_bytes += dif_bytes;
2500 case TARGET_PROT_DIN_STRIP:
2501 case TARGET_PROT_DOUT_INSERT:
2502 case TARGET_PROT_DIN_PASS:
2503 case TARGET_PROT_DOUT_PASS:
2504 transfer_length = data_bytes + dif_bytes;
2512 if (!qlt_hba_err_chk_enabled(se_cmd))
2513 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
2514 /* HBA error checking enabled */
2515 else if (IS_PI_UNINIT_CAPABLE(ha)) {
2516 if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
2517 (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
2518 fw_prot_opts |= PO_DIS_VALD_APP_ESC;
2519 else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
2520 fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
2523 switch (se_cmd->prot_op) {
2524 case TARGET_PROT_DIN_INSERT:
2525 case TARGET_PROT_DOUT_INSERT:
2526 fw_prot_opts |= PO_MODE_DIF_INSERT;
2528 case TARGET_PROT_DIN_STRIP:
2529 case TARGET_PROT_DOUT_STRIP:
2530 fw_prot_opts |= PO_MODE_DIF_REMOVE;
2532 case TARGET_PROT_DIN_PASS:
2533 case TARGET_PROT_DOUT_PASS:
2534 fw_prot_opts |= PO_MODE_DIF_PASS;
2535 /* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
2537 default:/* Normal Request */
2538 fw_prot_opts |= PO_MODE_DIF_PASS;
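/*
 * In short: the INSERT modes have the HBA generate protection data,
 * the STRIP modes have it remove protection data, and the PASS modes
 * carry the initiator's protection data through unchanged.
 */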
2544 /* Update entry type to indicate Command Type CRC_2 IOCB */
2545 pkt->entry_type = CTIO_CRC2;
2546 pkt->entry_count = 1;
2547 pkt->vp_index = vha->vp_idx;
2549 h = qlt_make_handle(vha);
2550 if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
2552 * CTIO type 7 from the firmware doesn't provide a way to
2553 * know the initiator's LOOP ID, hence we can't find
2554 * the session and, so, the command.
2555 */
2556 return -EAGAIN;
2557 } else
2558 ha->tgt.cmds[h-1] = prm->cmd;
2561 pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
2562 pkt->nport_handle = prm->cmd->loop_id;
2563 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2564 pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
2565 pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
2566 pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
2567 pkt->exchange_addr = atio->u.isp24.exchange_addr;
2569 /* silence compile warning */
2570 t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2571 pkt->ox_id = cpu_to_le16(t16);
2573 t16 = (atio->u.isp24.attr << 9);
2574 pkt->flags |= cpu_to_le16(t16);
2575 pkt->relative_offset = cpu_to_le32(prm->cmd->offset);
2577 /* Set transfer direction */
2578 if (cmd->dma_data_direction == DMA_TO_DEVICE)
2579 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN);
2580 else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
2581 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
2584 pkt->dseg_count = prm->tot_dsds;
2585 /* Fibre channel byte count */
2586 pkt->transfer_length = cpu_to_le32(transfer_length);
2589 /* ----- CRC context -------- */
2591 /* Allocate CRC context from global pool */
2592 crc_ctx_pkt = cmd->ctx =
2593 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
2596 goto crc_queuing_error;
2598 /* Zero out CTX area. */
2599 clr_ptr = (uint8_t *)crc_ctx_pkt;
2600 memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
2602 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
2603 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
2606 crc_ctx_pkt->handle = pkt->handle;
2608 qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt);
2610 pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
2611 pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
2612 pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
2616 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
2619 * Configure bundling if we need to fetch interleaving
2620 * protection PCI accesses
2621 */
2622 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
2623 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
2624 crc_ctx_pkt->u.bundling.dseg_count =
2625 cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
2626 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
2629 /* Finish the common fields of CRC pkt */
2630 crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz);
2631 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
2632 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
2633 crc_ctx_pkt->guard_seed = cpu_to_le16(0);
2636 /* Walks data segments */
2637 pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
2639 if (!bundling && prm->prot_seg_cnt) {
2640 if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
2641 prm->tot_dsds, cmd))
2642 goto crc_queuing_error;
2643 } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
2644 (prm->tot_dsds - prm->prot_seg_cnt), cmd))
2645 goto crc_queuing_error;
2647 if (bundling && prm->prot_seg_cnt) {
2648 /* Walks dif segments */
2649 pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;
2651 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
2652 if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
2653 prm->prot_seg_cnt, cmd))
2654 goto crc_queuing_error;
2659 /* Cleanup will be performed by the caller */
2661 return QLA_FUNCTION_FAILED;
2666 * Callback to set up a response of xmit_type QLA_TGT_XMIT_DATA and/or
2667 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
2669 int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2670 uint8_t scsi_status)
2672 struct scsi_qla_host *vha = cmd->vha;
2673 struct qla_hw_data *ha = vha->hw;
2674 struct ctio7_to_24xx *pkt;
2675 struct qla_tgt_prm prm;
2676 uint32_t full_req_cnt = 0;
2677 unsigned long flags = 0;
2680 spin_lock_irqsave(&ha->hardware_lock, flags);
2681 if (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
2682 cmd->state = QLA_TGT_STATE_PROCESSED;
2683 if (cmd->sess->logout_completed)
2684 /* no need to terminate. FW already freed exchange. */
2685 qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
2687 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
2688 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2691 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2693 memset(&prm, 0, sizeof(prm));
2694 qlt_check_srr_debug(cmd, &xmit_type);
2696 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
2697 "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n",
2698 (xmit_type & QLA_TGT_XMIT_STATUS) ?
2699 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
2702 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
2704 if (unlikely(res != 0)) {
2708 spin_lock_irqsave(&ha->hardware_lock, flags);
2710 if (xmit_type == QLA_TGT_XMIT_STATUS)
2711 vha->tgt_counters.core_qla_snd_status++;
2713 vha->tgt_counters.core_qla_que_buf++;
2715 if (!vha->flags.online || cmd->reset_count != ha->chip_reset) {
2717 * Either the port is not online or this request was from
2718 * previous life, just abort the processing.
2720 cmd->state = QLA_TGT_STATE_PROCESSED;
2721 qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
2722 ql_dbg(ql_dbg_async, vha, 0xe101,
2723 "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
2724 vha->flags.online, qla2x00_reset_active(vha),
2725 cmd->reset_count, ha->chip_reset);
2726 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2730 /* Does the F/W have enough IOCBs for this request? */
2731 res = qlt_check_reserve_free_req(vha, full_req_cnt);
2733 goto out_unmap_unlock;
2735 if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
2736 res = qlt_build_ctio_crc2_pkt(&prm, vha);
2738 res = qlt_24xx_build_ctio_pkt(&prm, vha);
2739 if (unlikely(res != 0)) {
2740 vha->req->cnt += full_req_cnt;
2741 goto out_unmap_unlock;
2744 pkt = (struct ctio7_to_24xx *)prm.pkt;
2746 if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
2747 pkt->u.status0.flags |=
2748 cpu_to_le16(CTIO7_FLAGS_DATA_IN |
2749 CTIO7_FLAGS_STATUS_MODE_0);
2751 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
2752 qlt_load_data_segments(&prm, vha);
2754 if (prm.add_status_pkt == 0) {
2755 if (xmit_type & QLA_TGT_XMIT_STATUS) {
2756 pkt->u.status0.scsi_status =
2757 cpu_to_le16(prm.rq_result);
2758 pkt->u.status0.residual =
2759 cpu_to_le32(prm.residual);
2760 pkt->u.status0.flags |= cpu_to_le16(
2761 CTIO7_FLAGS_SEND_STATUS);
2762 if (qlt_need_explicit_conf(ha, cmd, 0)) {
2763 pkt->u.status0.flags |=
2765 CTIO7_FLAGS_EXPLICIT_CONFORM |
2766 CTIO7_FLAGS_CONFORM_REQ);
2772 * We have already made sure that there is a sufficient
2773 * number of request entries, so the HW lock need not be
2774 * dropped in *xmit_response().
2775 */
2776 struct ctio7_to_24xx *ctio =
2777 (struct ctio7_to_24xx *)qlt_get_req_pkt(vha);
2779 ql_dbg(ql_dbg_io, vha, 0x305e,
2780 "Building additional status packet 0x%p.\n",
2784 * T10Dif: ctio_crc2_to_fw overlay on top of
2785 * ctio7_to_24xx
2786 */
2787 memcpy(ctio, pkt, sizeof(*ctio));
2788 /* reset back to CTIO7 */
2789 ctio->entry_count = 1;
2790 ctio->entry_type = CTIO_TYPE7;
2791 ctio->dseg_count = 0;
2792 ctio->u.status1.flags &= ~cpu_to_le16(
2793 CTIO7_FLAGS_DATA_IN);
2795 /* Real finish is ctio_m1's finish */
2796 pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
2797 pkt->u.status0.flags |= cpu_to_le16(
2798 CTIO7_FLAGS_DONT_RET_CTIO);
2800 /* qlt_24xx_init_ctio_to_isp will correct
2801 * all the necessary fields that are part of CTIO7.
2802 * There should be no residual of CTIO-CRC2 data.
2804 qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
2806 pr_debug("Status CTIO7: %p\n", ctio);
2809 qlt_24xx_init_ctio_to_isp(pkt, &prm);
2812 cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
2813 cmd->cmd_sent_to_fw = 1;
2815 /* Memory Barrier */
2817 qla2x00_start_iocbs(vha, vha->req);
2818 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2823 qlt_unmap_sg(vha, cmd);
2824 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2828 EXPORT_SYMBOL(qlt_xmit_response);
2830 int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
2832 struct ctio7_to_24xx *pkt;
2833 struct scsi_qla_host *vha = cmd->vha;
2834 struct qla_hw_data *ha = vha->hw;
2835 struct qla_tgt *tgt = cmd->tgt;
2836 struct qla_tgt_prm prm;
2837 unsigned long flags;
2840 memset(&prm, 0, sizeof(prm));
2846 /* Send marker if required */
2847 if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
2848 return -EIO;
2850 /* Calculate number of entries and segments required */
2851 if (qlt_pci_map_calc_cnt(&prm) != 0)
2852 return -EAGAIN;
2854 spin_lock_irqsave(&ha->hardware_lock, flags);
2856 if (!vha->flags.online || (cmd->reset_count != ha->chip_reset) ||
2857 (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)) {
2859 * Either the port is not online or this request was from
2860 * previous life, just abort the processing.
2862 cmd->state = QLA_TGT_STATE_NEED_DATA;
2863 qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
2864 ql_dbg(ql_dbg_async, vha, 0xe102,
2865 "RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n",
2866 vha->flags.online, qla2x00_reset_active(vha),
2867 cmd->reset_count, ha->chip_reset);
2868 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2872 /* Does the F/W have enough IOCBs for this request? */
2873 res = qlt_check_reserve_free_req(vha, prm.req_cnt);
2875 goto out_unlock_free_unmap;
2876 if (cmd->se_cmd.prot_op)
2877 res = qlt_build_ctio_crc2_pkt(&prm, vha);
2879 res = qlt_24xx_build_ctio_pkt(&prm, vha);
2881 if (unlikely(res != 0)) {
2882 vha->req->cnt += prm.req_cnt;
2883 goto out_unlock_free_unmap;
2886 pkt = (struct ctio7_to_24xx *)prm.pkt;
2887 pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
2888 CTIO7_FLAGS_STATUS_MODE_0);
2890 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
2891 qlt_load_data_segments(&prm, vha);
2893 cmd->state = QLA_TGT_STATE_NEED_DATA;
2894 cmd->cmd_sent_to_fw = 1;
2896 /* Memory Barrier */
2898 qla2x00_start_iocbs(vha, vha->req);
2899 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2903 out_unlock_free_unmap:
2904 qlt_unmap_sg(vha, cmd);
2905 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2909 EXPORT_SYMBOL(qlt_rdy_to_xfer);
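/*
 * Write flow recap: qlt_rdy_to_xfer() above sends a DATA_OUT CTIO (the
 * FCP XFER_RDY equivalent) and leaves the command in
 * QLA_TGT_STATE_NEED_DATA; once the data arrives, the CTIO completion
 * handled in qlt_do_ctio_completion() below moves the command to
 * QLA_TGT_STATE_DATA_IN and hands it to tgt_ops->handle_data().
 */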
2913 * Checks the guard or meta-data for the type of error
2914 * detected by the HBA.
2917 qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
2918 struct ctio_crc_from_fw *sts)
2920 uint8_t *ap = &sts->actual_dif[0];
2921 uint8_t *ep = &sts->expected_dif[0];
2922 uint32_t e_ref_tag, a_ref_tag;
2923 uint16_t e_app_tag, a_app_tag;
2924 uint16_t e_guard, a_guard;
2925 uint64_t lba = cmd->se_cmd.t_task_lba;
2927 a_guard = be16_to_cpu(*(uint16_t *)(ap + 0));
2928 a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
2929 a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
2931 e_guard = be16_to_cpu(*(uint16_t *)(ep + 0));
2932 e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
2933 e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
2935 ql_dbg(ql_dbg_tgt, vha, 0xe075,
2936 "iocb(s) %p Returned STATUS.\n", sts);
2938 ql_dbg(ql_dbg_tgt, vha, 0xf075,
2939 "dif check TGT cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n",
2940 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
2941 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard);
2945 * For type 3: ref & app tag is all 'f's
2946 * For type 0,1,2: app tag is all 'f's
2947 */
2948 if ((a_app_tag == 0xffff) &&
2949 ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) ||
2950 (a_ref_tag == 0xffffffff))) {
2951 uint32_t blocks_done;
2953 /* 2TB boundary case covered automatically with this */
2954 blocks_done = e_ref_tag - (uint32_t)lba + 1;
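/*
 * For Type 1/2 the expected ref tag starts at the low 32 bits of
 * the LBA and increments once per block, so e.g. lba = 100 with
 * e_ref_tag = 107 means 8 blocks completed before the check fired.
 */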
2955 cmd->se_cmd.bad_sector = e_ref_tag;
2956 cmd->se_cmd.pi_err = 0;
2957 ql_dbg(ql_dbg_tgt, vha, 0xf074,
2958 "need to return scsi good\n");
2960 /* Update protection tag */
2961 if (cmd->prot_sg_cnt) {
2962 uint32_t i, k = 0, num_ent;
2963 struct scatterlist *sg, *sgl;
2968 /* Patch the corresponding protection tags */
2969 for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) {
2970 num_ent = sg_dma_len(sg) / 8;
2971 if (k + num_ent < blocks_done) {
2979 if (k != blocks_done) {
2980 ql_log(ql_log_warn, vha, 0xf076,
2981 "unexpected tag values tag:lba=%u:%llu)\n",
2982 e_ref_tag, (unsigned long long)lba);
2987 struct sd_dif_tuple *spt;
2989 * This section came from the initiator. Is it valid here?
2990 * Should the ULP be overridden with the actual value?
2991 */
2992 spt = page_address(sg_page(sg)) + sg->offset;
2995 spt->app_tag = 0xffff;
2996 if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3)
2997 spt->ref_tag = 0xffffffff;
3005 if (e_guard != a_guard) {
3006 cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
3007 cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
3009 ql_log(ql_log_warn, vha, 0xe076,
3010 "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
3011 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
3012 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
3013 a_guard, e_guard, cmd);
3018 if (e_ref_tag != a_ref_tag) {
3019 cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
3020 cmd->se_cmd.bad_sector = e_ref_tag;
3022 ql_log(ql_log_warn, vha, 0xe077,
3023 "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
3024 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
3025 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
3026 a_guard, e_guard, cmd);
3030 /* check appl tag */
3031 if (e_app_tag != a_app_tag) {
3032 cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
3033 cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
3035 ql_log(ql_log_warn, vha, 0xe078,
3036 "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
3037 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
3038 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
3039 a_guard, e_guard, cmd);
3047 /* If hardware_lock is held on entry, it might be dropped and then reacquired */
3048 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
3049 static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3050 struct imm_ntfy_from_isp *ntfy)
3052 struct nack_to_isp *nack;
3053 struct qla_hw_data *ha = vha->hw;
3057 ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
3058 "Sending TERM ELS CTIO (ha=%p)\n", ha);
3060 pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
3062 ql_dbg(ql_dbg_tgt, vha, 0xe080,
3063 "qla_target(%d): %s failed: unable to allocate "
3064 "request packet\n", vha->vp_idx, __func__);
3068 pkt->entry_type = NOTIFY_ACK_TYPE;
3069 pkt->entry_count = 1;
3070 pkt->handle = QLA_TGT_SKIP_HANDLE;
3072 nack = (struct nack_to_isp *)pkt;
3073 nack->ox_id = ntfy->ox_id;
3075 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3076 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3077 nack->u.isp24.flags = ntfy->u.isp24.flags &
3078 __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
3082 nack->u.isp24.flags |=
3083 __constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);
3085 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3086 nack->u.isp24.status = ntfy->u.isp24.status;
3087 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3088 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3089 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3090 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3091 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3092 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3094 qla2x00_start_iocbs(vha, vha->req);
3098 static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3099 struct imm_ntfy_from_isp *imm, int ha_locked)
3101 unsigned long flags = 0;
3104 if (qlt_issue_marker(vha, ha_locked) < 0)
3108 rc = __qlt_send_term_imm_notif(vha, imm);
3112 qlt_alloc_qfull_cmd(vha, imm, 0, 0);
3117 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
3118 rc = __qlt_send_term_imm_notif(vha, imm);
3122 qlt_alloc_qfull_cmd(vha, imm, 0, 0);
3127 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
3130 /* If hardware_lock is held on entry, it might be dropped and then reacquired */
3131 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
3132 static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
3133 struct qla_tgt_cmd *cmd,
3134 struct atio_from_isp *atio)
3136 struct ctio7_to_24xx *ctio24;
3137 struct qla_hw_data *ha = vha->hw;
3142 ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
3144 pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
3146 ql_dbg(ql_dbg_tgt, vha, 0xe050,
3147 "qla_target(%d): %s failed: unable to allocate "
3148 "request packet\n", vha->vp_idx, __func__);
3153 if (cmd->state < QLA_TGT_STATE_PROCESSED) {
3154 ql_dbg(ql_dbg_tgt, vha, 0xe051,
3155 "qla_target(%d): Terminating cmd %p with "
3156 "incorrect state %d\n", vha->vp_idx, cmd,
3162 vha->tgt_counters.num_term_xchg_sent++;
3163 pkt->entry_count = 1;
3164 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
3166 ctio24 = (struct ctio7_to_24xx *)pkt;
3167 ctio24->entry_type = CTIO_TYPE7;
3168 ctio24->nport_handle = CTIO7_NHANDLE_UNRECOGNIZED;
3169 ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
3170 ctio24->vp_index = vha->vp_idx;
3171 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
3172 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
3173 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
3174 ctio24->exchange_addr = atio->u.isp24.exchange_addr;
3175 ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
3176 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
3177 CTIO7_FLAGS_TERMINATE);
3178 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
3179 ctio24->u.status1.ox_id = cpu_to_le16(temp);
3181 /* Most likely, it isn't needed */
3182 ctio24->u.status1.residual = get_unaligned((uint32_t *)
3183 &atio->u.isp24.fcp_cmnd.add_cdb[
3184 atio->u.isp24.fcp_cmnd.add_cdb_len]);
3185 if (ctio24->u.status1.residual != 0)
3186 ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
3188 /* Memory Barrier */
3190 qla2x00_start_iocbs(vha, vha->req);
3194 static void qlt_send_term_exchange(struct scsi_qla_host *vha,
3195 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
3198 unsigned long flags = 0;
3201 if (qlt_issue_marker(vha, ha_locked) < 0)
3205 rc = __qlt_send_term_exchange(vha, cmd, atio);
3207 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
3210 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
3211 rc = __qlt_send_term_exchange(vha, cmd, atio);
3213 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
3216 if (cmd && !ul_abort && !cmd->aborted) {
3217 if (cmd->sg_mapped)
3218 qlt_unmap_sg(vha, cmd);
3219 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3223 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
3228 static void qlt_init_term_exchange(struct scsi_qla_host *vha)
3230 struct list_head free_list;
3231 struct qla_tgt_cmd *cmd, *tcmd;
3233 vha->hw->tgt.leak_exchg_thresh_hold =
3234 (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;
3237 if (!list_empty(&vha->hw->tgt.q_full_list)) {
3238 INIT_LIST_HEAD(&free_list);
3239 list_splice_init(&vha->hw->tgt.q_full_list, &free_list);
3241 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
3242 list_del(&cmd->cmd_list);
3243 /* This cmd was never sent to TCM. There is no need
3244 * to schedule free or call free_cmd
3247 vha->hw->tgt.num_qfull_cmds_alloc--;
3250 vha->hw->tgt.num_qfull_cmds_dropped = 0;
3253 static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
3255 uint32_t total_leaked;
3257 total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;
3259 if (vha->hw->tgt.leak_exchg_thresh_hold &&
3260 (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {
3262 ql_dbg(ql_dbg_tgt, vha, 0xe079,
3263 "Chip reset due to exchange starvation: %d/%d.\n",
3264 total_leaked, vha->hw->cur_fw_xcb_count);
3266 if (IS_P3P_TYPE(vha->hw))
3267 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
3269 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3270 qla2xxx_wake_dpc(vha);
3275 int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
3277 struct qla_tgt *tgt = cmd->tgt;
3278 struct scsi_qla_host *vha = tgt->vha;
3279 struct se_cmd *se_cmd = &cmd->se_cmd;
3280 unsigned long flags;
3282 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
3283 "qla_target(%d): terminating exchange for aborted cmd=%p "
3284 "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
3287 spin_lock_irqsave(&cmd->cmd_lock, flags);
3289 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3291 * It's normal to see 2 calls in this path:
3292 * 1) XFER Rdy completion + CMD_T_ABORT
3293 * 2) TCM TMR - drain_state_list
3295 ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
3296 "multiple abort. %p transport_state %x, t_state %x,"
3297 " se_cmd_flags %x \n", cmd, cmd->se_cmd.transport_state,
3298 cmd->se_cmd.t_state,cmd->se_cmd.se_cmd_flags);
3302 cmd->cmd_flags |= BIT_6;
3303 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3305 qlt_send_term_exchange(vha, cmd, &cmd->atio, 0, 1);
3308 EXPORT_SYMBOL(qlt_abort_cmd);
3310 void qlt_free_cmd(struct qla_tgt_cmd *cmd)
3312 struct qla_tgt_sess *sess = cmd->sess;
3314 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
3315 "%s: se_cmd[%p] ox_id %04x\n",
3316 __func__, &cmd->se_cmd,
3317 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
3319 BUG_ON(cmd->cmd_in_wq);
3322 qlt_unmap_sg(cmd->vha, cmd);
3325 qlt_decr_num_pend_cmds(cmd->vha);
3327 BUG_ON(cmd->sg_mapped);
3328 cmd->jiffies_at_free = get_jiffies_64();
3329 if (unlikely(cmd->free_sg))
3332 if (!sess || !sess->se_sess) {
3336 cmd->jiffies_at_free = get_jiffies_64();
3337 percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
3339 EXPORT_SYMBOL(qlt_free_cmd);
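/*
 * Command descriptors come from the per-session tag pool: qlt_get_tag()
 * below allocates a tag from se_sess->sess_tag_pool and carves the
 * qla_tgt_cmd out of sess_cmd_map, and qlt_free_cmd() above returns it
 * with percpu_ida_free(), avoiding a per-command allocation in the
 * fast path.
 */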
3341 /* ha->hardware_lock supposed to be held on entry */
3342 static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
3343 struct qla_tgt_cmd *cmd, void *ctio)
3345 struct qla_tgt_srr_ctio *sc;
3346 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
3347 struct qla_tgt_srr_imm *imm;
3350 cmd->cmd_flags |= BIT_15;
3352 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
3353 "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx);
3356 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055,
3357 "qla_target(%d): SRR CTIO, but ctio is NULL\n",
3362 sc = kzalloc(sizeof(*sc), GFP_ATOMIC);
3365 /* IRQ is already OFF */
3366 spin_lock(&tgt->srr_lock);
3367 sc->srr_id = tgt->ctio_srr_id;
3368 list_add_tail(&sc->srr_list_entry,
3369 &tgt->srr_ctio_list);
3370 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
3371 "CTIO SRR %p added (id %d)\n", sc, sc->srr_id);
3372 if (tgt->imm_srr_id == tgt->ctio_srr_id) {
3374 list_for_each_entry(imm, &tgt->srr_imm_list,
3376 if (imm->srr_id == sc->srr_id) {
3382 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b,
3383 "Scheduling srr work\n");
3384 schedule_work(&tgt->srr_work);
3386 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056,
3387 "qla_target(%d): imm_srr_id "
3388 "== ctio_srr_id (%d), but there is no "
3389 "corresponding SRR IMM, deleting CTIO "
3390 "SRR %p\n", vha->vp_idx,
3391 tgt->ctio_srr_id, sc);
3392 list_del(&sc->srr_list_entry);
3393 spin_unlock(&tgt->srr_lock);
3399 spin_unlock(&tgt->srr_lock);
3401 struct qla_tgt_srr_imm *ti;
3403 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057,
3404 "qla_target(%d): Unable to allocate SRR CTIO entry\n",
3406 spin_lock(&tgt->srr_lock);
3407 list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list,
3409 if (imm->srr_id == tgt->ctio_srr_id) {
3410 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c,
3411 "IMM SRR %p deleted (id %d)\n",
3413 list_del(&imm->srr_list_entry);
3414 qlt_reject_free_srr_imm(vha, imm, 1);
3417 spin_unlock(&tgt->srr_lock);
3426 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
3428 static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
3429 struct qla_tgt_cmd *cmd, uint32_t status)
3434 struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
3435 term = !(c->flags &
3436 cpu_to_le16(OF_TERM_EXCH));
3441 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
3446 /* ha->hardware_lock supposed to be held on entry */
3447 static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha,
3450 struct qla_hw_data *ha = vha->hw;
3453 if (ha->tgt.cmds[handle] != NULL) {
3454 struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle];
3455 ha->tgt.cmds[handle] = NULL;
3461 /* ha->hardware_lock supposed to be held on entry */
3462 static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
3463 uint32_t handle, void *ctio)
3465 struct qla_tgt_cmd *cmd = NULL;
3467 /* Clear out internal marks */
3468 handle &= ~(CTIO_COMPLETION_HANDLE_MARK |
3469 CTIO_INTERMEDIATE_HANDLE_MARK);
3471 if (handle != QLA_TGT_NULL_HANDLE) {
3472 if (unlikely(handle == QLA_TGT_SKIP_HANDLE))
3473 return NULL;
3475 /* handle-1 is actually used */
3476 if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) {
3477 ql_dbg(ql_dbg_tgt, vha, 0xe052,
3478 "qla_target(%d): Wrong handle %x received\n",
3479 vha->vp_idx, handle);
3482 cmd = qlt_get_cmd(vha, handle);
3483 if (unlikely(cmd == NULL)) {
3484 ql_dbg(ql_dbg_tgt, vha, 0xe053,
3485 "qla_target(%d): Suspicious: unable to "
3486 "find the command with handle %x\n", vha->vp_idx,
3490 } else if (ctio != NULL) {
3491 /* We can't get loop ID from CTIO7 */
3492 ql_dbg(ql_dbg_tgt, vha, 0xe054,
3493 "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
3494 "support NULL handles\n", vha->vp_idx);
3501 /* hardware_lock should be held by caller. */
3503 qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
3505 struct qla_hw_data *ha = vha->hw;
3509 qlt_unmap_sg(vha, cmd);
3511 handle = qlt_make_handle(vha);
3513 /* TODO: fix debug message type and ids. */
3514 if (cmd->state == QLA_TGT_STATE_PROCESSED) {
3515 ql_dbg(ql_dbg_io, vha, 0xff00,
3516 "HOST-ABORT: handle=%d, state=PROCESSED.\n", handle);
3517 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3518 cmd->write_data_transferred = 0;
3519 cmd->state = QLA_TGT_STATE_DATA_IN;
3521 ql_dbg(ql_dbg_io, vha, 0xff01,
3522 "HOST-ABORT: handle=%d, state=DATA_IN.\n", handle);
3524 ha->tgt.tgt_ops->handle_data(cmd);
3527 ql_dbg(ql_dbg_io, vha, 0xff03,
3528 "HOST-ABORT: handle=%d, state=BAD(%d).\n", handle,
3533 cmd->cmd_flags |= BIT_17;
3534 ha->tgt.tgt_ops->free_cmd(cmd);
3538 qlt_host_reset_handler(struct qla_hw_data *ha)
3540 struct qla_tgt_cmd *cmd;
3541 unsigned long flags;
3542 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
3543 scsi_qla_host_t *vha = NULL;
3544 struct qla_tgt *tgt = base_vha->vha_tgt.qla_tgt;
3547 if (!base_vha->hw->tgt.tgt_ops)
3550 if (!tgt || qla_ini_mode_enabled(base_vha)) {
3551 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
3552 "Target mode disabled\n");
3556 ql_dbg(ql_dbg_tgt_mgt, vha, 0xff10,
3557 "HOST-ABORT-HNDLR: base_vha->dpc_flags=%lx.\n",
3558 base_vha->dpc_flags);
3560 spin_lock_irqsave(&ha->hardware_lock, flags);
3561 for (i = 1; i < DEFAULT_OUTSTANDING_COMMANDS + 1; i++) {
3562 cmd = qlt_get_cmd(base_vha, i);
3565 /* ha->tgt.cmds entry is cleared by qlt_get_cmd. */
3567 qlt_abort_cmd_on_host_reset(vha, cmd);
3569 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3574 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
3576 static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
3577 uint32_t status, void *ctio)
3579 struct qla_hw_data *ha = vha->hw;
3580 struct se_cmd *se_cmd;
3581 struct qla_tgt_cmd *cmd;
3583 if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
3584 /* That could happen only in case of an error/reset/abort */
3585 if (status != CTIO_SUCCESS) {
3586 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
3587 "Intermediate CTIO received"
3588 " (status %x)\n", status);
3593 cmd = qlt_ctio_to_cmd(vha, handle, ctio);
3597 se_cmd = &cmd->se_cmd;
3598 cmd->cmd_sent_to_fw = 0;
3600 qlt_unmap_sg(vha, cmd);
3602 if (unlikely(status != CTIO_SUCCESS)) {
3603 switch (status & 0xFFFF) {
3604 case CTIO_LIP_RESET:
3605 case CTIO_TARGET_RESET:
3607 /* driver requested an abort via Terminate exchange */
3609 case CTIO_INVALID_RX_ID:
3611 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
3612 "qla_target(%d): CTIO with "
3613 "status %#x received, state %x, se_cmd %p, "
3614 "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
3615 "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
3616 status, cmd->state, se_cmd);
3619 case CTIO_PORT_LOGGED_OUT:
3620 case CTIO_PORT_UNAVAILABLE:
3623 (status & 0xFFFF) == CTIO_PORT_LOGGED_OUT;
3625 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
3626 "qla_target(%d): CTIO with %s status %x "
3627 "received (state %x, se_cmd %p)\n", vha->vp_idx,
3628 logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE",
3629 status, cmd->state, se_cmd);
3631 if (logged_out && cmd->sess) {
3633 * Session is already logged out, but we need
3634 * to notify initiator, who's not aware of this
3636 cmd->sess->logout_on_delete = 0;
3637 cmd->sess->send_els_logo = 1;
3638 qlt_schedule_sess_for_deletion(cmd->sess, true);
3642 case CTIO_SRR_RECEIVED:
3643 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a,
3644 "qla_target(%d): CTIO with SRR_RECEIVED"
3645 " status %x received (state %x, se_cmd %p)\n",
3646 vha->vp_idx, status, cmd->state, se_cmd);
3647 if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0)
3652 case CTIO_DIF_ERROR: {
3653 struct ctio_crc_from_fw *crc =
3654 (struct ctio_crc_from_fw *)ctio;
3655 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
3656 "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n",
3657 vha->vp_idx, status, cmd->state, se_cmd,
3658 *((u64 *)&crc->actual_dif[0]),
3659 *((u64 *)&crc->expected_dif[0]));
3661 if (qlt_handle_dif_error(vha, cmd, ctio)) {
3662 if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3663 /* scsi Write/xfer rdy complete */
3666 /* scsi read/xmit response complete;
3667 * call the dif error handler to send a scsi status
3668 * rather than terminate the exchange.
3669 */
3670 cmd->state = QLA_TGT_STATE_PROCESSED;
3671 ha->tgt.tgt_ops->handle_dif_err(cmd);
3675 /* Need to generate a SCSI good completion,
3676 * because the FW did not send a SCSI status.
3677 */
3684 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
3685 "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
3686 vha->vp_idx, status, cmd->state, se_cmd);
3691 /* "cmd->aborted" means
3692 * cmd is already aborted/terminated, we don't
3693 * need to terminate again. The exchange is already
3694 * cleaned up/freed at FW level. Just clean up at the
3695 * driver level.
3696 */
3697 if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
3698 (!cmd->aborted)) {
3699 cmd->cmd_flags |= BIT_13;
3700 if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
3701 return;
3706 if (cmd->state == QLA_TGT_STATE_PROCESSED) {
3707 cmd->cmd_flags |= BIT_12;
3708 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3709 cmd->state = QLA_TGT_STATE_DATA_IN;
3711 if (status == CTIO_SUCCESS)
3712 cmd->write_data_transferred = 1;
3714 ha->tgt.tgt_ops->handle_data(cmd);
3716 } else if (cmd->aborted) {
3717 cmd->cmd_flags |= BIT_18;
3718 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
3719 "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
3721 cmd->cmd_flags |= BIT_19;
3722 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
3723 "qla_target(%d): A command in state (%d) should "
3724 "not return a CTIO complete\n", vha->vp_idx, cmd->state);
3727 if (unlikely(status != CTIO_SUCCESS) &&
3729 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
3733 ha->tgt.tgt_ops->free_cmd(cmd);
3736 static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
3741 switch (task_codes) {
3742 case ATIO_SIMPLE_QUEUE:
3743 fcp_task_attr = TCM_SIMPLE_TAG;
3745 case ATIO_HEAD_OF_QUEUE:
3746 fcp_task_attr = TCM_HEAD_TAG;
3748 case ATIO_ORDERED_QUEUE:
3749 fcp_task_attr = TCM_ORDERED_TAG;
3751 case ATIO_ACA_QUEUE:
3752 fcp_task_attr = TCM_ACA_TAG;
3755 fcp_task_attr = TCM_SIMPLE_TAG;
3758 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
3759 "qla_target: unknown task code %x, use ORDERED instead\n",
3761 fcp_task_attr = TCM_ORDERED_TAG;
3765 return fcp_task_attr;
3768 static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
3771 * Process context for I/O path into tcm_qla2xxx code
3773 static void __qlt_do_work(struct qla_tgt_cmd *cmd)
3775 scsi_qla_host_t *vha = cmd->vha;
3776 struct qla_hw_data *ha = vha->hw;
3777 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
3778 struct qla_tgt_sess *sess = cmd->sess;
3779 struct atio_from_isp *atio = &cmd->atio;
3781 unsigned long flags;
3782 uint32_t data_length;
3783 int ret, fcp_task_attr, data_dir, bidi = 0;
3786 cmd->cmd_flags |= BIT_1;
3791 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
3792 "cmd with tag %u is aborted\n",
3793 cmd->atio.u.isp24.exchange_addr);
3797 spin_lock_init(&cmd->cmd_lock);
3798 cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
3799 cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
3800 cmd->unpacked_lun = scsilun_to_int(
3801 (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
3803 if (atio->u.isp24.fcp_cmnd.rddata &&
3804 atio->u.isp24.fcp_cmnd.wrdata) {
3805 bidi = 1;
3806 data_dir = DMA_TO_DEVICE;
3807 } else if (atio->u.isp24.fcp_cmnd.rddata)
3808 data_dir = DMA_FROM_DEVICE;
3809 else if (atio->u.isp24.fcp_cmnd.wrdata)
3810 data_dir = DMA_TO_DEVICE;
3812 data_dir = DMA_NONE;
3814 fcp_task_attr = qlt_get_fcp_task_attr(vha,
3815 atio->u.isp24.fcp_cmnd.task_attr);
3816 data_length = be32_to_cpu(get_unaligned((uint32_t *)
3817 &atio->u.isp24.fcp_cmnd.add_cdb[
3818 atio->u.isp24.fcp_cmnd.add_cdb_len]));
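/*
 * The FCP_CMND "data length" is a big-endian 32-bit field placed right
 * after the variable-length additional CDB bytes, hence the
 * add_cdb[add_cdb_len] indexing above (the same trick is used for the
 * residual in __qlt_send_term_exchange()).
 */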
3820 ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
3821 fcp_task_attr, data_dir, bidi);
3825 * Drop the extra session reference taken in qlt_handle_cmd_for_atio().
3826 */
3827 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
3829 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
3833 ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
3835 * cmd has not sent to target yet, so pass NULL as the second
3836 * argument to qlt_send_term_exchange() and free the memory here.
3838 cmd->cmd_flags |= BIT_2;
3839 spin_lock_irqsave(&ha->hardware_lock, flags);
3840 qlt_send_term_exchange(vha, NULL, &cmd->atio, 1, 0);
3842 qlt_decr_num_pend_cmds(vha);
3843 percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
3844 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3846 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
3848 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
3851 static void qlt_do_work(struct work_struct *work)
3853 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
3854 scsi_qla_host_t *vha = cmd->vha;
3855 unsigned long flags;
3857 spin_lock_irqsave(&vha->cmd_list_lock, flags);
3858 list_del(&cmd->cmd_list);
3859 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
3864 static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
3865 struct qla_tgt_sess *sess,
3866 struct atio_from_isp *atio)
3868 struct se_session *se_sess = sess->se_sess;
3869 struct qla_tgt_cmd *cmd;
3872 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
3876 cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
3877 memset(cmd, 0, sizeof(struct qla_tgt_cmd));
3879 memcpy(&cmd->atio, atio, sizeof(*atio));
3880 cmd->state = QLA_TGT_STATE_NEW;
3881 cmd->tgt = vha->vha_tgt.qla_tgt;
3882 qlt_incr_num_pend_cmds(vha);
3884 cmd->se_cmd.map_tag = tag;
3886 cmd->loop_id = sess->loop_id;
3887 cmd->conf_compl_supported = sess->conf_compl_supported;
3890 cmd->jiffies_at_alloc = get_jiffies_64();
3892 cmd->reset_count = vha->hw->chip_reset;
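/*
 * reset_count is snapshotted here and later compared against
 * ha->chip_reset in qlt_xmit_response()/qlt_rdy_to_xfer(), so commands
 * created in a previous chip life are aborted rather than sent to the
 * new firmware instance.
 */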
3897 static void qlt_send_busy(struct scsi_qla_host *, struct atio_from_isp *,
3900 static void qlt_create_sess_from_atio(struct work_struct *work)
3902 struct qla_tgt_sess_op *op = container_of(work,
3903 struct qla_tgt_sess_op, work);
3904 scsi_qla_host_t *vha = op->vha;
3905 struct qla_hw_data *ha = vha->hw;
3906 struct qla_tgt_sess *sess;
3907 struct qla_tgt_cmd *cmd;
3908 unsigned long flags;
3909 uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
3911 spin_lock_irqsave(&vha->cmd_list_lock, flags);
3912 list_del(&op->cmd_list);
3913 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
3916 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083,
3917 "sess_op with tag %u is aborted\n",
3918 op->atio.u.isp24.exchange_addr);
3922 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
3923 "qla_target(%d): Unable to find wwn login"
3924 " (s_id %x:%x:%x), trying to create it manually\n",
3925 vha->vp_idx, s_id[0], s_id[1], s_id[2]);
3927 if (op->atio.u.raw.entry_count > 1) {
3928 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
3929 "Dropping multy entry atio %p\n", &op->atio);
3933 sess = qlt_make_local_sess(vha, s_id);
3934 /* sess has an extra creation ref. */
3939 * Now obtain a pre-allocated session tag using the original op->atio
3940 * packet header, and dispatch into __qlt_do_work() using the existing
3943 cmd = qlt_get_tag(vha, sess, &op->atio);
3945 spin_lock_irqsave(&ha->hardware_lock, flags);
3946 qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY);
3948 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3953 * __qlt_do_work() will call qlt_put_sess() to release
3954 * the extra reference taken above by qlt_make_local_sess()
3961 spin_lock_irqsave(&ha->hardware_lock, flags);
3962 qlt_send_term_exchange(vha, NULL, &op->atio, 1, 0);
3963 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3968 /* ha->hardware_lock supposed to be held on entry */
3969 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
3970 struct atio_from_isp *atio)
3972 struct qla_hw_data *ha = vha->hw;
3973 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
3974 struct qla_tgt_sess *sess;
3975 struct qla_tgt_cmd *cmd;
3977 if (unlikely(tgt->tgt_stop)) {
3978 ql_dbg(ql_dbg_io, vha, 0x3061,
3979 "New command while device %p is shutting down\n", tgt);
3983 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
3984 if (unlikely(!sess)) {
3985 struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op),
3990 memcpy(&op->atio, atio, sizeof(*atio));
3993 spin_lock(&vha->cmd_list_lock);
3994 list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list);
3995 spin_unlock(&vha->cmd_list_lock);
3997 INIT_WORK(&op->work, qlt_create_sess_from_atio);
3998 queue_work(qla_tgt_wq, &op->work);
4002 /* Another WWN used to have our s_id. Our PLOGI scheduled its
4003 * session deletion, but it's still in sess_del_work wq */
4004 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
4005 ql_dbg(ql_dbg_io, vha, 0x3061,
4006 "New command while old session %p is being deleted\n",
4012 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
4014 kref_get(&sess->sess_kref);
4016 cmd = qlt_get_tag(vha, sess, atio);
4018 ql_dbg(ql_dbg_io, vha, 0x3062,
4019 "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
4025 cmd->cmd_flags |= BIT_0;
4026 cmd->se_cmd.cpuid = ha->msix_count ?
4027 ha->tgt.rspq_vector_cpuid : WORK_CPU_UNBOUND;
4029 spin_lock(&vha->cmd_list_lock);
4030 list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
4031 spin_unlock(&vha->cmd_list_lock);
4033 INIT_WORK(&cmd->work, qlt_do_work);
4034 if (ha->msix_count) {
4035 if (cmd->atio.u.isp24.fcp_cmnd.rddata)
4036 queue_work_on(smp_processor_id(), qla_tgt_wq,
4037 &cmd->work);
4038 else
4039 queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
4040 &cmd->work);
4041 } else {
4042 queue_work(qla_tgt_wq, &cmd->work);
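/*
 * CPU placement above (in the MSI-X case): reads are queued on the CPU
 * that received the ATIO, while other commands go to the CPU of the
 * response-queue vector, presumably to keep command processing on the
 * same cache-warm CPU as its completions.
 */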
4048 /* ha->hardware_lock supposed to be held on entry */
4049 static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
4050 int fn, void *iocb, int flags)
4052 struct scsi_qla_host *vha = sess->vha;
4053 struct qla_hw_data *ha = vha->hw;
4054 struct qla_tgt_mgmt_cmd *mcmd;
4055 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4059 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
4061 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
4062 "qla_target(%d): Allocation of management "
4063 "command failed, some commands and their data could "
4064 "leak\n", vha->vp_idx);
4067 memset(mcmd, 0, sizeof(*mcmd));
4071 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
4072 sizeof(mcmd->orig_iocb.imm_ntfy));
4074 mcmd->tmr_func = fn;
4075 mcmd->flags = flags;
4076 mcmd->reset_count = vha->hw->chip_reset;
4079 case QLA_TGT_CLEAR_ACA:
4080 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000,
4081 "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx);
4082 tmr_func = TMR_CLEAR_ACA;
4085 case QLA_TGT_TARGET_RESET:
4086 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001,
4087 "qla_target(%d): TARGET_RESET received\n",
4089 tmr_func = TMR_TARGET_WARM_RESET;
4092 case QLA_TGT_LUN_RESET:
4093 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
4094 "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
4095 tmr_func = TMR_LUN_RESET;
4096 abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
4099 case QLA_TGT_CLEAR_TS:
4100 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003,
4101 "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx);
4102 tmr_func = TMR_CLEAR_TASK_SET;
4105 case QLA_TGT_ABORT_TS:
4106 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004,
4107 "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx);
4108 tmr_func = TMR_ABORT_TASK_SET;
4111 case QLA_TGT_ABORT_ALL:
4112 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005,
4113 "qla_target(%d): Doing ABORT_ALL_TASKS\n",
4118 case QLA_TGT_ABORT_ALL_SESS:
4119 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006,
4120 "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n",
4125 case QLA_TGT_NEXUS_LOSS_SESS:
4126 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007,
4127 "qla_target(%d): Doing NEXUS_LOSS_SESS\n",
4132 case QLA_TGT_NEXUS_LOSS:
4133 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008,
4134 "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx);
4139 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a,
4140 "qla_target(%d): Unknown task mgmt fn 0x%x\n",
4141 sess->vha->vp_idx, fn);
4142 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
4146 res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0);
4148 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
4149 "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
4150 sess->vha->vp_idx, res);
4151 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
4158 /* ha->hardware_lock supposed to be held on entry */
4159 static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
4161 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4162 struct qla_hw_data *ha = vha->hw;
4163 struct qla_tgt *tgt;
4164 struct qla_tgt_sess *sess;
4165 uint32_t lun, unpacked_lun;
4167 unsigned long flags;
4169 tgt = vha->vha_tgt.qla_tgt;
4171 lun = a->u.isp24.fcp_cmnd.lun;
4172 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
4174 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4175 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
4176 a->u.isp24.fcp_hdr.s_id);
4177 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4179 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
4182 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
4183 "qla_target(%d): task mgmt fn 0x%x for "
4184 "non-existant session\n", vha->vp_idx, fn);
4185 return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
4186 sizeof(struct atio_from_isp));
4189 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)
4192 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
4195 /* ha->hardware_lock supposed to be held on entry */
4196 static int __qlt_abort_task(struct scsi_qla_host *vha,
4197 struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess)
4199 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4200 struct qla_hw_data *ha = vha->hw;
4201 struct qla_tgt_mgmt_cmd *mcmd;
4202 uint32_t lun, unpacked_lun;
4205 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
4207 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
4208 "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
4209 vha->vp_idx, __func__);
4212 memset(mcmd, 0, sizeof(*mcmd));
4215 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
4216 sizeof(mcmd->orig_iocb.imm_ntfy));
4218 lun = a->u.isp24.fcp_cmnd.lun;
4219 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
4220 mcmd->reset_count = vha->hw->chip_reset;
4222 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
4223 le16_to_cpu(iocb->u.isp2x.seq_id));
4225 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
4226 "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
4228 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
4235 /* ha->hardware_lock supposed to be held on entry */
4236 static int qlt_abort_task(struct scsi_qla_host *vha,
4237 struct imm_ntfy_from_isp *iocb)
4239 struct qla_hw_data *ha = vha->hw;
4240 struct qla_tgt_sess *sess;
4242 unsigned long flags;
4244 loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);
4246 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4247 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
4248 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4251 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
4252 "qla_target(%d): task abort for unexisting "
4253 "session\n", vha->vp_idx);
4254 return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
4255 QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
4258 return __qlt_abort_task(vha, iocb, sess);
4261 void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
4263 if (fcport->tgt_session) {
4264 if (rc != MBS_COMMAND_COMPLETE) {
4265 ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
4266 "%s: se_sess %p / sess %p from"
4267 " port %8phC loop_id %#04x s_id %02x:%02x:%02x"
4268 " LOGO failed: %#x\n",
4270 fcport->tgt_session->se_sess,
4271 fcport->tgt_session,
4272 fcport->port_name, fcport->loop_id,
4273 fcport->d_id.b.domain, fcport->d_id.b.area,
4274 fcport->d_id.b.al_pa, rc);
4277 fcport->tgt_session->logout_completed = 1;
4282 * ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
4284 * Schedules sessions with matching port_id/loop_id but different wwn for
4285 * deletion. Returns existing session with matching wwn if present.
4288 static struct qla_tgt_sess *
4289 qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn,
4290 port_id_t port_id, uint16_t loop_id, struct qla_tgt_sess **conflict_sess)
4292 struct qla_tgt_sess *sess = NULL, *other_sess;
4295 *conflict_sess = NULL;
4297 list_for_each_entry(other_sess, &tgt->sess_list, sess_list_entry) {
4299 other_wwn = wwn_to_u64(other_sess->port_name);
4301 if (wwn == other_wwn) {
4307 /* find other sess with nport_id collision */
4308 if (port_id.b24 == other_sess->s_id.b24) {
4309 if (loop_id != other_sess->loop_id) {
4310 ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000c,
4311 "Invalidating sess %p loop_id %d wwn %llx.\n",
4312 other_sess, other_sess->loop_id, other_wwn);
4315 * logout_on_delete is set by default, but another
4316 * session that has the same s_id/loop_id combo
4317 * might have cleared it when it requested this session's
4318 * deletion, so don't touch it
4320 qlt_schedule_sess_for_deletion(other_sess, true);
4323 * Another wwn used to have our s_id/loop_id
4324 * kill the session, but don't free the loop_id
4326 other_sess->keep_nport_handle = 1;
4327 *conflict_sess = other_sess;
4328 qlt_schedule_sess_for_deletion(other_sess,
4334 /* find other sess with nport handle collision */
4335 if (loop_id == other_sess->loop_id) {
4336 ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000d,
4337 "Invalidating sess %p loop_id %d wwn %llx.\n",
4338 other_sess, other_sess->loop_id, other_wwn);
4340 /* Same loop_id but different s_id;
4341 * OK to kill and log out */
4342 qlt_schedule_sess_for_deletion(other_sess, true);
4349 /* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
4350 static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
4352 struct qla_tgt_sess_op *op;
4353 struct qla_tgt_cmd *cmd;
4357 key = (((u32)s_id->b.domain << 16) |
4358 ((u32)s_id->b.area << 8) |
4359 ((u32)s_id->b.al_pa));
4361 spin_lock(&vha->cmd_list_lock);
4362 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
4363 uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
4364 if (op_key == key) {
4369 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
4370 uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
4371 if (cmd_key == key) {
4376 spin_unlock(&vha->cmd_list_lock);
4382 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
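/*
 * Dispatch incoming immediate-notify ELS frames (PLOGI, PRLI, LOGO/PRLO,
 * PDISC/ADISC, etc.) by status_subcode. Returns 1 when the caller should
 * send the notify ack now, 0 when it will be acked asynchronously.
 */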
4384 static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4385 struct imm_ntfy_from_isp *iocb)
4387 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4388 struct qla_hw_data *ha = vha->hw;
4389 struct qla_tgt_sess *sess = NULL, *conflict_sess = NULL;
4395 qlt_plogi_ack_t *pla;
4396 unsigned long flags;
4398 wwn = wwn_to_u64(iocb->u.isp24.port_name);
4400 port_id.b.domain = iocb->u.isp24.port_id[2];
4401 port_id.b.area = iocb->u.isp24.port_id[1];
4402 port_id.b.al_pa = iocb->u.isp24.port_id[0];
4403 port_id.b.rsvd_1 = 0;
4405 loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
4407 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
4408 "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
4409 vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);
4411 /* res = 1 means ack at the end of thread
4412 * res = 0 means ack async/later.
4414 switch (iocb->u.isp24.status_subcode) {
4417 /* Mark all stale commands in qla_tgt_wq for deletion */
4418 abort_cmds_for_s_id(vha, &port_id);
4421 spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
4422 sess = qlt_find_sess_invalidate_other(tgt, wwn,
4423 port_id, loop_id, &conflict_sess);
4424 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
4427 if (IS_SW_RESV_ADDR(port_id) || (!sess && !conflict_sess)) {
4432 pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
4434 qlt_send_term_imm_notif(vha, iocb, 1);
4443 qlt_plogi_ack_link(vha, pla, conflict_sess,
4444 QLT_PLOGI_LINK_CONFLICT);
4449 qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
4451 * Under normal circumstances we want to release nport handle
4452 * during LOGO process to avoid nport handle leaks inside FW.
4453 * The exception is when LOGO is done while another PLOGI with
4454 * the same nport handle is waiting as might be the case here.
4455 * Note: there is always a possibility of a race where session
4456 * deletion has already started for other reasons (e.g. ACL
4457 * removal) and now PLOGI arrives:
4458 * 1. if PLOGI arrived in FW after nport handle has been freed,
4459 * FW must have assigned this PLOGI a new/same handle and we
4460 * can proceed ACK'ing it as usual when session deletion
4461 * completes.
4462 * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
4463 * bit reached it, the handle has now been released. We'll
4464 * get an error when we ACK this PLOGI. Nothing will be sent
4465 * back to initiator. Initiator should eventually retry
4466 * PLOGI and situation will correct itself.
4468 sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
4469 (sess->s_id.b24 == port_id.b24));
4470 qlt_schedule_sess_for_deletion(sess, true);
4474 wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
4477 spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
4478 sess = qlt_find_sess_invalidate_other(tgt, wwn, port_id,
4479 loop_id, &conflict_sess);
4480 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
4483 if (conflict_sess) {
4484 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
4485 "PRLI with conflicting sess %p port %8phC\n",
4486 conflict_sess, conflict_sess->port_name);
4487 qlt_send_term_imm_notif(vha, iocb, 1);
4493 if (sess->deleted) {
4495 * Impatient initiator sent PRLI before the last
4496 * PLOGI could finish. Force it to retry
4497 * while the last one finishes.
4499 ql_log(ql_log_warn, sess->vha, 0xf095,
4500 "sess %p PRLI received, before plogi ack.\n",
4502 qlt_send_term_imm_notif(vha, iocb, 1);
4508 * This shouldn't happen under normal circumstances,
4509 * since we have deleted the old session during PLOGI
4511 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
4512 "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
4513 sess->loop_id, sess, iocb->u.isp24.nport_handle);
4516 sess->loop_id = loop_id;
4517 sess->s_id = port_id;
4520 sess->conf_compl_supported = 1;
4523 res = 1; /* send notify ack */
4525 /* Make session global (not used in fabric mode) */
4526 if (ha->current_topology != ISP_CFG_F) {
4527 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4528 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4529 qla2xxx_wake_dpc(vha);
4531 /* todo: else - create sess here. */
4532 res = 1; /* send notify ack */
4539 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
4544 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4545 if (tgt->link_reinit_iocb_pending) {
4546 qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
4548 tgt->link_reinit_iocb_pending = 0;
4550 res = 1; /* send notify ack */
4554 case ELS_FLOGI: /* should never happen */
4556 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
4557 "qla_target(%d): Unsupported ELS command %x "
4558 "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
4559 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
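/*
 * Rebase cmd->sg/sg_cnt/bufflen so that the data transfer resumes
 * 'offset' bytes into the original scatterlist, allocating a new
 * scatterlist for the remainder. Used when honoring an SRR.
 */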
4566 static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
4570 * FIXME: Reject non-zero SRR relative offsets until we can test
4571 * this code properly.
4573 pr_debug("Rejecting non-zero SRR rel_offs: %u\n", offset);
4576 struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL;
4577 size_t first_offset = 0, rem_offset = offset, tmp = 0;
4578 int i, sg_srr_cnt, bufflen = 0;
4580 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023,
4581 "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, "
4582 "cmd->sg_cnt: %u, direction: %d\n",
4583 cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
4585 if (!cmd->sg || !cmd->sg_cnt) {
4586 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055,
4587 "Missing cmd->sg or zero cmd->sg_cnt in"
4588 " qla_tgt_set_data_offset\n");
4592 * Walk the current cmd->sg list until we locate the new sg_srr_start
4594 for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
4595 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024,
4596 "sg[%d]: %p page: %p, length: %d, offset: %d\n",
4597 i, sg, sg_page(sg), sg->length, sg->offset);
4599 if ((sg->length + tmp) > offset) {
4600 first_offset = rem_offset;
4602 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025,
4603 "Found matching sg[%d], using %p as sg_srr_start, "
4604 "and using first_offset: %zu\n", i, sg,
4609 rem_offset -= sg->length;
4612 if (!sg_srr_start) {
4613 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056,
4614 "Unable to locate sg_srr_start for offset: %u\n", offset);
4617 sg_srr_cnt = (cmd->sg_cnt - i);
4619 sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL);
4621 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057,
4622 "Unable to allocate sgp\n");
4625 sg_init_table(sg_srr, sg_srr_cnt);
4628 * Walk the remaining list for sg_srr_start, mapping to the newly
4629 * allocated sg_srr taking first_offset into account.
4631 for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) {
4633 sg_set_page(sgp, sg_page(sg),
4634 (sg->length - first_offset), first_offset);
4637 sg_set_page(sgp, sg_page(sg), sg->length, 0);
4639 bufflen += sgp->length;
4647 cmd->sg_cnt = sg_srr_cnt;
4648 cmd->bufflen = bufflen;
4649 cmd->offset += offset;
4652 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg);
4653 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n",
4655 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n",
4657 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n",
4660 if (cmd->sg_cnt < 0)
4663 if (cmd->bufflen < 0)
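/*
 * Convert the SRR relative offset into an offset within this command and
 * choose the xmit_type: resend everything, resend status only, or resend
 * data starting at the adjusted offset.
 */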
4670 static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd,
4671 uint32_t srr_rel_offs, int *xmit_type)
4673 int res = 0, rel_offs;
4675 rel_offs = srr_rel_offs - cmd->offset;
4676 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d",
4677 srr_rel_offs, rel_offs);
4679 *xmit_type = QLA_TGT_XMIT_ALL;
4682 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062,
4683 "qla_target(%d): SRR rel_offs (%d) < 0",
4684 cmd->vha->vp_idx, rel_offs);
4686 } else if (rel_offs == cmd->bufflen)
4687 *xmit_type = QLA_TGT_XMIT_STATUS;
4688 else if (rel_offs > 0)
4689 res = qlt_set_data_offset(cmd, rel_offs);
4694 /* No locks, thread context */
4695 static void qlt_handle_srr(struct scsi_qla_host *vha,
4696 struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm)
4698 struct imm_ntfy_from_isp *ntfy =
4699 (struct imm_ntfy_from_isp *)&imm->imm_ntfy;
4700 struct qla_hw_data *ha = vha->hw;
4701 struct qla_tgt_cmd *cmd = sctio->cmd;
4702 struct se_cmd *se_cmd = &cmd->se_cmd;
4703 unsigned long flags;
4704 int xmit_type = 0, resp = 0;
4708 offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs);
4709 srr_ui = ntfy->u.isp24.srr_ui;
4711 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n",
4716 spin_lock_irqsave(&ha->hardware_lock, flags);
4717 qlt_send_notify_ack(vha, ntfy,
4718 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
4719 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4720 xmit_type = QLA_TGT_XMIT_STATUS;
4723 case SRR_IU_DATA_IN:
4724 if (!cmd->sg || !cmd->sg_cnt) {
4725 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063,
4726 "Unable to process SRR_IU_DATA_IN due to"
4727 " missing cmd->sg, state: %d\n", cmd->state);
4731 if (se_cmd->scsi_status != 0) {
4732 ql_dbg(ql_dbg_tgt, vha, 0xe02a,
4733 "Rejecting SRR_IU_DATA_IN with non GOOD "
4737 cmd->bufflen = se_cmd->data_length;
4739 if (qlt_has_data(cmd)) {
4740 if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
4742 spin_lock_irqsave(&ha->hardware_lock, flags);
4743 qlt_send_notify_ack(vha, ntfy,
4744 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
4745 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4748 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064,
4749 "qla_target(%d): SRR for in data for cmd without them (tag %lld, SCSI status %d), reject",
4750 vha->vp_idx, se_cmd->tag,
4751 cmd->se_cmd.scsi_status);
4755 case SRR_IU_DATA_OUT:
4756 if (!cmd->sg || !cmd->sg_cnt) {
4757 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065,
4758 "Unable to process SRR_IU_DATA_OUT due to"
4759 " missing cmd->sg\n");
4763 if (se_cmd->scsi_status != 0) {
4764 ql_dbg(ql_dbg_tgt, vha, 0xe02b,
4765 "Rejecting SRR_IU_DATA_OUT"
4766 " with non GOOD scsi_status\n");
4769 cmd->bufflen = se_cmd->data_length;
4771 if (qlt_has_data(cmd)) {
4772 if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
4774 spin_lock_irqsave(&ha->hardware_lock, flags);
4775 qlt_send_notify_ack(vha, ntfy,
4776 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
4777 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4778 if (xmit_type & QLA_TGT_XMIT_DATA) {
4779 cmd->cmd_flags |= BIT_8;
4780 qlt_rdy_to_xfer(cmd);
4783 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
4784 "qla_target(%d): SRR for out data for cmd without them (tag %lld, SCSI status %d), reject",
4785 vha->vp_idx, se_cmd->tag, cmd->se_cmd.scsi_status);
4790 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067,
4791 "qla_target(%d): Unknown srr_ui value %x",
4792 vha->vp_idx, srr_ui);
4796 /* Transmit response in case of status and data-in cases */
4798 cmd->cmd_flags |= BIT_7;
4799 qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
4805 spin_lock_irqsave(&ha->hardware_lock, flags);
4806 qlt_send_notify_ack(vha, ntfy, 0, 0, 0,
4807 NOTIFY_ACK_SRR_FLAGS_REJECT,
4808 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
4809 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
4810 if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
4811 cmd->state = QLA_TGT_STATE_DATA_IN;
4814 cmd->cmd_flags |= BIT_9;
4815 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
4817 spin_unlock_irqrestore(&ha->hardware_lock, flags);
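/*
 * Reject an immediate-notify SRR with UNABLE_TO_PERFORM and free the
 * entry, taking hardware_lock unless the caller already holds it.
 */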
4820 static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha,
4821 struct qla_tgt_srr_imm *imm, int ha_locked)
4823 struct qla_hw_data *ha = vha->hw;
4824 unsigned long flags = 0;
4828 spin_lock_irqsave(&ha->hardware_lock, flags);
4831 qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0,
4832 NOTIFY_ACK_SRR_FLAGS_REJECT,
4833 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
4834 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
4838 spin_unlock_irqrestore(&ha->hardware_lock, flags);
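/*
 * Work handler that pairs each queued CTIO SRR with its immediate-notify
 * SRR (matched by srr_id), restores the command's SGL from se_cmd, and
 * processes the pair via qlt_handle_srr() outside of IRQ context.
 */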
4844 static void qlt_handle_srr_work(struct work_struct *work)
4846 struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work);
4847 struct scsi_qla_host *vha = tgt->vha;
4848 struct qla_tgt_srr_ctio *sctio;
4849 unsigned long flags;
4851 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n",
4855 spin_lock_irqsave(&tgt->srr_lock, flags);
4856 list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) {
4857 struct qla_tgt_srr_imm *imm, *i, *ti;
4858 struct qla_tgt_cmd *cmd;
4859 struct se_cmd *se_cmd;
4862 list_for_each_entry_safe(i, ti, &tgt->srr_imm_list,
4864 if (i->srr_id == sctio->srr_id) {
4865 list_del(&i->srr_list_entry);
4867 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068,
4868 "qla_target(%d): There must be "
4869 "only one IMM SRR per CTIO SRR "
4870 "(IMM SRR %p, id %d, CTIO %p\n",
4871 vha->vp_idx, i, i->srr_id, sctio);
4872 qlt_reject_free_srr_imm(tgt->vha, i, 0);
4878 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a,
4879 "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio,
4883 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b,
4884 "Not found matching IMM for SRR CTIO (id %d)\n",
4888 list_del(&sctio->srr_list_entry);
4890 spin_unlock_irqrestore(&tgt->srr_lock, flags);
4894 * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow
4895 * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in()
4904 se_cmd = &cmd->se_cmd;
4906 cmd->sg_cnt = se_cmd->t_data_nents;
4907 cmd->sg = se_cmd->t_data_sg;
4909 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
4910 "SRR cmd %p (se_cmd %p, tag %lld, op %x), sg_cnt=%d, offset=%d",
4911 cmd, &cmd->se_cmd, se_cmd->tag, se_cmd->t_task_cdb ?
4912 se_cmd->t_task_cdb[0] : 0, cmd->sg_cnt, cmd->offset);
4914 qlt_handle_srr(vha, sctio, imm);
4920 spin_unlock_irqrestore(&tgt->srr_lock, flags);
4923 /* ha->hardware_lock is supposed to be held on entry */
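/*
 * Queue the immediate-notify SRR and, once its CTIO SRR counterpart is
 * present, schedule srr_work; if the entry can't be allocated, reject
 * the SRR and terminate any matching CTIO exchange.
 */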
4924 static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
4925 struct imm_ntfy_from_isp *iocb)
4927 struct qla_tgt_srr_imm *imm;
4928 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4929 struct qla_tgt_srr_ctio *sctio;
4933 ql_log(ql_log_warn, vha, 0xf02d, "qla_target(%d): SRR received\n",
4936 imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
4938 memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy));
4940 /* IRQ is already OFF */
4941 spin_lock(&tgt->srr_lock);
4942 imm->srr_id = tgt->imm_srr_id;
4943 list_add_tail(&imm->srr_list_entry,
4944 &tgt->srr_imm_list);
4945 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e,
4946 "IMM NTFY SRR %p added (id %d, ui %x)\n",
4947 imm, imm->srr_id, iocb->u.isp24.srr_ui);
4948 if (tgt->imm_srr_id == tgt->ctio_srr_id) {
4950 list_for_each_entry(sctio, &tgt->srr_ctio_list,
4952 if (sctio->srr_id == imm->srr_id) {
4958 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s",
4959 "Scheduling srr work\n");
4960 schedule_work(&tgt->srr_work);
4962 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030,
4963 "qla_target(%d): imm_srr_id "
4964 "== ctio_srr_id (%d), but there is no "
4965 "corresponding SRR CTIO, deleting IMM "
4966 "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id,
4968 list_del(&imm->srr_list_entry);
4972 spin_unlock(&tgt->srr_lock);
4976 spin_unlock(&tgt->srr_lock);
4978 struct qla_tgt_srr_ctio *ts;
4980 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069,
4981 "qla_target(%d): Unable to allocate SRR IMM "
4982 "entry, SRR request will be rejected\n", vha->vp_idx);
4984 /* IRQ is already OFF */
4985 spin_lock(&tgt->srr_lock);
4986 list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list,
4988 if (sctio->srr_id == tgt->imm_srr_id) {
4989 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031,
4990 "CTIO SRR %p deleted (id %d)\n",
4991 sctio, sctio->srr_id);
4992 list_del(&sctio->srr_list_entry);
4993 qlt_send_term_exchange(vha, sctio->cmd,
4994 &sctio->cmd->atio, 1, 0);
4998 spin_unlock(&tgt->srr_lock);
5005 qlt_send_notify_ack(vha, iocb, 0, 0, 0,
5006 NOTIFY_ACK_SRR_FLAGS_REJECT,
5007 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
5008 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
5012 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
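/*
 * Dispatch an immediate-notify IOCB by status: LIP/link events, port
 * logouts, TPRLO, aborts, task management and ELS frames. The IOCB is
 * acked here unless one of the handlers takes ownership of it.
 */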
5014 static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
5015 struct imm_ntfy_from_isp *iocb)
5017 struct qla_hw_data *ha = vha->hw;
5018 uint32_t add_flags = 0;
5019 int send_notify_ack = 1;
5022 status = le16_to_cpu(iocb->u.isp2x.status);
5024 case IMM_NTFY_LIP_RESET:
5026 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
5027 "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
5028 vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
5029 iocb->u.isp24.status_subcode);
5031 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
5032 send_notify_ack = 0;
5036 case IMM_NTFY_LIP_LINK_REINIT:
5038 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5039 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
5040 "qla_target(%d): LINK REINIT (loop %#x, "
5041 "subcode %x)\n", vha->vp_idx,
5042 le16_to_cpu(iocb->u.isp24.nport_handle),
5043 iocb->u.isp24.status_subcode);
5044 if (tgt->link_reinit_iocb_pending) {
5045 qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
5048 memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
5049 tgt->link_reinit_iocb_pending = 1;
5051 * QLogic requires waiting after LINK REINIT for possible
5052 * PDISC or ADISC ELS commands
5054 send_notify_ack = 0;
5058 case IMM_NTFY_PORT_LOGOUT:
5059 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
5060 "qla_target(%d): Port logout (loop "
5061 "%#x, subcode %x)\n", vha->vp_idx,
5062 le16_to_cpu(iocb->u.isp24.nport_handle),
5063 iocb->u.isp24.status_subcode);
5065 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
5066 send_notify_ack = 0;
5067 /* The sessions will be cleared in the callback, if needed */
5070 case IMM_NTFY_GLBL_TPRLO:
5071 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
5072 "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
5073 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
5074 send_notify_ack = 0;
5075 /* The sessions will be cleared in the callback, if needed */
5078 case IMM_NTFY_PORT_CONFIG:
5079 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
5080 "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
5082 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
5083 send_notify_ack = 0;
5084 /* The sessions will be cleared in the callback, if needed */
5087 case IMM_NTFY_GLBL_LOGO:
5088 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
5089 "qla_target(%d): Link failure detected\n",
5091 /* I_T nexus loss */
5092 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
5093 send_notify_ack = 0;
5096 case IMM_NTFY_IOCB_OVERFLOW:
5097 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
5098 "qla_target(%d): Cannot provide requested "
5099 "capability (IOCB overflowed the immediate notify "
5100 "resource count)\n", vha->vp_idx);
5103 case IMM_NTFY_ABORT_TASK:
5104 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
5105 "qla_target(%d): Abort Task (S %08x I %#x -> "
5106 "L %#x)\n", vha->vp_idx,
5107 le16_to_cpu(iocb->u.isp2x.seq_id),
5108 GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
5109 le16_to_cpu(iocb->u.isp2x.lun));
5110 if (qlt_abort_task(vha, iocb) == 0)
5111 send_notify_ack = 0;
5114 case IMM_NTFY_RESOURCE:
5115 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
5116 "qla_target(%d): Out of resources, host %ld\n",
5117 vha->vp_idx, vha->host_no);
5120 case IMM_NTFY_MSG_RX:
5121 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
5122 "qla_target(%d): Immediate notify task %x\n",
5123 vha->vp_idx, iocb->u.isp2x.task_flags);
5124 if (qlt_handle_task_mgmt(vha, iocb) == 0)
5125 send_notify_ack = 0;
5129 if (qlt_24xx_handle_els(vha, iocb) == 0)
5130 send_notify_ack = 0;
5134 qlt_prepare_srr_imm(vha, iocb);
5135 send_notify_ack = 0;
5139 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
5140 "qla_target(%d): Received unknown immediate "
5141 "notify status %x\n", vha->vp_idx, status);
5145 if (send_notify_ack)
5146 qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0);
5150 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
5151 * This function sends a BUSY status to the initiator on ISP 2xxx or 24xx.
5153 static int __qlt_send_busy(struct scsi_qla_host *vha,
5154 struct atio_from_isp *atio, uint16_t status)
5156 struct ctio7_to_24xx *ctio24;
5157 struct qla_hw_data *ha = vha->hw;
5159 struct qla_tgt_sess *sess = NULL;
5160 unsigned long flags;
5162 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
5163 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
5164 atio->u.isp24.fcp_hdr.s_id);
5165 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5167 qlt_send_term_exchange(vha, NULL, atio, 1, 0);
5170 /* Sending a marker isn't necessary, since we're called from the ISR */
5172 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
5174 ql_dbg(ql_dbg_io, vha, 0x3063,
5175 "qla_target(%d): %s failed: unable to allocate "
5176 "request packet", vha->vp_idx, __func__);
5180 vha->tgt_counters.num_q_full_sent++;
5181 pkt->entry_count = 1;
5182 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
5184 ctio24 = (struct ctio7_to_24xx *)pkt;
5185 ctio24->entry_type = CTIO_TYPE7;
5186 ctio24->nport_handle = sess->loop_id;
5187 ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
5188 ctio24->vp_index = vha->vp_idx;
5189 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
5190 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
5191 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
5192 ctio24->exchange_addr = atio->u.isp24.exchange_addr;
5193 ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
5195 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
5196 CTIO7_FLAGS_DONT_RET_CTIO);
5198 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
5199 * if explicit confirmation is used.
5201 ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
5202 ctio24->u.status1.scsi_status = cpu_to_le16(status);
5203 /* Memory Barrier */
5205 qla2x00_start_iocbs(vha, vha->req);
5210 * This routine is used to allocate a command for either a QFull condition
5211 * (i.e. reply SAM_STAT_BUSY) or to terminate an exchange that did not go
5215 qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
5216 struct atio_from_isp *atio, uint16_t status, int qfull)
5218 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5219 struct qla_hw_data *ha = vha->hw;
5220 struct qla_tgt_sess *sess;
5221 struct se_session *se_sess;
5222 struct qla_tgt_cmd *cmd;
5225 if (unlikely(tgt->tgt_stop)) {
5226 ql_dbg(ql_dbg_io, vha, 0x300a,
5227 "New command while device %p is shutting down\n", tgt);
5231 if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
5232 vha->hw->tgt.num_qfull_cmds_dropped++;
5233 if (vha->hw->tgt.num_qfull_cmds_dropped >
5234 vha->qla_stats.stat_max_qfull_cmds_dropped)
5235 vha->qla_stats.stat_max_qfull_cmds_dropped =
5236 vha->hw->tgt.num_qfull_cmds_dropped;
5238 ql_dbg(ql_dbg_io, vha, 0x3068,
5239 "qla_target(%d): %s: QFull CMD dropped[%d]\n",
5240 vha->vp_idx, __func__,
5241 vha->hw->tgt.num_qfull_cmds_dropped);
5243 qlt_chk_exch_leak_thresh_hold(vha);
5247 sess = ha->tgt.tgt_ops->find_sess_by_s_id
5248 (vha, atio->u.isp24.fcp_hdr.s_id);
5252 se_sess = sess->se_sess;
5254 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
5258 cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
5260 ql_dbg(ql_dbg_io, vha, 0x3009,
5261 "qla_target(%d): %s: Allocation of cmd failed\n",
5262 vha->vp_idx, __func__);
5264 vha->hw->tgt.num_qfull_cmds_dropped++;
5265 if (vha->hw->tgt.num_qfull_cmds_dropped >
5266 vha->qla_stats.stat_max_qfull_cmds_dropped)
5267 vha->qla_stats.stat_max_qfull_cmds_dropped =
5268 vha->hw->tgt.num_qfull_cmds_dropped;
5270 qlt_chk_exch_leak_thresh_hold(vha);
5274 memset(cmd, 0, sizeof(struct qla_tgt_cmd));
5276 qlt_incr_num_pend_cmds(vha);
5277 INIT_LIST_HEAD(&cmd->cmd_list);
5278 memcpy(&cmd->atio, atio, sizeof(*atio));
5280 cmd->tgt = vha->vha_tgt.qla_tgt;
5282 cmd->reset_count = vha->hw->chip_reset;
5287 /* NOTE: borrowing the state field to carry the status */
5288 cmd->state = status;
5290 cmd->term_exchg = 1;
5292 list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);
5294 vha->hw->tgt.num_qfull_cmds_alloc++;
5295 if (vha->hw->tgt.num_qfull_cmds_alloc >
5296 vha->qla_stats.stat_max_qfull_cmds_alloc)
5297 vha->qla_stats.stat_max_qfull_cmds_alloc =
5298 vha->hw->tgt.num_qfull_cmds_alloc;
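/*
 * Drain the q_full_list: re-issue the deferred BUSY or term-exchange
 * response for each queued command and free the ones that were sent.
 */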
5302 qlt_free_qfull_cmds(struct scsi_qla_host *vha)
5304 struct qla_hw_data *ha = vha->hw;
5305 unsigned long flags;
5306 struct qla_tgt_cmd *cmd, *tcmd;
5307 struct list_head free_list;
5310 if (list_empty(&ha->tgt.q_full_list))
5313 INIT_LIST_HEAD(&free_list);
5315 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
5317 if (list_empty(&ha->tgt.q_full_list)) {
5318 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
5322 list_for_each_entry_safe(cmd, tcmd, &ha->tgt.q_full_list, cmd_list) {
5324 /* cmd->state is a borrowed field to hold status */
5325 rc = __qlt_send_busy(vha, &cmd->atio, cmd->state);
5326 else if (cmd->term_exchg)
5327 rc = __qlt_send_term_exchange(vha, NULL, &cmd->atio);
5333 ql_dbg(ql_dbg_io, vha, 0x3006,
5334 "%s: busy sent for ox_id[%04x]\n", __func__,
5335 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
5336 else if (cmd->term_exchg)
5337 ql_dbg(ql_dbg_io, vha, 0x3007,
5338 "%s: Term exchg sent for ox_id[%04x]\n", __func__,
5339 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
5341 ql_dbg(ql_dbg_io, vha, 0x3008,
5342 "%s: Unexpected cmd in QFull list %p\n", __func__,
5345 list_del(&cmd->cmd_list);
5346 list_add_tail(&cmd->cmd_list, &free_list);
5348 /* piggyback on hardware_lock for protection */
5349 vha->hw->tgt.num_qfull_cmds_alloc--;
5351 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
5355 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
5356 list_del(&cmd->cmd_list);
5357 /* This cmd was never sent to TCM. There is no need
5358 * to schedule free or call free_cmd
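 */

/*
 * Try to send BUSY right away; if the IOCB couldn't be issued, queue a
 * QFull command so the response can be retried later by
 * qlt_free_qfull_cmds().
 */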
5366 qlt_send_busy(struct scsi_qla_host *vha,
5367 struct atio_from_isp *atio, uint16_t status)
5371 rc = __qlt_send_busy(vha, atio, status);
5373 qlt_alloc_qfull_cmd(vha, atio, status, 1);
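/*
 * If the number of pending commands has crossed the queue-full threshold,
 * answer this ATIO with BUSY instead of queueing more work; returns
 * nonzero when the ATIO was consumed here.
 */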
5377 qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
5378 struct atio_from_isp *atio, bool ha_locked)
5380 struct qla_hw_data *ha = vha->hw;
5382 unsigned long flags;
5384 if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
5388 spin_lock_irqsave(&ha->hardware_lock, flags);
5389 status = temp_sam_status;
5390 qlt_send_busy(vha, atio, status);
5392 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5397 /* ha->hardware_lock is supposed to be held on entry */
5398 /* called via callback from qla2xxx */
5399 static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
5400 struct atio_from_isp *atio, uint8_t ha_locked)
5402 struct qla_hw_data *ha = vha->hw;
5403 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5405 unsigned long flags;
5407 if (unlikely(tgt == NULL)) {
5408 ql_dbg(ql_dbg_io, vha, 0x3064,
5409 "ATIO pkt, but no tgt (ha %p)", ha);
5413 * In tgt_stop mode we should also allow all requests to pass.
5414 * Otherwise, some commands can get stuck.
5417 tgt->atio_irq_cmd_count++;
5419 switch (atio->u.raw.entry_type) {
5421 if (unlikely(atio->u.isp24.exchange_addr ==
5422 ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
5423 ql_dbg(ql_dbg_io, vha, 0x3065,
5424 "qla_target(%d): ATIO_TYPE7 "
5425 "received with UNKNOWN exchange address, "
5426 "sending QUEUE_FULL\n", vha->vp_idx);
5428 spin_lock_irqsave(&ha->hardware_lock, flags);
5429 qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
5431 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5437 if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
5438 rc = qlt_chk_qfull_thresh_hold(vha, atio, ha_locked);
5440 tgt->atio_irq_cmd_count--;
5443 rc = qlt_handle_cmd_for_atio(vha, atio);
5445 rc = qlt_handle_task_mgmt(vha, atio);
5447 if (unlikely(rc != 0)) {
5451 (&ha->hardware_lock, flags);
5453 #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
5454 qlt_send_busy(vha, atio, SAM_STAT_BUSY);
5456 qlt_send_term_exchange(vha, NULL, atio, 1, 0);
5460 spin_unlock_irqrestore
5461 (&ha->hardware_lock, flags);
5464 if (tgt->tgt_stop) {
5465 ql_dbg(ql_dbg_tgt, vha, 0xe059,
5466 "qla_target: Unable to send "
5467 "command to target for req, "
5470 ql_dbg(ql_dbg_tgt, vha, 0xe05a,
5471 "qla_target(%d): Unable to send "
5472 "command to target, sending BUSY "
5473 "status.\n", vha->vp_idx);
5476 &ha->hardware_lock, flags);
5477 qlt_send_busy(vha, atio, SAM_STAT_BUSY);
5479 spin_unlock_irqrestore(
5480 &ha->hardware_lock, flags);
5486 case IMMED_NOTIFY_TYPE:
5488 if (unlikely(atio->u.isp2x.entry_status != 0)) {
5489 ql_dbg(ql_dbg_tgt, vha, 0xe05b,
5490 "qla_target(%d): Received ATIO packet %x "
5491 "with error status %x\n", vha->vp_idx,
5492 atio->u.raw.entry_type,
5493 atio->u.isp2x.entry_status);
5496 ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
5499 spin_lock_irqsave(&ha->hardware_lock, flags);
5500 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
5502 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5507 ql_dbg(ql_dbg_tgt, vha, 0xe05c,
5508 "qla_target(%d): Received unknown ATIO atio "
5509 "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
5513 tgt->atio_irq_cmd_count--;
5516 /* ha->hardware_lock is supposed to be held on entry */
5517 /* called via callback from qla2xxx */
5518 static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
5520 struct qla_hw_data *ha = vha->hw;
5521 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5523 if (unlikely(tgt == NULL)) {
5524 ql_dbg(ql_dbg_tgt, vha, 0xe05d,
5525 "qla_target(%d): Response pkt %x received, but no "
5526 "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
5531 * In tgt_stop mode we should also allow all requests to pass.
5532 * Otherwise, some commands can get stuck.
5535 tgt->irq_cmd_count++;
5537 switch (pkt->entry_type) {
5541 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
5542 qlt_do_ctio_completion(vha, entry->handle,
5543 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
5548 case ACCEPT_TGT_IO_TYPE:
5550 struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
5552 if (atio->u.isp2x.status !=
5553 cpu_to_le16(ATIO_CDB_VALID)) {
5554 ql_dbg(ql_dbg_tgt, vha, 0xe05e,
5555 "qla_target(%d): ATIO with error "
5556 "status %x received\n", vha->vp_idx,
5557 le16_to_cpu(atio->u.isp2x.status));
5561 rc = qlt_chk_qfull_thresh_hold(vha, atio, true);
5563 tgt->irq_cmd_count--;
5567 rc = qlt_handle_cmd_for_atio(vha, atio);
5568 if (unlikely(rc != 0)) {
5570 #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
5571 qlt_send_busy(vha, atio, 0);
5573 qlt_send_term_exchange(vha, NULL, atio, 1, 0);
5576 if (tgt->tgt_stop) {
5577 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5578 "qla_target: Unable to send "
5579 "command to target, sending TERM "
5580 "EXCHANGE for rsp\n");
5581 qlt_send_term_exchange(vha, NULL,
5584 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5585 "qla_target(%d): Unable to send "
5586 "command to target, sending BUSY "
5587 "status\n", vha->vp_idx);
5588 qlt_send_busy(vha, atio, 0);
5595 case CONTINUE_TGT_IO_TYPE:
5597 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
5598 qlt_do_ctio_completion(vha, entry->handle,
5599 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
5606 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
5607 qlt_do_ctio_completion(vha, entry->handle,
5608 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
5613 case IMMED_NOTIFY_TYPE:
5614 ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
5615 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
5618 case NOTIFY_ACK_TYPE:
5619 if (tgt->notify_ack_expected > 0) {
5620 struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
5621 ql_dbg(ql_dbg_tgt, vha, 0xe036,
5622 "NOTIFY_ACK seq %08x status %x\n",
5623 le16_to_cpu(entry->u.isp2x.seq_id),
5624 le16_to_cpu(entry->u.isp2x.status));
5625 tgt->notify_ack_expected--;
5626 if (entry->u.isp2x.status !=
5627 cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
5628 ql_dbg(ql_dbg_tgt, vha, 0xe061,
5629 "qla_target(%d): NOTIFY_ACK "
5630 "failed %x\n", vha->vp_idx,
5631 le16_to_cpu(entry->u.isp2x.status));
5634 ql_dbg(ql_dbg_tgt, vha, 0xe062,
5635 "qla_target(%d): Unexpected NOTIFY_ACK received\n",
5640 case ABTS_RECV_24XX:
5641 ql_dbg(ql_dbg_tgt, vha, 0xe037,
5642 "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
5643 qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
5646 case ABTS_RESP_24XX:
5647 if (tgt->abts_resp_expected > 0) {
5648 struct abts_resp_from_24xx_fw *entry =
5649 (struct abts_resp_from_24xx_fw *)pkt;
5650 ql_dbg(ql_dbg_tgt, vha, 0xe038,
5651 "ABTS_RESP_24XX: compl_status %x\n",
5652 entry->compl_status);
5653 tgt->abts_resp_expected--;
5654 if (le16_to_cpu(entry->compl_status) !=
5655 ABTS_RESP_COMPL_SUCCESS) {
5656 if ((entry->error_subcode1 == 0x1E) &&
5657 (entry->error_subcode2 == 0)) {
5659 * We've got a race here: aborted
5660 * exchange not terminated, i.e.
5661 * the response for the aborted command was
5662 * sent after the abort request was
5663 * received but before it was processed.
5664 * Unfortunately, the firmware has a
5665 * silly requirement that all aborted
5666 * exchanges must be explicitly
5667 * terminated, otherwise it refuses to
5668 * send responses for the abort
5669 * requests. So, we have to
5670 * (re)terminate the exchange and retry
5671 * the abort response.
5673 qlt_24xx_retry_term_exchange(vha,
5676 ql_dbg(ql_dbg_tgt, vha, 0xe063,
5677 "qla_target(%d): ABTS_RESP_24XX "
5678 "failed %x (subcode %x:%x)",
5679 vha->vp_idx, entry->compl_status,
5680 entry->error_subcode1,
5681 entry->error_subcode2);
5684 ql_dbg(ql_dbg_tgt, vha, 0xe064,
5685 "qla_target(%d): Unexpected ABTS_RESP_24XX "
5686 "received\n", vha->vp_idx);
5691 ql_dbg(ql_dbg_tgt, vha, 0xe065,
5692 "qla_target(%d): Received unknown response pkt "
5693 "type %x\n", vha->vp_idx, pkt->entry_type);
5697 tgt->irq_cmd_count--;
5701 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
5703 void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
5706 struct qla_hw_data *ha = vha->hw;
5707 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5710 if (!ha->tgt.tgt_ops)
5713 if (unlikely(tgt == NULL)) {
5714 ql_dbg(ql_dbg_tgt, vha, 0xe03a,
5715 "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
5719 if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
5723 * In tgt_stop mode we should also allow all requests to pass.
5724 * Otherwise, some commands can get stuck.
5727 tgt->irq_cmd_count++;
5730 case MBA_RESET: /* Reset */
5731 case MBA_SYSTEM_ERR: /* System Error */
5732 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
5733 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
5734 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
5735 "qla_target(%d): System error async event %#x "
5736 "occurred", vha->vp_idx, code);
5738 case MBA_WAKEUP_THRES: /* Request Queue Wake-up. */
5739 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
5744 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
5745 "qla_target(%d): Async LOOP_UP occurred "
5746 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
5747 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
5748 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
5749 if (tgt->link_reinit_iocb_pending) {
5750 qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
5752 tgt->link_reinit_iocb_pending = 0;
5757 case MBA_LIP_OCCURRED:
5760 case MBA_RSCN_UPDATE:
5761 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
5762 "qla_target(%d): Async event %#x occurred "
5763 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
5764 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
5765 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
5768 case MBA_PORT_UPDATE:
5769 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
5770 "qla_target(%d): Port update async event %#x "
5771 "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
5772 "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
5773 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
5774 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
5776 login_code = le16_to_cpu(mailbox[2]);
5777 if (login_code == 0x4)
5778 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
5779 "Async MB 2: Got PLOGI Complete\n");
5780 else if (login_code == 0x7)
5781 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
5782 "Async MB 2: Port Logged Out\n");
5789 tgt->irq_cmd_count--;
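/*
 * Allocate a temporary fc_port for the given loop ID and fill it from the
 * firmware's port database; returns NULL on failure.
 */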
5792 static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
5798 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
5800 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
5801 "qla_target(%d): Allocation of tmp FC port failed",
5806 fcport->loop_id = loop_id;
5808 rc = qla2x00_get_port_database(vha, fcport, 0);
5809 if (rc != QLA_SUCCESS) {
5810 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
5811 "qla_target(%d): Failed to retrieve fcport "
5812 "information -- get_port_database() returned %x "
5813 "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
5821 /* Must be called under tgt_mutex */
5822 static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
5825 struct qla_tgt_sess *sess = NULL;
5826 fc_port_t *fcport = NULL;
5827 int rc, global_resets;
5828 uint16_t loop_id = 0;
5830 mutex_lock(&vha->vha_tgt.tgt_mutex);
5834 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
5836 rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
5838 mutex_unlock(&vha->vha_tgt.tgt_mutex);
5840 if ((s_id[0] == 0xFF) &&
5841 (s_id[1] == 0xFC)) {
5843 * This is the Domain Controller, so it should be
5844 * OK to drop SCSI commands from it.
5846 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
5847 "Unable to find initiator with S_ID %x:%x:%x",
5848 s_id[0], s_id[1], s_id[2]);
5850 ql_log(ql_log_info, vha, 0xf071,
5851 "qla_target(%d): Unable to find "
5852 "initiator with S_ID %x:%x:%x",
5853 vha->vp_idx, s_id[0], s_id[1],
5856 if (rc == -ENOENT) {
5857 qlt_port_logo_t logo;
5858 sid_to_portid(s_id, &logo.id);
5860 qlt_send_first_logo(vha, &logo);
5866 fcport = qlt_get_port_database(vha, loop_id);
5868 mutex_unlock(&vha->vha_tgt.tgt_mutex);
5872 if (global_resets !=
5873 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
5874 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
5875 "qla_target(%d): global reset during session discovery "
5876 "(counter was %d, new %d), retrying", vha->vp_idx,
5878 atomic_read(&vha->vha_tgt.
5879 qla_tgt->tgt_global_resets_count));
5883 sess = qlt_create_sess(vha, fcport, true);
5885 mutex_unlock(&vha->vha_tgt.tgt_mutex);
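/*
 * Deferred ABTS handling: look up (or create) the session for the ABTS
 * S_ID and run __qlt_24xx_handle_abts() in thread context, sending a
 * rejected ABTS response if no session can be established.
 */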
5891 static void qlt_abort_work(struct qla_tgt *tgt,
5892 struct qla_tgt_sess_work_param *prm)
5894 struct scsi_qla_host *vha = tgt->vha;
5895 struct qla_hw_data *ha = vha->hw;
5896 struct qla_tgt_sess *sess = NULL;
5897 unsigned long flags = 0, flags2 = 0;
5901 spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
5906 s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
5907 s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
5908 s_id[2] = prm->abts.fcp_hdr_le.s_id[0];
5910 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
5912 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
5914 sess = qlt_make_local_sess(vha, s_id);
5915 /* sess has an extra creation ref */
5917 spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
5921 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
5926 kref_get(&sess->sess_kref);
5929 spin_lock_irqsave(&ha->hardware_lock, flags);
5934 rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
5937 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5940 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
5944 spin_lock_irqsave(&ha->hardware_lock, flags);
5947 qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
5948 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5951 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
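/*
 * Deferred task-management handling: look up (or create) the session for
 * the TM IOCB's S_ID and issue the TMF, terminating the exchange if the
 * session or the TMF can't be set up.
 */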
5954 static void qlt_tmr_work(struct qla_tgt *tgt,
5955 struct qla_tgt_sess_work_param *prm)
5957 struct atio_from_isp *a = &prm->tm_iocb2;
5958 struct scsi_qla_host *vha = tgt->vha;
5959 struct qla_hw_data *ha = vha->hw;
5960 struct qla_tgt_sess *sess = NULL;
5961 unsigned long flags;
5962 uint8_t *s_id = NULL; /* to hide compiler warnings */
5964 uint32_t lun, unpacked_lun;
5968 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
5973 s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
5974 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
5976 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5978 sess = qlt_make_local_sess(vha, s_id);
5979 /* sess has an extra creation ref */
5981 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
5985 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
5990 kref_get(&sess->sess_kref);
5994 lun = a->u.isp24.fcp_cmnd.lun;
5995 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
5996 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
5998 rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
6003 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
6007 qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0);
6009 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
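/*
 * Session work-queue handler: drain tgt->sess_works_list and dispatch
 * each entry to the abort or task-management worker.
 */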
6012 static void qlt_sess_work_fn(struct work_struct *work)
6014 struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
6015 struct scsi_qla_host *vha = tgt->vha;
6016 unsigned long flags;
6018 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);
6020 spin_lock_irqsave(&tgt->sess_work_lock, flags);
6021 while (!list_empty(&tgt->sess_works_list)) {
6022 struct qla_tgt_sess_work_param *prm = list_entry(
6023 tgt->sess_works_list.next, typeof(*prm),
6024 sess_works_list_entry);
6027 * This work can be scheduled on several CPUs at a time, so we
6028 * must delete the entry to eliminate double processing
6030 list_del(&prm->sess_works_list_entry);
6032 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
6034 switch (prm->type) {
6035 case QLA_TGT_SESS_WORK_ABORT:
6036 qlt_abort_work(tgt, prm);
6038 case QLA_TGT_SESS_WORK_TM:
6039 qlt_tmr_work(tgt, prm);
6046 spin_lock_irqsave(&tgt->sess_work_lock, flags);
6050 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
6053 /* Must be called under tgt_host_action_mutex */
6054 int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
6056 struct qla_tgt *tgt;
6058 if (!QLA_TGT_MODE_ENABLED())
6061 if (!IS_TGT_MODE_CAPABLE(ha)) {
6062 ql_log(ql_log_warn, base_vha, 0xe070,
6063 "This adapter does not support target mode.\n");
6067 ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
6068 "Registering target for host %ld(%p).\n", base_vha->host_no, ha);
6070 BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);
6072 tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
6074 ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
6075 "Unable to allocate struct qla_tgt\n");
6079 if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
6080 base_vha->host->hostt->supported_mode |= MODE_TARGET;
6083 tgt->vha = base_vha;
6084 init_waitqueue_head(&tgt->waitQ);
6085 INIT_LIST_HEAD(&tgt->sess_list);
6086 INIT_LIST_HEAD(&tgt->del_sess_list);
6087 INIT_DELAYED_WORK(&tgt->sess_del_work,
6088 (void (*)(struct work_struct *))qlt_del_sess_work_fn);
6089 spin_lock_init(&tgt->sess_work_lock);
6090 INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
6091 INIT_LIST_HEAD(&tgt->sess_works_list);
6092 spin_lock_init(&tgt->srr_lock);
6093 INIT_LIST_HEAD(&tgt->srr_ctio_list);
6094 INIT_LIST_HEAD(&tgt->srr_imm_list);
6095 INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
6096 atomic_set(&tgt->tgt_global_resets_count, 0);
6098 base_vha->vha_tgt.qla_tgt = tgt;
6100 ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
6101 "qla_target(%d): using 64 Bit PCI addressing",
6103 tgt->tgt_enable_64bit_addr = 1;
6105 tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
6106 tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
6107 tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;
6109 if (base_vha->fc_vport)
6112 mutex_lock(&qla_tgt_mutex);
6113 list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
6114 mutex_unlock(&qla_tgt_mutex);
6119 /* Must be called under tgt_host_action_mutex */
6120 int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
6122 if (!vha->vha_tgt.qla_tgt)
6125 if (vha->fc_vport) {
6126 qlt_release(vha->vha_tgt.qla_tgt);
6130 /* free left over qfull cmds */
6131 qlt_init_term_exchange(vha);
6133 mutex_lock(&qla_tgt_mutex);
6134 list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
6135 mutex_unlock(&qla_tgt_mutex);
6137 ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
6139 qlt_release(vha->vha_tgt.qla_tgt);
6144 static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
6149 pr_debug("qla2xxx HW vha->node_name: ");
6150 for (i = 0; i < WWN_SIZE; i++)
6151 pr_debug("%02x ", vha->node_name[i]);
6153 pr_debug("qla2xxx HW vha->port_name: ");
6154 for (i = 0; i < WWN_SIZE; i++)
6155 pr_debug("%02x ", vha->port_name[i]);
6158 pr_debug("qla2xxx passed configfs WWPN: ");
6159 put_unaligned_be64(wwpn, b);
6160 for (i = 0; i < WWN_SIZE; i++)
6161 pr_debug("%02x ", b[i]);
6166 * qla_tgt_lport_register - register lport with external module
6168 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
6169 * @phys_wwpn: passed FC target WWPN for the physical port
6170 * @npiv_wwpn/@npiv_wwnn: NPIV WWPN/WWNN, or zero for a physical lport
6171 * @callback: lport initialization callback for tcm_qla2xxx code
6173 int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
6174 u64 npiv_wwpn, u64 npiv_wwnn,
6175 int (*callback)(struct scsi_qla_host *, void *, u64, u64))
6177 struct qla_tgt *tgt;
6178 struct scsi_qla_host *vha;
6179 struct qla_hw_data *ha;
6180 struct Scsi_Host *host;
6181 unsigned long flags;
6185 mutex_lock(&qla_tgt_mutex);
6186 list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
6194 if (!(host->hostt->supported_mode & MODE_TARGET))
6197 spin_lock_irqsave(&ha->hardware_lock, flags);
6198 if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
6199 pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
6201 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6204 if (tgt->tgt_stop) {
6205 pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
6207 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6210 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6212 if (!scsi_host_get(host)) {
6213 ql_dbg(ql_dbg_tgt, vha, 0xe068,
6214 "Unable to scsi_host_get() for"
6215 " qla2xxx scsi_host\n");
6218 qlt_lport_dump(vha, phys_wwpn, b);
6220 if (memcmp(vha->port_name, b, WWN_SIZE)) {
6221 scsi_host_put(host);
6224 rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
6226 scsi_host_put(host);
6228 mutex_unlock(&qla_tgt_mutex);
6231 mutex_unlock(&qla_tgt_mutex);
6235 EXPORT_SYMBOL(qlt_lport_register);
6238 * qla_tgt_lport_deregister - Deregister lport
6240 * @vha: Registered scsi_qla_host pointer
6242 void qlt_lport_deregister(struct scsi_qla_host *vha)
6244 struct qla_hw_data *ha = vha->hw;
6245 struct Scsi_Host *sh = vha->host;
6247 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
6249 vha->vha_tgt.target_lport_ptr = NULL;
6250 ha->tgt.tgt_ops = NULL;
6252 * Release the Scsi_Host reference for the underlying qla2xxx host
6256 EXPORT_SYMBOL(qlt_lport_deregister);
6258 /* Must be called under HW lock */
6259 static void qlt_set_mode(struct scsi_qla_host *vha)
6261 struct qla_hw_data *ha = vha->hw;
6263 switch (ql2x_ini_mode) {
6264 case QLA2XXX_INI_MODE_DISABLED:
6265 case QLA2XXX_INI_MODE_EXCLUSIVE:
6266 vha->host->active_mode = MODE_TARGET;
6268 case QLA2XXX_INI_MODE_ENABLED:
6269 vha->host->active_mode |= MODE_TARGET;
6275 if (ha->tgt.ini_mode_force_reverse)
6276 qla_reverse_ini_mode(vha);
6279 /* Must be called under HW lock */
6280 static void qlt_clear_mode(struct scsi_qla_host *vha)
6282 struct qla_hw_data *ha = vha->hw;
6284 switch (ql2x_ini_mode) {
6285 case QLA2XXX_INI_MODE_DISABLED:
6286 vha->host->active_mode = MODE_UNKNOWN;
6288 case QLA2XXX_INI_MODE_EXCLUSIVE:
6289 vha->host->active_mode = MODE_INITIATOR;
6291 case QLA2XXX_INI_MODE_ENABLED:
6292 vha->host->active_mode &= ~MODE_TARGET;
6298 if (ha->tgt.ini_mode_force_reverse)
6299 qla_reverse_ini_mode(vha);
6303 * qla_tgt_enable_vha - NO LOCK HELD
6305 * host_reset, bring up w/ Target Mode Enabled
6308 qlt_enable_vha(struct scsi_qla_host *vha)
6310 struct qla_hw_data *ha = vha->hw;
6311 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6312 unsigned long flags;
6313 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
6314 int rspq_ent = QLA83XX_RSPQ_MSIX_ENTRY_NUMBER;
6317 ql_dbg(ql_dbg_tgt, vha, 0xe069,
6318 "Unable to locate qla_tgt pointer from"
6319 " struct qla_hw_data\n");
6324 spin_lock_irqsave(&ha->hardware_lock, flags);
6325 tgt->tgt_stopped = 0;
6327 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6330 qla24xx_disable_vp(vha);
6331 qla24xx_enable_vp(vha);
6333 if (ha->msix_entries) {
6334 ql_dbg(ql_dbg_tgt, vha, 0xffff,
6335 "%s: host%ld : vector %d cpu %d\n",
6336 __func__, vha->host_no,
6337 ha->msix_entries[rspq_ent].vector,
6338 ha->msix_entries[rspq_ent].cpuid);
6340 ha->tgt.rspq_vector_cpuid =
6341 ha->msix_entries[rspq_ent].cpuid;
6344 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
6345 qla2xxx_wake_dpc(base_vha);
6346 WARN_ON_ONCE(qla2x00_wait_for_hba_online(base_vha) !=
6350 EXPORT_SYMBOL(qlt_enable_vha);
6353 * qla_tgt_disable_vha - NO LOCK HELD
6355 * Disable Target Mode and reset the adapter
6357 static void qlt_disable_vha(struct scsi_qla_host *vha)
6359 struct qla_hw_data *ha = vha->hw;
6360 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6361 unsigned long flags;
6364 ql_dbg(ql_dbg_tgt, vha, 0xe06a,
6365 "Unable to locate qla_tgt pointer from"
6366 " struct qla_hw_data\n");
6371 spin_lock_irqsave(&ha->hardware_lock, flags);
6372 qlt_clear_mode(vha);
6373 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6375 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
6376 qla2xxx_wake_dpc(vha);
6377 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
6378 ql_dbg(ql_dbg_tgt, vha, 0xe081,
6379 "qla2x00_wait_for_hba_online() failed\n");
6383 * Called from qla_init.c:qla24xx_vport_create() context to set up
6384 * the target mode specific struct scsi_qla_host and struct qla_hw_data
6388 qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
6390 if (!qla_tgt_mode_enabled(vha))
6393 vha->vha_tgt.qla_tgt = NULL;
6395 mutex_init(&vha->vha_tgt.tgt_mutex);
6396 mutex_init(&vha->vha_tgt.tgt_host_action_mutex);
6398 qlt_clear_mode(vha);
6401 * NOTE: Currently the value is kept the same for <24xx and
6402 * >=24xx ISPs. If it is necessary to change it,
6403 * a check should be added for specific ISPs,
6404 * assigning the value appropriately.
6406 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
6408 qlt_add_target(ha, vha);
6412 qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
6415 * FC-4 Feature bit 0 indicates target functionality to the name server.
6417 if (qla_tgt_mode_enabled(vha)) {
6418 if (qla_ini_mode_enabled(vha))
6419 ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
6421 ct_req->req.rff_id.fc4_feature = BIT_0;
6422 } else if (qla_ini_mode_enabled(vha)) {
6423 ct_req->req.rff_id.fc4_feature = BIT_1;
6428 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
6431 * The beginning of the ATIO ring has the initialization control block
6432 * already built by the nvram config routine.
6434 * Returns 0 on success.
6437 qlt_init_atio_q_entries(struct scsi_qla_host *vha)
6439 struct qla_hw_data *ha = vha->hw;
6441 struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;
6443 if (!qla_tgt_mode_enabled(vha))
6446 for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
6447 pkt->u.raw.signature = ATIO_PROCESSED;
6454 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
6455 * @ha: SCSI driver HA context
6458 qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
6460 struct qla_hw_data *ha = vha->hw;
6461 struct atio_from_isp *pkt;
6464 if (!vha->flags.online)
6467 while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
6468 fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
6469 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
6470 cnt = pkt->u.raw.entry_count;
6472 if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
6474 * This packet is corrupted. The header + payload
6475 * cannot be trusted. There is no point in passing
6478 ql_log(ql_log_warn, vha, 0xffff,
6479 "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
6480 pkt->u.isp24.fcp_hdr.s_id,
6481 be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
6482 le32_to_cpu(pkt->u.isp24.exchange_addr), pkt);
6484 adjust_corrupted_atio(pkt);
6485 qlt_send_term_exchange(vha, NULL, pkt, ha_locked, 0);
6487 qlt_24xx_atio_pkt_all_vps(vha,
6488 (struct atio_from_isp *)pkt, ha_locked);
6491 for (i = 0; i < cnt; i++) {
6492 ha->tgt.atio_ring_index++;
6493 if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
6494 ha->tgt.atio_ring_index = 0;
6495 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
6497 ha->tgt.atio_ring_ptr++;
6499 pkt->u.raw.signature = ATIO_PROCESSED;
6500 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
6505 /* Adjust ring index */
6506 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
6507 RD_REG_DWORD_RELAXED(ISP_ATIO_Q_OUT(vha));
6511 qlt_24xx_config_rings(struct scsi_qla_host *vha)
6513 struct qla_hw_data *ha = vha->hw;
6514 if (!QLA_TGT_MODE_ENABLED())
6517 WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
6518 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
6519 RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));
6521 if (IS_ATIO_MSIX_CAPABLE(ha)) {
6522 struct qla_msix_entry *msix = &ha->msix_entries[2];
6523 struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;
6525 icb->msix_atio = cpu_to_le16(msix->entry);
6526 ql_dbg(ql_dbg_init, vha, 0xf072,
6527 "Registering ICB vector 0x%x for atio que.\n",
6533 qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
6535 struct qla_hw_data *ha = vha->hw;
6537 if (qla_tgt_mode_enabled(vha)) {
6538 if (!ha->tgt.saved_set) {
6539 /* We save only once */
6540 ha->tgt.saved_exchange_count = nv->exchange_count;
6541 ha->tgt.saved_firmware_options_1 =
6542 nv->firmware_options_1;
6543 ha->tgt.saved_firmware_options_2 =
6544 nv->firmware_options_2;
6545 ha->tgt.saved_firmware_options_3 =
6546 nv->firmware_options_3;
6547 ha->tgt.saved_set = 1;
6550 nv->exchange_count = cpu_to_le16(0xFFFF);
6552 /* Enable target mode */
6553 nv->firmware_options_1 |= cpu_to_le32(BIT_4);
6555 /* Disable ini mode, if requested */
6556 if (!qla_ini_mode_enabled(vha))
6557 nv->firmware_options_1 |= cpu_to_le32(BIT_5);
6559 /* Disable Full Login after LIP */
6560 nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
6561 /* Enable initial LIP */
6562 nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
6563 if (ql2xtgt_tape_enable)
6564 /* Enable FC Tape support */
6565 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
6567 /* Disable FC Tape support */
6568 nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
6570 /* Disable Full Login after LIP */
6571 nv->host_p &= cpu_to_le32(~BIT_10);
6572 /* Enable target PRLI control */
6573 nv->firmware_options_2 |= cpu_to_le32(BIT_14);
6575 if (ha->tgt.saved_set) {
6576 nv->exchange_count = ha->tgt.saved_exchange_count;
6577 nv->firmware_options_1 =
6578 ha->tgt.saved_firmware_options_1;
6579 nv->firmware_options_2 =
6580 ha->tgt.saved_firmware_options_2;
6581 nv->firmware_options_3 =
6582 ha->tgt.saved_firmware_options_3;
6587 /* out-of-order frame reassembly */
6588 nv->firmware_options_3 |= BIT_6|BIT_9;
6590 if (ha->tgt.enable_class_2) {
6591 if (vha->flags.init_done)
6592 fc_host_supported_classes(vha->host) =
6593 FC_COS_CLASS2 | FC_COS_CLASS3;
6595 nv->firmware_options_2 |= cpu_to_le32(BIT_8);
6597 if (vha->flags.init_done)
6598 fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
6600 nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
6605 qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
6606 struct init_cb_24xx *icb)
6608 struct qla_hw_data *ha = vha->hw;
6610 if (!QLA_TGT_MODE_ENABLED())
6613 if (ha->tgt.node_name_set) {
6614 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
6615 icb->firmware_options_1 |= cpu_to_le32(BIT_14);
6618 /* disable ZIO at start time. */
6619 if (!vha->flags.init_done) {
6621 tmp = le32_to_cpu(icb->firmware_options_2);
6622 tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
6623 icb->firmware_options_2 = cpu_to_le32(tmp);
void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		nv->exchange_count = cpu_to_le16(0xFFFF);

		/* Enable target mode */
		nv->firmware_options_1 |= cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (!qla_ini_mode_enabled(vha))
			nv->firmware_options_1 |= cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
		if (ql2xtgt_tape_enable)
			/* Enable FC tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

		/* Disable Full Login after LIP */
		nv->host_p &= cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= cpu_to_le32(BIT_14);
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	/* out-of-order frames reassembly */
	nv->firmware_options_3 |= BIT_6|BIT_9;

	if (ha->tgt.enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
				FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
	}
}
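
/* ISP81xx counterpart of qlt_24xx_config_nvram_stage2(). */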
void
qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_81xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
	}

	/* disable ZIO at start time. */
	if (!vha->flags.init_done) {
		uint32_t tmp;

		tmp = le32_to_cpu(icb->firmware_options_2);
		tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		icb->firmware_options_2 = cpu_to_le32(tmp);
	}
}
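
/* ISP83xx reserves one extra MSI-X vector for the ATIO queue. */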
void
qlt_83xx_iospace_config(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	ha->msix_count += 1; /* For ATIO Q */
}
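
/*
 * Returns 1 for response entry types owned by target mode, 0 otherwise,
 * so the response-path error handler can route them accordingly.
 */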
int
qlt_24xx_process_response_error(struct scsi_qla_host *vha,
	struct sts_entry_24xx *pkt)
{
	switch (pkt->entry_type) {
	case ABTS_RECV_24XX:
	case ABTS_RESP_24XX:
	case CTIO_TYPE7:
	case NOTIFY_ACK_TYPE:
	case CTIO_CRC2:
		return 1;
	default:
		return 0;
	}
}
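
/* Adjust VP config entry options for the requested target/initiator mode. */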
void
qlt_modify_vp_config(struct scsi_qla_host *vha,
	struct vp_config_entry_24xx *vpmod)
{
	/* Enable target mode */
	if (qla_tgt_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_5;

	/* Disable ini mode, if requested */
	if (!qla_ini_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_4;
}
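
/*
 * Early probe setup: point the ATIO queue register accessors at the right
 * register block, initialize the target-mode mutexes and start with target
 * mode cleared.
 */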
void
qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
		ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
	} else {
		ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
	}

	mutex_init(&base_vha->vha_tgt.tgt_mutex);
	mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
	qlt_clear_mode(base_vha);
}
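
/* MSI-X interrupt handler for the dedicated ATIO queue vector. */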
irqreturn_t
qla83xx_msix_atio_q(int irq, void *dev_id)
{
	struct rsp_que *rsp;
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	ha = rsp->hw;
	vha = pci_get_drvdata(ha->pdev);

	spin_lock_irqsave(&ha->tgt.atio_lock, flags);

	qlt_24xx_process_atio_queue(vha, 0);

	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);

	return IRQ_HANDLED;
}
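
/*
 * Deferred ABTS handling: drain the ATIO queue first, then hand the saved
 * packet back to all vports under the hardware lock.
 */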
static void
qlt_handle_abts_recv_work(struct work_struct *work)
{
	struct qla_tgt_sess_op *op = container_of(work,
		struct qla_tgt_sess_op, work);
	scsi_qla_host_t *vha = op->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	if (qla2x00_reset_active(vha) || (op->chip_reset != ha->chip_reset))
		return;

	spin_lock_irqsave(&ha->tgt.atio_lock, flags);
	qlt_24xx_process_atio_queue(vha, 0);
	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_response_pkt_all_vps(vha, (response_t *)&op->atio);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

void
qlt_handle_abts_recv(struct scsi_qla_host *vha, response_t *pkt)
{
	struct qla_tgt_sess_op *op;

	op = kzalloc(sizeof(*op), GFP_ATOMIC);
	if (!op) {
		/* Do not reach for the ATIO queue here; this is best-effort
		 * error recovery at this point.
		 */
		qlt_response_pkt_all_vps(vha, pkt);
		return;
	}

	memcpy(&op->atio, pkt, sizeof(*pkt));
	op->vha = vha;
	op->chip_reset = vha->hw->chip_reset;
	INIT_WORK(&op->work, qlt_handle_abts_recv_work);
	queue_work(qla_tgt_wq, &op->work);
}
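
/* Allocate the vport map and the DMA-coherent ATIO ring. */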
int
qlt_mem_alloc(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
	    MAX_MULTI_ID_FABRIC, GFP_KERNEL);
	if (!ha->tgt.tgt_vp_map)
		return -ENOMEM;

	ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
	    (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
	    &ha->tgt.atio_dma, GFP_KERNEL);
	if (!ha->tgt.atio_ring) {
		kfree(ha->tgt.tgt_vp_map);
		return -ENOMEM;
	}
	return 0;
}

void
qlt_mem_free(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.atio_ring) {
		dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
		    sizeof(struct atio_from_isp), ha->tgt.atio_ring,
		    ha->tgt.atio_dma);
	}
	kfree(ha->tgt.tgt_vp_map);
}

/* vport_slock to be held by the caller */
void
qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	switch (cmd) {
	case SET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
		break;
	case SET_AL_PA:
		vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
		break;
	case RESET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
		break;
	case RESET_AL_PA:
		vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
		break;
	}
}
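
/* Map the qlini_mode module parameter string onto ql2x_ini_mode. */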
static int __init qlt_parse_ini_mode(void)
{
	if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
	else
		return false;

	return true;
}
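
/*
 * Module-init path: parse the initiator-mode parameter, then set up the
 * slab caches, mempool and workqueue used by target mode.
 */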
int __init qlt_init(void)
{
	int ret;

	if (!qlt_parse_ini_mode()) {
		ql_log(ql_log_fatal, NULL, 0xe06b,
		    "qlt_parse_ini_mode() failed\n");
		return -EINVAL;
	}

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
	    qla_tgt_mgmt_cmd), 0, NULL);
	if (!qla_tgt_mgmt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
		return -ENOMEM;
	}

	qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep",
	    sizeof(qlt_plogi_ack_t), __alignof__(qlt_plogi_ack_t),
	    0, NULL);
	if (!qla_tgt_plogi_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_plogi_cachep failed\n");
		ret = -ENOMEM;
		goto out_mgmt_cmd_cachep;
	}

	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
	if (!qla_tgt_mgmt_cmd_mempool) {
		ql_log(ql_log_fatal, NULL, 0xe06e,
		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
		ret = -ENOMEM;
		goto out_plogi_cachep;
	}

	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
	if (!qla_tgt_wq) {
		ql_log(ql_log_fatal, NULL, 0xe06f,
		    "alloc_workqueue for qla_tgt_wq failed\n");
		ret = -ENOMEM;
		goto out_cmd_mempool;
	}
	/*
	 * Return 1 to signal that initiator-mode is being disabled
	 */
	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;

out_cmd_mempool:
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
out_plogi_cachep:
	kmem_cache_destroy(qla_tgt_plogi_cachep);
out_mgmt_cmd_cachep:
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
	return ret;
}
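
/* Module-exit path: tear down everything qlt_init() created. */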
void qlt_exit(void)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	destroy_workqueue(qla_tgt_wq);
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
	kmem_cache_destroy(qla_tgt_plogi_cachep);
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
}