/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static DEFINE_MUTEX(device_mutex);
static LIST_HEAD(device_list);
static DEFINE_IDR(devices_idr);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_dev_entry *deve;
	sense_reason_t ret = TCM_NO_SENSE;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		atomic_long_inc(&deve->total_cmds);

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->write_bytes);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->read_bytes);

		se_lun = rcu_dereference(deve->se_lun);

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    deve->lun_access_ro) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			rcu_read_unlock();
			ret = TCM_WRITE_PROTECTED;
			goto ref_dev;
		}
	}
out_unlock:
	rcu_read_unlock();
	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return TCM_NON_EXISTENT_LUN;
		}

		se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;

		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE)) {
			ret = TCM_WRITE_PROTECTED;
			goto ref_dev;
		}
	}
	/*
	 * RCU reference protected by percpu se_lun->lun_ref taken above that
	 * must drop to zero (including initial reference) before this se_lun
	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
	 * target_core_fabric_configfs.c:target_fabric_port_release
	 */
ref_dev:
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	atomic_long_inc(&se_cmd->se_dev->num_cmds);

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->read_bytes);

	return ret;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
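
/*
 * Illustrative sketch (not part of this file): a fabric driver that resolved
 * a LUN via transport_lookup_cmd_lun() holds se_lun->lun_ref until the
 * command completes; the reference taken above is dropped again on the
 * completion path (see transport_lun_remove_cmd() in
 * target_core_transport.c), e.g.:
 *
 *	if (transport_lookup_cmd_lun(se_cmd, unpacked_lun))
 *		goto reject;	// sense reason already set by the lookup
 *	// ... submit for execution; lun_ref_active is released at
 *	// completion time, allowing LUN shutdown to make progress once
 *	// the percpu_ref drains to zero.
 */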

int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		se_lun = rcu_dereference(deve->se_lun);

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();
	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08llx\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		return -ENODEV;
	}
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

bool target_lun_is_rdonly(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_dev_entry *deve;
	bool ret;

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
	ret = deve && deve->lun_access_ro;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = rcu_dereference(deve->se_lun);
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (lun->lun_rtpi != rtpi)
			continue;

		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}

void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		struct se_lun *lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	}
	mutex_unlock(&nacl->lun_entry_mutex);
}

void core_update_device_list_access(
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve)
		deve->lun_access_ro = lun_access_ro;
	mutex_unlock(&nacl->lun_entry_mutex);
}

/*
 * Called with rcu_read_lock or nacl->device_list_lock held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
{
	struct se_dev_entry *deve;

	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		if (deve->mapped_lun == mapped_lun)
			return deve;

	return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);
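
/*
 * Usage sketch (illustrative only): callers that do not already hold
 * nacl->lun_entry_mutex must wrap the lookup in an RCU read-side critical
 * section and finish using the returned entry before unlocking:
 *
 *	rcu_read_lock();
 *	deve = target_nacl_find_deve(nacl, mapped_lun);
 *	if (deve)
 *		... read deve fields, or take deve->pr_kref ...
 *	rcu_read_unlock();
 */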

void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);
	complete(&deve->pr_comp);
}

static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
			     bool skip_new)
{
	struct se_dev_entry *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
		if (skip_new && tmp == new)
			continue;
		core_scsi3_ua_allocate(tmp, 0x3F,
				       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
	}
	rcu_read_unlock();
}

int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *orig, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		pr_err("Unable to allocate se_dev_entry memory\n");
		return -ENOMEM;
	}

	spin_lock_init(&new->ua_lock);
	INIT_LIST_HEAD(&new->ua_list);
	INIT_LIST_HEAD(&new->lun_link);

	new->mapped_lun = mapped_lun;
	kref_init(&new->pr_kref);
	init_completion(&new->pr_comp);

	new->lun_access_ro = lun_access_ro;
	new->creation_time = get_jiffies_64();
	new->attach_count++;

	mutex_lock(&nacl->lun_entry_mutex);
	orig = target_nacl_find_deve(nacl, mapped_lun);
	if (orig && orig->se_lun) {
		struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

		if (orig_lun != lun) {
			pr_err("Existing orig->se_lun doesn't match new lun"
			       " for dynamic -> explicit NodeACL conversion:"
			       " %s\n", nacl->initiatorname);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}
		if (orig->se_lun_acl != NULL) {
			pr_warn_ratelimited("Detected existing explicit"
				" se_lun_acl->se_lun_group reference for %s"
				" mapped_lun: %llu, failing\n",
				nacl->initiatorname, mapped_lun);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}

		rcu_assign_pointer(new->se_lun, lun);
		rcu_assign_pointer(new->se_lun_acl, lun_acl);
		hlist_del_rcu(&orig->link);
		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
		mutex_unlock(&nacl->lun_entry_mutex);

		spin_lock(&lun->lun_deve_lock);
		list_del(&orig->lun_link);
		list_add_tail(&new->lun_link, &lun->lun_deve_list);
		spin_unlock(&lun->lun_deve_lock);

		kref_put(&orig->pr_kref, target_pr_kref_release);
		wait_for_completion(&orig->pr_comp);

		target_luns_data_has_changed(nacl, new, true);
		kfree_rcu(orig, rcu_head);
		return 0;
	}

	rcu_assign_pointer(new->se_lun, lun);
	rcu_assign_pointer(new->se_lun_acl, lun_acl);
	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
	mutex_unlock(&nacl->lun_entry_mutex);

	spin_lock(&lun->lun_deve_lock);
	list_add_tail(&new->lun_link, &lun->lun_deve_list);
	spin_unlock(&lun->lun_deve_lock);

	target_luns_data_has_changed(nacl, new, true);
	return 0;
}
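
/*
 * Note on the update scheme above (descriptive, no new mechanism): an
 * existing dynamic entry is never modified in place.  A fully initialized
 * replacement is published with hlist_add_head_rcu()/hlist_del_rcu(), and
 * the old entry is only reclaimed via kfree_rcu() after its pr_kref drops
 * and pr_comp fires, so concurrent RCU readers in the I/O path always
 * observe either the complete old or the complete new mapping.
 */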

/*
 * Called with se_node_acl->lun_entry_mutex held.
 */
void core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_dev_entry *orig,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * lun->lun_deve_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->lun_link from
	 * lun->lun_deve_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock(&lun->lun_deve_lock);
	list_del(&orig->lun_link);
	spin_unlock(&lun->lun_deve_lock);

	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(orig);

	hlist_del_rcu(&orig->link);
	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
	orig->lun_access_ro = false;
	orig->creation_time = 0;
	orig->attach_count--;

	/*
	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
	 * or REGISTER_AND_MOVE PR operation to complete.
	 */
	kref_put(&orig->pr_kref, target_pr_kref_release);
	wait_for_completion(&orig->pr_comp);

	rcu_assign_pointer(orig->se_lun, NULL);
	rcu_assign_pointer(orig->se_lun_acl, NULL);

	kfree_rcu(orig, rcu_head);

	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
	target_luns_data_has_changed(nacl, NULL, false);
}
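
/*
 * Teardown ordering sketch (interpretive, mirroring the code above): the
 * kref_put()/wait_for_completion() pair acts as a barrier against PR code
 * that took deve->pr_kref via core_get_se_deve_from_rtpi():
 *
 *	kref_put(&orig->pr_kref, target_pr_kref_release);
 *	wait_for_completion(&orig->pr_comp);	// last pr_kref holder
 *						// completes pr_comp
 *	kfree_rcu(orig, rcu_head);		// safe: no PR users remain,
 *						// and an RCU grace period
 *						// covers remaining readers
 */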

/*	core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {

		mutex_lock(&nacl->lun_entry_mutex);
		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
			struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

			if (lun != tmp_lun)
				continue;

			core_disable_device_list_for_node(lun, deve, nacl, tpg);
		}
		mutex_unlock(&nacl->lun_entry_mutex);
	}
	mutex_unlock(&tpg->acl_node_mutex);
}

int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
{
	struct se_lun *tmp;

	spin_lock(&dev->se_port_lock);
	if (dev->export_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
				" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return -ENOSPC;
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code      Description
	 * 0h        Reserved
	 * 1h        Relative port 1, historically known as port A
	 * 2h        Relative port 2, historically known as port B
	 * 3h to FFFFh    Relative port 3 through 65 535
	 */
	lun->lun_rtpi = dev->dev_rpti_counter++;
	if (!lun->lun_rtpi)
		goto again;

	list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (lun->lun_rtpi == tmp->lun_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return 0;
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}
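
/*
 * Worked example (illustrative): with PAGE_SIZE == 4096 and a 512 byte
 * block_size, alignment is max(1, 4096 / 512) == 8 blocks, so a backend
 * reporting max_sectors == 2047 is rounded down to 2040.  With a 4096 byte
 * block_size the alignment is 1 and max_sectors is left untouched.
 */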

int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{
	int rc;

	rc = core_tpg_add_lun(tpg, lun, false, dev);
	if (rc < 0)
		return rc;

	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		mutex_lock(&tpg->acl_node_mutex);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				core_tpg_add_node_to_devs(acl, tpg, lun);
			}
		}
		mutex_unlock(&tpg->acl_node_mutex);
	}

	return 0;
}

/*	core_dev_del_lun():
 *
 *
 */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	core_tpg_remove_lun(tpg, lun);
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u64 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
	bool lun_access_ro)
{
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	if (!nacl)
		return -EINVAL;

	if (lun->lun_access_ro)
		lun_access_ro = true;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access_ro, nacl, tpg) < 0)
		return -EINVAL;

	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
		lun_access_ro ? "RO" : "RW",
		nacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
					    lacl->mapped_lun);
	return 0;
}

int core_dev_del_initiator_node_lun_acl(
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (deve)
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);

	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %llu\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		nacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}

struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;

	dev = hba->backend->ops->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->se_hba = hba;
	dev->transport = hba->backend->ops;
	dev->prot_length = sizeof(struct t10_pi_tuple);
	dev->hba_index = hba->hba_index;

	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work);

	dev->t10_wwn.t10_dev = dev;
	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = 1;
	dev->dev_attrib.emulate_fua_write = 1;
	dev->dev_attrib.emulate_fua_read = 1;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
		DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.unmap_zeroes_data =
		DA_UNMAP_ZEROES_DATA_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;

	xcopy_lun = &dev->xcopy_lun;
	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
	init_completion(&xcopy_lun->lun_ref_comp);
	init_completion(&xcopy_lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
	xcopy_lun->lun_tpg = &xcopy_pt_tpg;

	return dev;
}

/*
 * Check if the underlying struct block_device request_queue supports
 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
 * in ATA and we need to set TPE=1
 */
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
				       struct request_queue *q)
{
	int block_size = queue_logical_block_size(q);

	if (!blk_queue_discard(q))
		return false;

	attrib->max_unmap_lba_count =
		q->limits.max_discard_sectors >> (ilog2(block_size) - 9);
	/*
	 * Currently hardcoded to 1 in Linux/SCSI code..
	 */
	attrib->max_unmap_block_desc_count = 1;
	attrib->unmap_granularity = q->limits.discard_granularity / block_size;
	attrib->unmap_granularity_alignment = q->limits.discard_alignment /
								block_size;
	attrib->unmap_zeroes_data = (q->limits.max_write_zeroes_sectors);
	return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);
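
/*
 * Unit-conversion note (illustrative): q->limits.max_discard_sectors is in
 * 512 byte sectors while max_unmap_lba_count is in logical blocks, hence
 * the shift by (ilog2(block_size) - 9).  For example, with a 4096 byte
 * logical block size, ilog2(4096) - 9 == 3, so 8192 discard sectors become
 * 1024 unmappable LBAs.
 */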

/*
 * Convert from blocksize advertised to the initiator to the 512 byte
 * units unconditionally used by the Linux block layer.
 */
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
{
	switch (dev->dev_attrib.block_size) {
	case 4096:
		return lb << 3;
	case 2048:
		return lb << 2;
	case 1024:
		return lb << 1;
	default:
		return lb;
	}
}
EXPORT_SYMBOL(target_to_linux_sector);
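
/*
 * Example (illustrative): a READ at LBA 16 on a device advertising 4096
 * byte blocks maps to Linux block layer sector 16 << 3 == 128.
 */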

struct devices_idr_iter {
	int (*fn)(struct se_device *dev, void *data);
	void *data;
};

static int target_devices_idr_iter(int id, void *p, void *data)
	__must_hold(&device_mutex)
{
	struct devices_idr_iter *iter = data;
	struct se_device *dev = p;
	struct config_item *item;
	int ret;

	/*
	 * We add the device early to the idr, so it can be used
	 * by backend modules during configuration. We do not want
	 * to allow other callers to access partially setup devices,
	 * so we skip them here.
	 */
	if (!target_dev_configured(dev))
		return 0;

	item = config_item_get_unless_zero(&dev->dev_group.cg_item);
	if (!item)
		return 0;
	mutex_unlock(&device_mutex);

	ret = iter->fn(dev, iter->data);
	config_item_put(item);

	mutex_lock(&device_mutex);
	return ret;
}

/**
 * target_for_each_device - iterate over configured devices
 * @fn: iterator function
 * @data: pointer to data that will be passed to fn
 *
 * fn must return 0 to continue looping over devices. non-zero will break
 * from the loop and return that value to the caller.
 */
int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
			   void *data)
{
	struct devices_idr_iter iter = { .fn = fn, .data = data };
	int ret;

	mutex_lock(&device_mutex);
	ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
	mutex_unlock(&device_mutex);
	return ret;
}
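
/*
 * Usage sketch (hypothetical callback, not part of this file): count the
 * configured devices without the caller ever touching device_mutex:
 *
 *	static int target_count_devs(struct se_device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;	// keep iterating
 *	}
 *
 *	int count = 0;
 *	target_for_each_device(target_count_devs, &count);
 */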

int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret, id;

	if (target_dev_configured(dev)) {
		pr_err("se_dev->se_dev_ptr already set for storage"
				" object\n");
		return -EEXIST;
	}

	/*
	 * Add early so modules like tcmu can use during its
	 * configuration.
	 */
	mutex_lock(&device_mutex);
	/*
	 * Use cyclic to try and avoid collisions with devices
	 * that were recently removed.
	 */
	id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
	mutex_unlock(&device_mutex);
	if (id < 0) {
		ret = -ENOMEM;
		goto out;
	}
	dev->dev_index = id;

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out_free_index;
	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out_destroy_device;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
				      dev->transport->name);
	if (!dev->tmr_wq) {
		pr_err("Unable to create tmr workqueue for %s\n",
			dev->transport->name);
		ret = -ENOMEM;
		goto out_free_alua;
	}

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 */
	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) {
		strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
		strncpy(&dev->t10_wwn.revision[0],
			dev->transport->inquiry_rev, 4);
	}

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	dev->dev_flags |= DF_CONFIGURED;

	return 0;

out_free_alua:
	core_alua_free_lu_gp_mem(dev);
out_destroy_device:
	dev->transport->destroy_device(dev);
out_free_index:
	mutex_lock(&device_mutex);
	idr_remove(&devices_idr, dev->dev_index);
	mutex_unlock(&device_mutex);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (target_dev_configured(dev)) {
		destroy_workqueue(dev->tmr_wq);

		dev->transport->destroy_device(dev);

		mutex_lock(&device_mutex);
		idr_remove(&devices_idr, dev->dev_index);
		mutex_unlock(&device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	dev->transport->free_device(dev);
}

int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}

void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}

/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;
	struct se_device *dev = cmd->se_dev;
	unsigned int size;

	/*
	 * For REPORT LUNS we always need to emulate the response, for everything
	 * else, pass it up.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/*
	 * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
	 * emulate the response, since tcmu does not have the information
	 * required to process these commands.
	 */
	if (!(dev->transport->transport_flags &
	      TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
		if (cdb[0] == PERSISTENT_RESERVE_IN) {
			cmd->execute_cmd = target_scsi3_emulate_pr_in;
			size = get_unaligned_be16(&cdb[7]);
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == PERSISTENT_RESERVE_OUT) {
			cmd->execute_cmd = target_scsi3_emulate_pr_out;
			size = get_unaligned_be32(&cdb[5]);
			return target_cmd_size_check(cmd, size);
		}

		if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
			cmd->execute_cmd = target_scsi2_reservation_release;
			if (cdb[0] == RELEASE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
			cmd->execute_cmd = target_scsi2_reservation_reserve;
			if (cdb[0] == RESERVE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case WRITE_VERIFY_16:
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case WRITE_VERIFY_32:
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);
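
/*
 * Usage sketch (illustrative, hypothetical names): a passthrough backend
 * plugs its execution callback in from its parse_cdb() method:
 *
 *	static sense_reason_t my_pt_parse_cdb(struct se_cmd *cmd)
 *	{
 *		return passthrough_parse_cdb(cmd, my_pt_execute_cmd);
 *	}
 *
 * Backends such as pSCSI and tcmu follow this pattern for everything that
 * is not emulated above.
 */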