// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static DEFINE_MUTEX(device_mutex);
static LIST_HEAD(device_list);
static DEFINE_IDR(devices_idr);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;
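
/*
 * Map se_cmd->orig_fe_lun to an active se_lun for this I_T nexus: look up
 * the RCU-protected se_dev_entry on the session's NodeACL, then pin the
 * LUN with a percpu lun_ref for the life of the command. Falls back to
 * the TPG's virtual LUN 0 so that REPORT LUNS and friends still work when
 * no MappedLUN=0 exists for the initiator port.
 */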
sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_dev_entry *deve;
	sense_reason_t ret = TCM_NO_SENSE;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		atomic_long_inc(&deve->total_cmds);

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->write_bytes);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->read_bytes);

		se_lun = rcu_dereference(deve->se_lun);

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    deve->lun_access_ro) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun);
			rcu_read_unlock();
			ret = TCM_WRITE_PROTECTED;
			goto ref_dev;
		}
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (se_cmd->orig_fe_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08llx from %s\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun,
				nacl->initiatorname);
			return TCM_NON_EXISTENT_LUN;
		}

		se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;

		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE)) {
			ret = TCM_WRITE_PROTECTED;
			goto ref_dev;
		}
	}
	/*
	 * RCU reference protected by percpu se_lun->lun_ref taken above that
	 * must drop to zero (including initial reference) before this se_lun
	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
	 * target_core_fabric_configfs.c:target_fabric_port_release
	 */
ref_dev:
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	atomic_long_inc(&se_cmd->se_dev->num_cmds);

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->read_bytes);

	return ret;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
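
/*
 * TMR counterpart of transport_lookup_cmd_lun(): same se_dev_entry lookup
 * and lun_ref pinning, but with no virtual LUN 0 fallback; a missing
 * mapping fails the task management request with -ENODEV.
 */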
int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		se_lun = rcu_dereference(deve->se_lun);

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08llx for %s\n",
			se_cmd->se_tfo->fabric_name,
			se_cmd->orig_fe_lun,
			nacl->initiatorname);
		return -ENODEV;
	}
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

bool target_lun_is_rdonly(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_dev_entry *deve;
	bool ret;

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
	ret = deve && deve->lun_access_ro;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = rcu_dereference(deve->se_lun);
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->fabric_name);
			continue;
		}
		if (lun->lun_rtpi != rtpi)
			continue;

		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}

void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		struct se_lun *lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	}
	mutex_unlock(&nacl->lun_entry_mutex);
}

void core_update_device_list_access(
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve)
		deve->lun_access_ro = lun_access_ro;
	mutex_unlock(&nacl->lun_entry_mutex);
}

/*
 * Called with rcu_read_lock or nacl->lun_entry_mutex held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
{
	struct se_dev_entry *deve;

	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		if (deve->mapped_lun == mapped_lun)
			return deve;

	return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);

void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);
	complete(&deve->pr_comp);
}

static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
			     bool skip_new)
{
	struct se_dev_entry *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
		if (skip_new && tmp == new)
			continue;
		core_scsi3_ua_allocate(tmp, 0x3F,
				       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
	}
	rcu_read_unlock();
}
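
/*
 * Install (or replace) the se_dev_entry mapping @mapped_lun to @lun for
 * @nacl. An existing entry is never modified in place: the new entry is
 * published with hlist_add_head_rcu(), the old one is unhooked with
 * hlist_del_rcu(), drained of in-flight PR references via pr_kref, and
 * finally released by kfree_rcu().
 */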
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *orig, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		pr_err("Unable to allocate se_dev_entry memory\n");
		return -ENOMEM;
	}

	spin_lock_init(&new->ua_lock);
	INIT_LIST_HEAD(&new->ua_list);
	INIT_LIST_HEAD(&new->lun_link);

	new->mapped_lun = mapped_lun;
	kref_init(&new->pr_kref);
	init_completion(&new->pr_comp);

	new->lun_access_ro = lun_access_ro;
	new->creation_time = get_jiffies_64();
	new->attach_count++;

	mutex_lock(&nacl->lun_entry_mutex);
	orig = target_nacl_find_deve(nacl, mapped_lun);
	if (orig && orig->se_lun) {
		struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

		if (orig_lun != lun) {
			pr_err("Existing orig->se_lun doesn't match new lun"
				" for dynamic -> explicit NodeACL conversion:"
				" %s\n", nacl->initiatorname);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}
		if (orig->se_lun_acl != NULL) {
			pr_warn_ratelimited("Detected existing explicit"
				" se_lun_acl->se_lun_group reference for %s"
				" mapped_lun: %llu, failing\n",
				 nacl->initiatorname, mapped_lun);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}

		rcu_assign_pointer(new->se_lun, lun);
		rcu_assign_pointer(new->se_lun_acl, lun_acl);
		hlist_del_rcu(&orig->link);
		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
		mutex_unlock(&nacl->lun_entry_mutex);

		spin_lock(&lun->lun_deve_lock);
		list_del(&orig->lun_link);
		list_add_tail(&new->lun_link, &lun->lun_deve_list);
		spin_unlock(&lun->lun_deve_lock);

		kref_put(&orig->pr_kref, target_pr_kref_release);
		wait_for_completion(&orig->pr_comp);

		target_luns_data_has_changed(nacl, new, true);
		kfree_rcu(orig, rcu_head);
		return 0;
	}

	rcu_assign_pointer(new->se_lun, lun);
	rcu_assign_pointer(new->se_lun_acl, lun_acl);
	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
	mutex_unlock(&nacl->lun_entry_mutex);

	spin_lock(&lun->lun_deve_lock);
	list_add_tail(&new->lun_link, &lun->lun_deve_list);
	spin_unlock(&lun->lun_deve_lock);

	target_luns_data_has_changed(nacl, new, true);
	return 0;
}

void core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_dev_entry *orig,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	lockdep_assert_held(&nacl->lun_entry_mutex);

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * lun->lun_deve_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->lun_link from
	 * lun->lun_deve_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock(&lun->lun_deve_lock);
	list_del(&orig->lun_link);
	spin_unlock(&lun->lun_deve_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(orig);

	hlist_del_rcu(&orig->link);
	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
	orig->lun_access_ro = false;
	orig->creation_time = 0;
	orig->attach_count--;
	/*
	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
	 * or REGISTER_AND_MOVE PR operation to complete.
	 */
	kref_put(&orig->pr_kref, target_pr_kref_release);
	wait_for_completion(&orig->pr_comp);

	rcu_assign_pointer(orig->se_lun, NULL);
	rcu_assign_pointer(orig->se_lun_acl, NULL);

	kfree_rcu(orig, rcu_head);

	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
	target_luns_data_has_changed(nacl, NULL, false);
}

/*	core_clear_lun_from_tpg():
 *
 *	Remove all NodeACL mappings of @lun from every ACL in @tpg.
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {

		mutex_lock(&nacl->lun_entry_mutex);
		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
			struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

			if (lun != tmp_lun)
				continue;

			core_disable_device_list_for_node(lun, deve, nacl, tpg);
		}
		mutex_unlock(&nacl->lun_entry_mutex);
	}
	mutex_unlock(&tpg->acl_node_mutex);
}

int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
{
	struct se_lun *tmp;

	spin_lock(&dev->se_port_lock);
	if (dev->export_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
				" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return -ENOSPC;
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code      Description
	 * 0h        Reserved
	 * 1h        Relative port 1, historically known as port A
	 * 2h        Relative port 2, historically known as port B
	 * 3h to FFFFh    Relative port 3 through 65 535
	 */
	lun->lun_rtpi = dev->dev_rpti_counter++;
	if (!lun->lun_rtpi)
		goto again;

	list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (lun->lun_rtpi == tmp->lun_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return 0;
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}
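
/*
 * Example: with 4 KiB pages and a 512-byte logical block size the
 * alignment below is eight sectors, so a max_sectors of 2052 is rounded
 * down to 2048.
 */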
static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}

int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{
	int rc;

	rc = core_tpg_add_lun(tpg, lun, false, dev);
	if (rc < 0)
		return rc;

	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name, dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		mutex_lock(&tpg->acl_node_mutex);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				core_tpg_add_node_to_devs(acl, tpg, lun);
			}
		}
		mutex_unlock(&tpg->acl_node_mutex);
	}

	return 0;
}

/*	core_dev_del_lun():
 *
 *	Deactivate a Logical Unit and remove it from its portal group.
 */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name);

	core_tpg_remove_lun(tpg, lun);
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u64 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->fabric_name);
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
	bool lun_access_ro)
{
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	if (!nacl)
		return -EINVAL;

	if (lun->lun_access_ro)
		lun_access_ro = true;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access_ro, nacl, tpg) < 0)
		return -EINVAL;

	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
		lun_access_ro ? "RO" : "RW",
		nacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
					    lacl->mapped_lun);
	return 0;
}

int core_dev_del_initiator_node_lun_acl(
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (deve)
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);

	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %llu\n",
		tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		nacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->fabric_name,
		lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	int device_type = dev->transport->get_device_type(dev);

	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	pr_debug("  Vendor: %-" __stringify(INQUIRY_VENDOR_LEN) "s\n",
		wwn->vendor);
	pr_debug("  Model: %-" __stringify(INQUIRY_MODEL_LEN) "s\n",
		wwn->model);
	pr_debug("  Revision: %-" __stringify(INQUIRY_REVISION_LEN) "s\n",
		wwn->revision);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}
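
/*
 * First half of the two-step device lifecycle: target_alloc_device() only
 * allocates and initializes the se_device. No I/O can reach it until its
 * attributes are set through configfs and target_configure_device() marks
 * it DF_CONFIGURED.
 */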
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;
	int i;

	dev = hba->backend->ops->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL);
	if (!dev->queues) {
		dev->transport->free_device(dev);
		return NULL;
	}

	dev->queue_cnt = nr_cpu_ids;
	for (i = 0; i < dev->queue_cnt; i++) {
		INIT_LIST_HEAD(&dev->queues[i].state_list);
		spin_lock_init(&dev->queues[i].lock);
	}

	dev->se_hba = hba;
	dev->transport = hba->backend->ops;
	dev->transport_flags = dev->transport->transport_flags_default;
	dev->prot_length = sizeof(struct t10_pi_tuple);
	dev->hba_index = hba->hba_index;

	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work);
	mutex_init(&dev->lun_reset_mutex);

	dev->t10_wwn.t10_dev = dev;
	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = 1;
	dev->dev_attrib.emulate_fua_write = 1;
	dev->dev_attrib.emulate_fua_read = 1;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = TARGET_UA_INTLCK_CTRL_CLEAR;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.emulate_pr = DA_EMULATE_PR;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.unmap_zeroes_data =
				DA_UNMAP_ZEROES_DATA_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;

	xcopy_lun = &dev->xcopy_lun;
	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
	init_completion(&xcopy_lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
	xcopy_lun->lun_tpg = &xcopy_pt_tpg;

	/* Preload the default INQUIRY const values */
	strlcpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor));
	strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
		sizeof(dev->t10_wwn.model));
	strlcpy(dev->t10_wwn.revision, dev->transport->inquiry_rev,
		sizeof(dev->t10_wwn.revision));

	return dev;
}

/*
 * Check if the underlying struct block_device request_queue supports
 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
 * in ATA and we need to set TPE=1
 */
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
				       struct request_queue *q)
{
	int block_size = queue_logical_block_size(q);

	if (!blk_queue_discard(q))
		return false;

	attrib->max_unmap_lba_count =
		q->limits.max_discard_sectors >> (ilog2(block_size) - 9);
	/*
	 * Currently hardcoded to 1 in Linux/SCSI code..
	 */
	attrib->max_unmap_block_desc_count = 1;
	attrib->unmap_granularity = q->limits.discard_granularity / block_size;
	attrib->unmap_granularity_alignment = q->limits.discard_alignment /
								block_size;
	return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);
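
/*
 * Example: a device advertising a 4096-byte block size spans eight
 * 512-byte sectors per logical block, so target_to_linux_sector(dev, 16)
 * below returns sector 128.
 */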
/*
 * Convert from blocksize advertised to the initiator to the 512 byte
 * units unconditionally used by the Linux block layer.
 */
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
{
	switch (dev->dev_attrib.block_size) {
	case 4096:
		return lb << 3;
	case 2048:
		return lb << 2;
	case 1024:
		return lb << 1;
	default:
		return lb;
	}
}
EXPORT_SYMBOL(target_to_linux_sector);
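
/*
 * Iteration context handed to idr_for_each() by target_for_each_device():
 * bundles the caller's callback with its opaque data cookie.
 */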
struct devices_idr_iter {
	int (*fn)(struct se_device *dev, void *data);
	void *data;
};

static int target_devices_idr_iter(int id, void *p, void *data)
	__must_hold(&device_mutex)
{
	struct devices_idr_iter *iter = data;
	struct se_device *dev = p;
	struct config_item *item;
	int ret;

	/*
	 * We add the device early to the idr, so it can be used
	 * by backend modules during configuration. We do not want
	 * to allow other callers to access partially setup devices,
	 * so we skip them here.
	 */
	if (!target_dev_configured(dev))
		return 0;

	item = config_item_get_unless_zero(&dev->dev_group.cg_item);
	if (!item)
		return 0;
	mutex_unlock(&device_mutex);

	ret = iter->fn(dev, iter->data);
	config_item_put(item);

	mutex_lock(&device_mutex);
	return ret;
}

/**
 * target_for_each_device - iterate over configured devices
 * @fn: iterator function
 * @data: pointer to data that will be passed to fn
 *
 * fn must return 0 to continue looping over devices. non-zero will break
 * from the loop and return that value to the caller.
 */
int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
			   void *data)
{
	struct devices_idr_iter iter = { .fn = fn, .data = data };
	int ret;

	mutex_lock(&device_mutex);
	ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
	mutex_unlock(&device_mutex);
	return ret;
}
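
/*
 * Example usage (illustrative only, not a caller in this file): count the
 * configured devices by passing a counting callback:
 *
 *	static int count_dev(struct se_device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *	target_for_each_device(count_dev, &count);
 */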
int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret, id;

	if (target_dev_configured(dev)) {
		pr_err("se_dev->se_dev_ptr already set for storage"
				" object\n");
		return -EEXIST;
	}

	/*
	 * Add early so modules like tcmu can use during its
	 * configuration.
	 */
	mutex_lock(&device_mutex);
	/*
	 * Use cyclic to try and avoid collisions with devices
	 * that were recently removed.
	 */
	id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
	mutex_unlock(&device_mutex);
	if (id < 0) {
		ret = -ENOMEM;
		goto out;
	}
	dev->dev_index = id;

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out_free_index;
	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out_destroy_device;

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	dev->dev_flags |= DF_CONFIGURED;

	return 0;

out_destroy_device:
	dev->transport->destroy_device(dev);
out_free_index:
	mutex_lock(&device_mutex);
	idr_remove(&devices_idr, dev->dev_index);
	mutex_unlock(&device_mutex);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (target_dev_configured(dev)) {
		dev->transport->destroy_device(dev);

		mutex_lock(&device_mutex);
		idr_remove(&devices_idr, dev->dev_index);
		mutex_unlock(&device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	kfree(dev->queues);
	dev->transport->free_device(dev);
}
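
/*
 * Set up the global virtual LUN 0 that transport_lookup_cmd_lun() falls
 * back to: a small nullio rd_mcp ramdisk, so initiators that have no
 * explicit MappedLUN=0 still see a LUN 0 for REPORT LUNS and INQUIRY.
 */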
int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}

void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}
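
/*
 * passthrough_parse_cdb() below is shared by the kernel (pscsi) and user
 * (tcmu) passthrough backends: each passes its own exec_cmd callback and
 * lets the core emulate only the opcodes a passthrough device cannot
 * handle by itself.
 */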
/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;
	struct se_device *dev = cmd->se_dev;
	unsigned int size;

	/*
	 * For REPORT LUNS we always need to emulate the response, for everything
	 * else, pass it up.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/*
	 * With emulate_pr disabled, all reservation requests should fail,
	 * regardless of whether or not TRANSPORT_FLAG_PASSTHROUGH_PGR is set.
	 */
	if (!dev->dev_attrib.emulate_pr &&
	    ((cdb[0] == PERSISTENT_RESERVE_IN) ||
	     (cdb[0] == PERSISTENT_RESERVE_OUT) ||
	     (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
	     (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	/*
	 * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
	 * emulate the response, since tcmu does not have the information
	 * required to process these commands.
	 */
	if (!(dev->transport_flags &
	      TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
		if (cdb[0] == PERSISTENT_RESERVE_IN) {
			cmd->execute_cmd = target_scsi3_emulate_pr_in;
			size = get_unaligned_be16(&cdb[7]);
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == PERSISTENT_RESERVE_OUT) {
			cmd->execute_cmd = target_scsi3_emulate_pr_out;
			size = get_unaligned_be32(&cdb[5]);
			return target_cmd_size_check(cmd, size);
		}

		if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
			cmd->execute_cmd = target_scsi2_reservation_release;
			if (cdb[0] == RELEASE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
			cmd->execute_cmd = target_scsi2_reservation_reserve;
			if (cdb[0] == RESERVE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case WRITE_VERIFY_16:
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case WRITE_VERIFY_32:
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);