/*******************************************************************************
 * Vhost kernel TCM fabric driver for virtio SCSI initiators
 *
 * (C) Copyright 2010-2013 Datera, Inc.
 * (C) Copyright 2010-2012 IBM Corp.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Authors: Nicholas A. Bellinger <nab@daterainc.com>
 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/miscdevice.h>
#include <asm/unaligned.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <linux/vhost.h>
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>
#include <linux/percpu_ida.h>

#include "vhost.h"

#define VHOST_SCSI_VERSION  "v0.1"
#define VHOST_SCSI_NAMELEN 256
#define VHOST_SCSI_MAX_CDB_SIZE 32
#define VHOST_SCSI_DEFAULT_TAGS 256
#define VHOST_SCSI_PREALLOC_SGLS 2048
#define VHOST_SCSI_PREALLOC_UPAGES 2048
#define VHOST_SCSI_PREALLOC_PROT_SGLS 512

/* Max number of requests before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * request.
 */
#define VHOST_SCSI_WEIGHT 256

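/*
 * Note: the weight is enforced by vhost_exceeds_weight() at the bottom of the
 * request loop in vhost_scsi_handle_vq(); once the per-kick request count
 * crosses VHOST_SCSI_WEIGHT the handler yields so other virtqueues get a turn.
 */
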
struct vhost_scsi_inflight {
	/* Wait for the flush operation to finish */
	struct completion comp;
	/* Refcount for the inflight reqs */
	struct kref kref;
};

struct vhost_scsi_cmd {
	/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
	int tvc_vq_desc;
	/* virtio-scsi initiator task attribute */
	int tvc_task_attr;
	/* virtio-scsi response incoming iovecs */
	int tvc_in_iovs;
	/* virtio-scsi initiator data direction */
	enum dma_data_direction tvc_data_direction;
	/* Expected data transfer length from virtio-scsi header */
	u32 tvc_exp_data_len;
	/* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
	u64 tvc_tag;
	/* The number of scatterlists associated with this cmd */
	u32 tvc_sgl_count;
	u32 tvc_prot_sgl_count;
	/* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */
	u32 tvc_lun;
	/* Pointer to the SGL formatted memory from virtio-scsi */
	struct scatterlist *tvc_sgl;
	struct scatterlist *tvc_prot_sgl;
	struct page **tvc_upages;
	/* Pointer to response header iovec */
	struct iovec tvc_resp_iov;
	/* Pointer to vhost_scsi for our device */
	struct vhost_scsi *tvc_vhost;
	/* Pointer to vhost_virtqueue for the cmd */
	struct vhost_virtqueue *tvc_vq;
	/* Pointer to vhost nexus memory */
	struct vhost_scsi_nexus *tvc_nexus;
	/* The TCM I/O descriptor that is accessed via container_of() */
	struct se_cmd tvc_se_cmd;
	/* work item used for cmwq dispatch to vhost_scsi_submission_work() */
	struct work_struct work;
	/* Copy of the incoming SCSI command descriptor block (CDB) */
	unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
	/* Sense buffer that will be mapped into outgoing status */
	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
	/* Completed commands list, serviced from vhost worker thread */
	struct llist_node tvc_completion_list;
	/* Used to track inflight cmd */
	struct vhost_scsi_inflight *inflight;
};

struct vhost_scsi_nexus {
	/* Pointer to TCM session for I_T Nexus */
	struct se_session *tvn_se_sess;
};

struct vhost_scsi_tpg {
	/* Vhost port target portal group tag for TCM */
	u16 tport_tpgt;
	/* Used to track number of TPG Port/Lun Links wrt to explicit I_T Nexus shutdown */
	int tv_tpg_port_count;
	/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
	int tv_tpg_vhost_count;
	/* Used for enabling T10-PI with legacy devices */
	int tv_fabric_prot_type;
	/* list for vhost_scsi_list */
	struct list_head tv_tpg_list;
	/* Used to protect access for tpg_nexus */
	struct mutex tv_tpg_mutex;
	/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
	struct vhost_scsi_nexus *tpg_nexus;
	/* Pointer back to vhost_scsi_tport */
	struct vhost_scsi_tport *tport;
	/* Returned by vhost_scsi_make_tpg() */
	struct se_portal_group se_tpg;
	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
	struct vhost_scsi *vhost_scsi;
};

struct vhost_scsi_tport {
	/* SCSI protocol the tport is providing */
	u16 tport_proto_id;
	/* Binary World Wide unique Port Name for Vhost Target port */
	u64 tport_wwpn;
	/* ASCII formatted WWPN for Vhost Target port */
	char tport_name[VHOST_SCSI_NAMELEN];
	/* Returned by vhost_scsi_make_tport() */
	struct se_wwn tport_wwn;
};

struct vhost_scsi_evt {
	/* event to be sent to guest */
	struct virtio_scsi_event event;
	/* event list, serviced from vhost worker thread */
	struct llist_node list;
};

enum {
	VHOST_SCSI_VQ_CTL = 0,
	VHOST_SCSI_VQ_EVT = 1,
	VHOST_SCSI_VQ_IO = 2,
};

/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
enum {
	VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
					       (1ULL << VIRTIO_SCSI_F_T10_PI)
};

#define VHOST_SCSI_MAX_TARGET	256
#define VHOST_SCSI_MAX_VQ	128
#define VHOST_SCSI_MAX_EVENT	128

struct vhost_scsi_virtqueue {
	struct vhost_virtqueue vq;
	/*
	 * Reference counting for inflight reqs, used for flush operation. At
	 * each time, one reference tracks new commands submitted, while we
	 * wait for another one to reach 0.
	 */
	struct vhost_scsi_inflight inflights[2];
	/*
	 * Indicate current inflight in use, protected by vq->mutex.
	 * Writers must also take dev mutex and flush under it.
	 */
	int inflight_idx;
};

struct vhost_scsi {
	/* Protected by vhost_scsi->dev.mutex */
	struct vhost_scsi_tpg **vs_tpg;
	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];

	struct vhost_dev dev;
	struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];

	struct vhost_work vs_completion_work; /* cmd completion work item */
	struct llist_head vs_completion_list; /* cmd completion queue */

	struct vhost_work vs_event_work; /* evt injection work item */
	struct llist_head vs_event_list; /* evt injection queue */

	bool vs_events_missed; /* any missed events, protected by vq->mutex */
	int vs_events_nr; /* num of pending events, protected by vq->mutex */
};

static struct workqueue_struct *vhost_scsi_workqueue;

/* Global mutex to protect vhost_scsi TPG list for vhost IOCTL access */
static DEFINE_MUTEX(vhost_scsi_mutex);
static LIST_HEAD(vhost_scsi_list);

static int iov_num_pages(void __user *iov_base, size_t iov_len)
{
	return (PAGE_ALIGN((unsigned long)iov_base + iov_len) -
	       ((unsigned long)iov_base & PAGE_MASK)) >> PAGE_SHIFT;
}

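/*
 * Worked example, assuming 4K pages: iov_base = 0x10ff8 with iov_len = 16
 * straddles a page boundary, so PAGE_ALIGN(0x10ff8 + 16) = 0x12000 minus
 * (0x10ff8 & PAGE_MASK) = 0x10000 yields two pages, even though only 16
 * bytes are covered.
 */
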
static void vhost_scsi_done_inflight(struct kref *kref)
{
	struct vhost_scsi_inflight *inflight;

	inflight = container_of(kref, struct vhost_scsi_inflight, kref);
	complete(&inflight->comp);
}

static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
				     struct vhost_scsi_inflight *old_inflight[])
{
	struct vhost_scsi_inflight *new_inflight;
	struct vhost_virtqueue *vq;
	int idx, i;

	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
		vq = &vs->vqs[i].vq;

		mutex_lock(&vq->mutex);

		/* store old inflight */
		idx = vs->vqs[i].inflight_idx;
		if (old_inflight)
			old_inflight[i] = &vs->vqs[i].inflights[idx];

		/* set up new inflight */
		vs->vqs[i].inflight_idx = idx ^ 1;
		new_inflight = &vs->vqs[i].inflights[idx ^ 1];
		kref_init(&new_inflight->kref);
		init_completion(&new_inflight->comp);

		mutex_unlock(&vq->mutex);
	}
}

static struct vhost_scsi_inflight *
vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
{
	struct vhost_scsi_inflight *inflight;
	struct vhost_scsi_virtqueue *svq;

	svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
	inflight = &svq->inflights[svq->inflight_idx];
	kref_get(&inflight->kref);

	return inflight;
}

static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
{
	kref_put(&inflight->kref, vhost_scsi_done_inflight);
}

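/*
 * Lifetime sketch: each command takes a reference on the inflight counter
 * that is current when it is queued (vhost_scsi_get_inflight) and drops it
 * when it is released (vhost_scsi_put_inflight). A flush flips inflight_idx,
 * so new commands charge the fresh counter while the flusher waits for the
 * old one to drain to zero via the embedded completion.
 */
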
static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static char *vhost_scsi_get_fabric_name(void)
{
	return "vhost";
}

static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	return tpg->tport_tpgt;
}

static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	return tpg->tv_fabric_prot_type;
}

static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
{
	struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
				struct vhost_scsi_cmd, tvc_se_cmd);
	struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
	int i;

	if (tv_cmd->tvc_sgl_count) {
		for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
			put_page(sg_page(&tv_cmd->tvc_sgl[i]));
	}
	if (tv_cmd->tvc_prot_sgl_count) {
		for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
			put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
	}

	vhost_scsi_put_inflight(tv_cmd->inflight);
	percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
}

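/*
 * The pages dropped above were pinned in vhost_scsi_map_to_sgl() via
 * get_user_pages_fast(); command release is the single point where both the
 * data and protection SGL pages are unpinned before the tag returns to the
 * session pool.
 */
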
static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
{
	/* Go ahead and process the write immediately */
	target_execute_cmd(se_cmd);
	return 0;
}

static int vhost_scsi_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
{
	return;
}

static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd)
{
	struct vhost_scsi *vs = cmd->tvc_vhost;

	llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
	vhost_work_queue(&vs->dev, &vs->vs_completion_work);
}

static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
{
	struct vhost_scsi_cmd *cmd = container_of(se_cmd,
				struct vhost_scsi_cmd, tvc_se_cmd);
	vhost_scsi_complete_cmd(cmd);
	return 0;
}

static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
{
	struct vhost_scsi_cmd *cmd = container_of(se_cmd,
				struct vhost_scsi_cmd, tvc_se_cmd);
	vhost_scsi_complete_cmd(cmd);
	return 0;
}

static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
{
	return;
}

static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
	vs->vs_events_nr--;
	kfree(evt);
}

static struct vhost_scsi_evt *
vhost_scsi_allocate_evt(struct vhost_scsi *vs,
			u32 event, u32 reason)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct vhost_scsi_evt *evt;

	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
		vs->vs_events_missed = true;
		return NULL;
	}

	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
	if (!evt) {
		vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
		vs->vs_events_missed = true;
		return NULL;
	}

	evt->event.event = cpu_to_vhost32(vq, event);
	evt->event.reason = cpu_to_vhost32(vq, reason);
	vs->vs_events_nr++;

	return evt;
}

static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;

	/* TODO locking against target/backend threads? */
	transport_generic_free_cmd(se_cmd, 0);
}

static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
{
	return target_put_sess_cmd(se_cmd);
}

static void
vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct virtio_scsi_event *event = &evt->event;
	struct virtio_scsi_event __user *eventp;
	unsigned out, in;
	int head, ret;

	if (!vq->private_data) {
		vs->vs_events_missed = true;
		return;
	}

again:
	vhost_disable_notify(&vs->dev, vq);
	head = vhost_get_vq_desc(vq, vq->iov,
			ARRAY_SIZE(vq->iov), &out, &in,
			NULL, NULL);
	if (head < 0) {
		vs->vs_events_missed = true;
		return;
	}
	if (head == vq->num) {
		if (vhost_enable_notify(&vs->dev, vq))
			goto again;
		vs->vs_events_missed = true;
		return;
	}

	if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
				vq->iov[out].iov_len);
		vs->vs_events_missed = true;
		return;
	}

	if (vs->vs_events_missed) {
		event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
		vs->vs_events_missed = false;
	}

	eventp = vq->iov[out].iov_base;
	ret = __copy_to_user(eventp, event, sizeof(*event));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		vq_err(vq, "Faulted on vhost_scsi_send_event\n");
}

static void vhost_scsi_evt_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_event_work);
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct vhost_scsi_evt *evt, *t;
	struct llist_node *llnode;

	mutex_lock(&vq->mutex);
	llnode = llist_del_all(&vs->vs_event_list);
	llist_for_each_entry_safe(evt, t, llnode, list) {
		vhost_scsi_do_evt_work(vs, evt);
		vhost_scsi_free_evt(vs, evt);
	}
	mutex_unlock(&vq->mutex);
}

/* Fill in status and signal that we are done processing this command
 *
 * This is scheduled in the vhost work queue so we are called with the owner
 * process mm and can access the vring.
 */
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_completion_work);
	DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
	struct virtio_scsi_cmd_resp v_rsp;
	struct vhost_scsi_cmd *cmd;
	struct llist_node *llnode;
	struct se_cmd *se_cmd;
	struct iov_iter iov_iter;
	int ret, vq;

	bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
	llnode = llist_del_all(&vs->vs_completion_list);
	llist_for_each_entry(cmd, llnode, tvc_completion_list) {
		se_cmd = &cmd->tvc_se_cmd;

		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
			cmd, se_cmd->residual_count, se_cmd->scsi_status);

		memset(&v_rsp, 0, sizeof(v_rsp));
		v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
		/* TODO is status_qualifier field needed? */
		v_rsp.status = se_cmd->scsi_status;
		v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
						 se_cmd->scsi_sense_length);
		memcpy(v_rsp.sense, cmd->tvc_sense_buf,
		       se_cmd->scsi_sense_length);

		iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov,
			      cmd->tvc_in_iovs, sizeof(v_rsp));
		ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
		if (likely(ret == sizeof(v_rsp))) {
			struct vhost_scsi_virtqueue *q;
			vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
			q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
			vq = q - vs->vqs;
			__set_bit(vq, signal);
		} else
			pr_err("Faulted on virtio_scsi_cmd_resp\n");

		vhost_scsi_free_cmd(cmd);
	}

	vq = -1;
	while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
		< VHOST_SCSI_MAX_VQ)
		vhost_signal(&vs->dev, &vs->vqs[vq].vq);
}

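/*
 * The signal bitmap above batches guest notifications: vhost_add_used() runs
 * once per completed command, but vhost_signal() fires at most once per
 * virtqueue per work-item run, which trims eventfd traffic under load.
 */
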
static struct vhost_scsi_cmd *
vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
		   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
		   u32 exp_data_len, int data_direction)
{
	struct vhost_scsi_cmd *cmd;
	struct vhost_scsi_nexus *tv_nexus;
	struct se_session *se_sess;
	struct scatterlist *sg, *prot_sg;
	struct page **pages;
	int tag;

	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		pr_err("Unable to locate active struct vhost_scsi_nexus\n");
		return ERR_PTR(-EIO);
	}
	se_sess = tv_nexus->tvn_se_sess;

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
	if (tag < 0) {
		pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
		return ERR_PTR(-ENOMEM);
	}

	cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag];
	sg = cmd->tvc_sgl;
	prot_sg = cmd->tvc_prot_sgl;
	pages = cmd->tvc_upages;
	memset(cmd, 0, sizeof(struct vhost_scsi_cmd));

	cmd->tvc_sgl = sg;
	cmd->tvc_prot_sgl = prot_sg;
	cmd->tvc_upages = pages;
	cmd->tvc_se_cmd.map_tag = tag;
	cmd->tvc_tag = scsi_tag;
	cmd->tvc_lun = lun;
	cmd->tvc_task_attr = task_attr;
	cmd->tvc_exp_data_len = exp_data_len;
	cmd->tvc_data_direction = data_direction;
	cmd->tvc_nexus = tv_nexus;
	cmd->inflight = vhost_scsi_get_inflight(vq);

	memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);

	return cmd;
}

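/*
 * Note the save/restore dance above: the per-tag scatterlist and page arrays
 * are preallocated in vhost_scsi_nexus_cb(), so their pointers must survive
 * the memset() that wipes the rest of the descriptor between uses of the tag.
 */
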
/*
 * Map a user memory range into a scatterlist
 *
 * Returns the number of scatterlist entries used or -errno on error.
 */
static int
vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
		      void __user *ptr,
		      size_t len,
		      struct scatterlist *sgl,
		      bool write)
{
	unsigned int npages = 0, offset, nbytes;
	unsigned int pages_nr = iov_num_pages(ptr, len);
	struct scatterlist *sg = sgl;
	struct page **pages = cmd->tvc_upages;
	int ret, i;

	if (pages_nr > VHOST_SCSI_PREALLOC_UPAGES) {
		pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
		       " preallocated VHOST_SCSI_PREALLOC_UPAGES: %u\n",
			pages_nr, VHOST_SCSI_PREALLOC_UPAGES);
		return -ENOBUFS;
	}

	ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
	/* No pages were pinned */
	if (ret < 0)
		goto out;
	/* Fewer pages pinned than wanted */
	if (ret != pages_nr) {
		for (i = 0; i < ret; i++)
			put_page(pages[i]);
		ret = -EFAULT;
		goto out;
	}

	while (len > 0) {
		offset = (uintptr_t)ptr & ~PAGE_MASK;
		nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
		sg_set_page(sg, pages[npages], nbytes, offset);
		ptr += nbytes;
		len -= nbytes;
		sg++;
		npages++;
	}
out:
	return ret;
}

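/*
 * Example, assuming 4K pages: a 16 KiB buffer starting at a non-page-aligned
 * address touches five pages and so consumes five scatterlist entries; the
 * first and last entries cover partial pages, the middle three a full
 * PAGE_SIZE each.
 */
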
static int
vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
{
	int sgl_count = 0;

	if (!iter || !iter->iov) {
		pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
		       " present\n", __func__, bytes);
		return -EINVAL;
	}

	sgl_count = iov_iter_npages(iter, 0xffff);
	if (sgl_count > max_sgls) {
		pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
		       " max_sgls: %d\n", __func__, sgl_count, max_sgls);
		return -EINVAL;
	}
	return sgl_count;
}

static int
vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
		      struct iov_iter *iter,
		      struct scatterlist *sg, int sg_count)
{
	size_t off = iter->iov_offset;
	struct scatterlist *p = sg;
	unsigned int i;
	int ret;

	for (i = 0; i < iter->nr_segs; i++) {
		void __user *base = iter->iov[i].iov_base + off;
		size_t len = iter->iov[i].iov_len - off;

		ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write);
		if (ret < 0) {
			/* Unwind: unpin everything mapped so far */
			while (p < sg) {
				struct page *page = sg_page(p++);
				if (page)
					put_page(page);
			}
			return ret;
		}
		sg += ret;
		off = 0;
	}
	return 0;
}

static int
vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
		 size_t prot_bytes, struct iov_iter *prot_iter,
		 size_t data_bytes, struct iov_iter *data_iter)
{
	int sgl_count, ret;
	bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);

	if (prot_bytes) {
		sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
						 VHOST_SCSI_PREALLOC_PROT_SGLS);
		if (sgl_count < 0)
			return sgl_count;

		sg_init_table(cmd->tvc_prot_sgl, sgl_count);
		cmd->tvc_prot_sgl_count = sgl_count;
		pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
			 cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);

		ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
					    cmd->tvc_prot_sgl,
					    cmd->tvc_prot_sgl_count);
		if (ret < 0) {
			cmd->tvc_prot_sgl_count = 0;
			return ret;
		}
	}
	sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
					 VHOST_SCSI_PREALLOC_SGLS);
	if (sgl_count < 0)
		return sgl_count;

	sg_init_table(cmd->tvc_sgl, sgl_count);
	cmd->tvc_sgl_count = sgl_count;
	pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
		 cmd->tvc_sgl, cmd->tvc_sgl_count);

	ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
				    cmd->tvc_sgl, cmd->tvc_sgl_count);
	if (ret < 0) {
		cmd->tvc_sgl_count = 0;
		return ret;
	}
	return 0;
}

static int vhost_scsi_to_tcm_attr(int attr)
{
	switch (attr) {
	case VIRTIO_SCSI_S_SIMPLE:
		return TCM_SIMPLE_TAG;
	case VIRTIO_SCSI_S_ORDERED:
		return TCM_ORDERED_TAG;
	case VIRTIO_SCSI_S_HEAD:
		return TCM_HEAD_TAG;
	case VIRTIO_SCSI_S_ACA:
		return TCM_ACA_TAG;
	default:
		break;
	}
	return TCM_SIMPLE_TAG;
}

static void vhost_scsi_submission_work(struct work_struct *work)
{
	struct vhost_scsi_cmd *cmd =
		container_of(work, struct vhost_scsi_cmd, work);
	struct vhost_scsi_nexus *tv_nexus;
	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
	struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
	int rc;

	/* FIXME: BIDI operation */
	if (cmd->tvc_sgl_count) {
		sg_ptr = cmd->tvc_sgl;

		if (cmd->tvc_prot_sgl_count)
			sg_prot_ptr = cmd->tvc_prot_sgl;
		else
			se_cmd->prot_pto = true;
	} else {
		sg_ptr = NULL;
	}
	tv_nexus = cmd->tvc_nexus;

	se_cmd->tag = 0;
	rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
			cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
			cmd->tvc_lun, cmd->tvc_exp_data_len,
			vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
			cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
			sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
			cmd->tvc_prot_sgl_count);
	if (rc < 0) {
		transport_send_check_condition_and_sense(se_cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
		transport_generic_free_cmd(se_cmd, 0);
	}
}

static void
vhost_scsi_send_bad_target(struct vhost_scsi *vs,
			   struct vhost_virtqueue *vq,
			   int head, unsigned out)
{
	struct virtio_scsi_cmd_resp __user *resp;
	struct virtio_scsi_cmd_resp rsp;
	int ret;

	memset(&rsp, 0, sizeof(rsp));
	rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
	resp = vq->iov[out].iov_base;
	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		pr_err("Faulted on virtio_scsi_cmd_resp\n");
}

static void
vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
	struct vhost_scsi_tpg **vs_tpg, *tpg;
	struct virtio_scsi_cmd_req v_req;
	struct virtio_scsi_cmd_req_pi v_req_pi;
	struct vhost_scsi_cmd *cmd;
	struct iov_iter out_iter, in_iter, prot_iter, data_iter;
	u64 tag;
	u32 exp_data_len, data_direction;
	unsigned int out = 0, in = 0;
	int head, ret, prot_bytes, c = 0;
	size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
	size_t out_size, in_size;
	u16 lun;
	u8 *target, *lunp, task_attr;
	bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
	void *req, *cdb;

	mutex_lock(&vq->mutex);
	/*
	 * We can handle the vq only after the endpoint is setup by calling the
	 * VHOST_SCSI_SET_ENDPOINT ioctl.
	 */
	vs_tpg = vq->private_data;
	if (!vs_tpg)
		goto out;

	vhost_disable_notify(&vs->dev, vq);

	do {
		head = vhost_get_vq_desc(vq, vq->iov,
					 ARRAY_SIZE(vq->iov), &out, &in,
					 NULL, NULL);
		pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
			 head, out, in);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new? Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
				vhost_disable_notify(&vs->dev, vq);
				continue;
			}
			break;
		}
		/*
		 * Check for a sane response buffer so we can report early
		 * errors back to the guest.
		 */
		if (unlikely(vq->iov[out].iov_len < rsp_size)) {
			vq_err(vq, "Expecting at least virtio_scsi_cmd_resp"
				" size, got %zu bytes\n", vq->iov[out].iov_len);
			break;
		}
		/*
		 * Setup pointers and values based upon different virtio-scsi
		 * request header if T10_PI is enabled in KVM guest.
		 */
		if (t10_pi) {
			req = &v_req_pi;
			req_size = sizeof(v_req_pi);
			lunp = &v_req_pi.lun[0];
			target = &v_req_pi.lun[1];
		} else {
			req = &v_req;
			req_size = sizeof(v_req);
			lunp = &v_req.lun[0];
			target = &v_req.lun[1];
		}
		/*
		 * FIXME: Not correct for BIDI operation
		 */
		out_size = iov_length(vq->iov, out);
		in_size = iov_length(&vq->iov[out], in);

		/*
		 * Copy over the virtio-scsi request header, which for a
		 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
		 * single iovec may contain both the header + outgoing
		 * WRITE payloads.
		 *
		 * copy_from_iter() will advance out_iter, so that it will
		 * point at the start of the outgoing WRITE payload, if
		 * DMA_TO_DEVICE is set.
		 */
		iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);

		if (unlikely(!copy_from_iter_full(req, req_size, &out_iter))) {
			vq_err(vq, "Faulted on copy_from_iter\n");
			vhost_scsi_send_bad_target(vs, vq, head, out);
			continue;
		}
		/* virtio-scsi spec requires byte 0 of the lun to be 1 */
		if (unlikely(*lunp != 1)) {
			vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp);
			vhost_scsi_send_bad_target(vs, vq, head, out);
			continue;
		}

		tpg = ACCESS_ONCE(vs_tpg[*target]);
		if (unlikely(!tpg)) {
			/* Target does not exist, fail the request */
			vhost_scsi_send_bad_target(vs, vq, head, out);
			continue;
		}
		/*
		 * Determine data_direction by calculating the total outgoing
		 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
		 * response headers respectively.
		 *
		 * For DMA_TO_DEVICE this is out_iter, which is already pointing
		 * to the right place.
		 *
		 * For DMA_FROM_DEVICE, the iovec will be just past the end
		 * of the virtio-scsi response header in either the same
		 * or immediately following iovec.
		 *
		 * Any associated T10_PI bytes for the outgoing / incoming
		 * payloads are included in calculation of exp_data_len here.
		 */
		prot_bytes = 0;

		if (out_size > req_size) {
			data_direction = DMA_TO_DEVICE;
			exp_data_len = out_size - req_size;
			data_iter = out_iter;
		} else if (in_size > rsp_size) {
			data_direction = DMA_FROM_DEVICE;
			exp_data_len = in_size - rsp_size;

			iov_iter_init(&in_iter, READ, &vq->iov[out], in,
				      rsp_size + exp_data_len);
			iov_iter_advance(&in_iter, rsp_size);
			data_iter = in_iter;
		} else {
			data_direction = DMA_NONE;
			exp_data_len = 0;
		}
		/*
		 * If T10_PI header + payload is present, setup prot_iter values
		 * and recalculate data_iter for vhost_scsi_mapal() mapping to
		 * host scatterlists via get_user_pages_fast().
		 */
		if (t10_pi) {
			if (v_req_pi.pi_bytesout) {
				if (data_direction != DMA_TO_DEVICE) {
					vq_err(vq, "Received non zero pi_bytesout,"
						" but wrong data_direction\n");
					vhost_scsi_send_bad_target(vs, vq, head, out);
					continue;
				}
				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
			} else if (v_req_pi.pi_bytesin) {
				if (data_direction != DMA_FROM_DEVICE) {
					vq_err(vq, "Received non zero pi_bytesin,"
						" but wrong data_direction\n");
					vhost_scsi_send_bad_target(vs, vq, head, out);
					continue;
				}
				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
			}
			/*
			 * Set prot_iter to data_iter and truncate it to
			 * prot_bytes, and advance data_iter past any
			 * preceding prot_bytes that may be present.
			 *
			 * Also fix up the exp_data_len to reflect only the
			 * actual data payload length.
			 */
			if (prot_bytes) {
				exp_data_len -= prot_bytes;
				prot_iter = data_iter;
				iov_iter_truncate(&prot_iter, prot_bytes);
				iov_iter_advance(&data_iter, prot_bytes);
			}
			tag = vhost64_to_cpu(vq, v_req_pi.tag);
			task_attr = v_req_pi.task_attr;
			cdb = &v_req_pi.cdb[0];
			lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF;
		} else {
			tag = vhost64_to_cpu(vq, v_req.tag);
			task_attr = v_req.task_attr;
			cdb = &v_req.cdb[0];
			lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
		}
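		/*
		 * The virtio-scsi LUN field is an 8-byte REPORT LUNS style
		 * address: byte 0 must be 1, byte 1 is the target, and bytes
		 * 2-3 carry the LUN big-endian in the flat addressing format,
		 * hence the 0x3FFF mask applied above.
		 */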
		/*
		 * Check that the received CDB size does not exceed our
		 * hardcoded max for vhost-scsi, then get a pre-allocated
		 * cmd descriptor for the new virtio-scsi tag.
		 *
		 * TODO what if cdb was too small for varlen cdb header?
		 */
		if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
			vq_err(vq, "Received SCSI CDB with command_size: %d that"
				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
				scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
			vhost_scsi_send_bad_target(vs, vq, head, out);
			continue;
		}
		cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
					 exp_data_len + prot_bytes,
					 data_direction);
		if (IS_ERR(cmd)) {
			vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
			       PTR_ERR(cmd));
			vhost_scsi_send_bad_target(vs, vq, head, out);
			continue;
		}
		cmd->tvc_vhost = vs;
		cmd->tvc_vq = vq;
		cmd->tvc_resp_iov = vq->iov[out];
		cmd->tvc_in_iovs = in;

		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
			 cmd->tvc_cdb[0], cmd->tvc_lun);
		pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
			 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);

		if (data_direction != DMA_NONE) {
			ret = vhost_scsi_mapal(cmd,
					       prot_bytes, &prot_iter,
					       exp_data_len, &data_iter);
			if (unlikely(ret)) {
				vq_err(vq, "Failed to map iov to sgl\n");
				vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
				vhost_scsi_send_bad_target(vs, vq, head, out);
				continue;
			}
		}
		/*
		 * Save the descriptor from vhost_get_vq_desc() to be used to
		 * complete the virtio-scsi request in TCM callback context via
		 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
		 */
		cmd->tvc_vq_desc = head;
		/*
		 * Dispatch cmd descriptor for cmwq execution in process
		 * context provided by vhost_scsi_workqueue.  This also ensures
		 * cmd is executed on the same kworker CPU as this vhost
		 * thread to gain positive L2 cache locality effects.
		 */
		INIT_WORK(&cmd->work, vhost_scsi_submission_work);
		queue_work(vhost_scsi_workqueue, &cmd->work);
	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
	mutex_unlock(&vq->mutex);
}

static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
	pr_debug("%s: The handling func for control queue.\n", __func__);
}

static void
vhost_scsi_send_evt(struct vhost_scsi *vs,
		    struct vhost_scsi_tpg *tpg,
		    struct se_lun *lun,
		    u32 event,
		    u32 reason)
{
	struct vhost_scsi_evt *evt;

	evt = vhost_scsi_allocate_evt(vs, event, reason);
	if (!evt)
		return;

	if (tpg && lun) {
		/* TODO: share lun setup code with virtio-scsi.ko */
		/*
		 * Note: evt->event is zeroed when we allocate it and
		 * lun[4-7] need to be zero according to virtio-scsi spec.
		 */
		evt->event.lun[0] = 0x01;
		evt->event.lun[1] = tpg->tport_tpgt;
		if (lun->unpacked_lun >= 256)
			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
	}

	llist_add(&evt->list, &vs->vs_event_list);
	vhost_work_queue(&vs->dev, &vs->vs_event_work);
}

static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	mutex_lock(&vq->mutex);
	if (!vq->private_data)
		goto out;

	if (vs->vs_events_missed)
		vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
out:
	mutex_unlock(&vq->mutex);
}

static void vhost_scsi_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	vhost_scsi_handle_vq(vs, vq);
}

static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
{
	vhost_poll_flush(&vs->vqs[index].vq.poll);
}

/* Callers must hold dev mutex */
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
	struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
	int i;

	/* Init new inflight and remember the old inflight */
	vhost_scsi_init_inflight(vs, old_inflight);

	/*
	 * The inflight->kref was initialized to 1. We decrement it here to
	 * indicate the start of the flush operation so that it will reach 0
	 * when all the reqs are finished.
	 */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);

	/* Flush both the vhost poll and vhost work */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		vhost_scsi_flush_vq(vs, i);
	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
	vhost_work_flush(&vs->dev, &vs->vs_event_work);

	/* Wait for all reqs issued before the flush to be finished */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		wait_for_completion(&old_inflight[i]->comp);
}

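/*
 * Flush walkthrough: vhost_scsi_init_inflight() above flips the counters so
 * new requests charge the fresh generation; the kref_put() drops the initial
 * reference each old counter was born with, so each old counter's completion
 * fires exactly when its last in-flight request completes.
 */
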
/*
 * Called from vhost_scsi_ioctl() context to walk the list of available
 * vhost_scsi_tpg with an active struct vhost_scsi_nexus
 *
 * The lock nesting rule is:
 *    vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
 */
static int
vhost_scsi_set_endpoint(struct vhost_scsi *vs,
			struct vhost_scsi_target *t)
{
	struct se_portal_group *se_tpg;
	struct vhost_scsi_tport *tv_tport;
	struct vhost_scsi_tpg *tpg;
	struct vhost_scsi_tpg **vs_tpg;
	struct vhost_virtqueue *vq;
	int index, ret, i, len;
	bool match = false;

	mutex_lock(&vhost_scsi_mutex);
	mutex_lock(&vs->dev.mutex);

	/* Verify that ring has been setup correctly. */
	for (index = 0; index < vs->dev.nvqs; ++index) {
		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
			ret = -EFAULT;
			goto out;
		}
	}

	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
	vs_tpg = kzalloc(len, GFP_KERNEL);
	if (!vs_tpg) {
		ret = -ENOMEM;
		goto out;
	}
	if (vs->vs_tpg)
		memcpy(vs_tpg, vs->vs_tpg, len);

	list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
		mutex_lock(&tpg->tv_tpg_mutex);
		if (!tpg->tpg_nexus) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			continue;
		}
		if (tpg->tv_tpg_vhost_count != 0) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			continue;
		}
		tv_tport = tpg->tport;

		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
			if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
				kfree(vs_tpg);
				mutex_unlock(&tpg->tv_tpg_mutex);
				ret = -EEXIST;
				goto out;
			}
			/*
			 * In order to ensure individual vhost-scsi configfs
			 * groups cannot be removed while in use by vhost ioctl,
			 * go ahead and take an explicit se_tpg->tpg_group.cg_item
			 * configfs reference now before vhost_scsi_drop_nexus()
			 * can occur.
			 */
			se_tpg = &tpg->se_tpg;
			ret = target_depend_item(&se_tpg->tpg_group.cg_item);
			if (ret) {
				pr_warn("configfs_depend_item() failed: %d\n", ret);
				kfree(vs_tpg);
				mutex_unlock(&tpg->tv_tpg_mutex);
				goto out;
			}
			tpg->tv_tpg_vhost_count++;
			tpg->vhost_scsi = vs;
			vs_tpg[tpg->tport_tpgt] = tpg;
			smp_mb__after_atomic();
			match = true;
		}
		mutex_unlock(&tpg->tv_tpg_mutex);
	}

	if (match) {
		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
		       sizeof(vs->vs_vhost_wwpn));
		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
			vq = &vs->vqs[i].vq;
			mutex_lock(&vq->mutex);
			vq->private_data = vs_tpg;
			vhost_vq_init_access(vq);
			mutex_unlock(&vq->mutex);
		}
		ret = 0;
	} else {
		ret = -EEXIST;
	}

	/*
	 * Act as synchronize_rcu to make sure access to
	 * old vs->vs_tpg is finished.
	 */
	vhost_scsi_flush(vs);
	kfree(vs->vs_tpg);
	vs->vs_tpg = vs_tpg;

out:
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&vhost_scsi_mutex);
	return ret;
}

static int
vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
			  struct vhost_scsi_target *t)
{
	struct se_portal_group *se_tpg;
	struct vhost_scsi_tport *tv_tport;
	struct vhost_scsi_tpg *tpg;
	struct vhost_virtqueue *vq;
	bool match = false;
	int index, ret, i;
	u8 target;

	mutex_lock(&vhost_scsi_mutex);
	mutex_lock(&vs->dev.mutex);
	/* Verify that ring has been setup correctly. */
	for (index = 0; index < vs->dev.nvqs; ++index) {
		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
			ret = -EFAULT;
			goto err_dev;
		}
	}

	if (!vs->vs_tpg) {
		ret = 0;
		goto err_dev;
	}

	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
		target = i;
		tpg = vs->vs_tpg[target];
		if (!tpg)
			continue;

		mutex_lock(&tpg->tv_tpg_mutex);
		tv_tport = tpg->tport;
		if (!tv_tport) {
			ret = -ENODEV;
			goto err_tpg;
		}

		if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
			pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
				" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
				tv_tport->tport_name, tpg->tport_tpgt,
				t->vhost_wwpn, t->vhost_tpgt);
			ret = -EINVAL;
			goto err_tpg;
		}
		tpg->tv_tpg_vhost_count--;
		tpg->vhost_scsi = NULL;
		vs->vs_tpg[target] = NULL;
		match = true;
		mutex_unlock(&tpg->tv_tpg_mutex);
		/*
		 * Release se_tpg->tpg_group.cg_item configfs dependency now
		 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
		 */
		se_tpg = &tpg->se_tpg;
		target_undepend_item(&se_tpg->tpg_group.cg_item);
	}
	if (match) {
		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
			vq = &vs->vqs[i].vq;
			mutex_lock(&vq->mutex);
			vq->private_data = NULL;
			mutex_unlock(&vq->mutex);
		}
	}
	/*
	 * Act as synchronize_rcu to make sure access to
	 * old vs->vs_tpg is finished.
	 */
	vhost_scsi_flush(vs);
	kfree(vs->vs_tpg);
	vs->vs_tpg = NULL;
	WARN_ON(vs->vs_events_nr);
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&vhost_scsi_mutex);
	return 0;

err_tpg:
	mutex_unlock(&tpg->tv_tpg_mutex);
err_dev:
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&vhost_scsi_mutex);
	return ret;
}

static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_SCSI_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vs->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vs->dev)) {
		mutex_unlock(&vs->dev.mutex);
		return -EFAULT;
	}

	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
		vq = &vs->vqs[i].vq;
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vs->dev.mutex);
	return 0;
}

static int vhost_scsi_open(struct inode *inode, struct file *f)
{
	struct vhost_scsi *vs;
	struct vhost_virtqueue **vqs;
	int r = -ENOMEM, i;

	vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL);
	if (!vs) {
		vs = vzalloc(sizeof(*vs));
		if (!vs)
			goto err_vs;
	}

	vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		goto err_vqs;

	vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
	vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);

	vs->vs_events_nr = 0;
	vs->vs_events_missed = false;

	vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
	vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
	vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
	for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
		vqs[i] = &vs->vqs[i].vq;
		vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
	}
	vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, VHOST_SCSI_WEIGHT, 0);

	vhost_scsi_init_inflight(vs, NULL);

	f->private_data = vs;
	return 0;

err_vqs:
	kvfree(vs);
err_vs:
	return r;
}

static int vhost_scsi_release(struct inode *inode, struct file *f)
{
	struct vhost_scsi *vs = f->private_data;
	struct vhost_scsi_target t;

	mutex_lock(&vs->dev.mutex);
	memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
	mutex_unlock(&vs->dev.mutex);
	vhost_scsi_clear_endpoint(vs, &t);
	vhost_dev_stop(&vs->dev);
	vhost_dev_cleanup(&vs->dev, false);
	/* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
	vhost_scsi_flush(vs);
	kfree(vs->dev.vqs);
	kvfree(vs);
	return 0;
}

static long
vhost_scsi_ioctl(struct file *f,
		 unsigned int ioctl,
		 unsigned long arg)
{
	struct vhost_scsi *vs = f->private_data;
	struct vhost_scsi_target backend;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u32 __user *eventsp = argp;
	u32 events_missed;
	u64 features;
	int r, abi_version = VHOST_SCSI_ABI_VERSION;
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;

	switch (ioctl) {
	case VHOST_SCSI_SET_ENDPOINT:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		if (backend.reserved != 0)
			return -EOPNOTSUPP;

		return vhost_scsi_set_endpoint(vs, &backend);
	case VHOST_SCSI_CLEAR_ENDPOINT:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		if (backend.reserved != 0)
			return -EOPNOTSUPP;

		return vhost_scsi_clear_endpoint(vs, &backend);
	case VHOST_SCSI_GET_ABI_VERSION:
		if (copy_to_user(argp, &abi_version, sizeof abi_version))
			return -EFAULT;
		return 0;
	case VHOST_SCSI_SET_EVENTS_MISSED:
		if (get_user(events_missed, eventsp))
			return -EFAULT;
		mutex_lock(&vq->mutex);
		vs->vs_events_missed = events_missed;
		mutex_unlock(&vq->mutex);
		return 0;
	case VHOST_SCSI_GET_EVENTS_MISSED:
		mutex_lock(&vq->mutex);
		events_missed = vs->vs_events_missed;
		mutex_unlock(&vq->mutex);
		if (put_user(events_missed, eventsp))
			return -EFAULT;
		return 0;
	case VHOST_GET_FEATURES:
		features = VHOST_SCSI_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		return vhost_scsi_set_features(vs, features);
	default:
		mutex_lock(&vs->dev.mutex);
		r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
		/* TODO: flush backend after dev ioctl. */
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
		mutex_unlock(&vs->dev.mutex);
		return r;
	}
}

#ifdef CONFIG_COMPAT
static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
				unsigned long arg)
{
	return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_scsi_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_scsi_release,
	.unlocked_ioctl = vhost_scsi_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vhost_scsi_compat_ioctl,
#endif
	.open           = vhost_scsi_open,
	.llseek		= noop_llseek,
};

static struct miscdevice vhost_scsi_misc = {
	MISC_DYNAMIC_MINOR,
	"vhost-scsi",
	&vhost_scsi_fops,
};

static int __init vhost_scsi_register(void)
{
	return misc_register(&vhost_scsi_misc);
}

static void vhost_scsi_deregister(void)
{
	misc_deregister(&vhost_scsi_misc);
}

static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
{
	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}

static void
vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
		   struct se_lun *lun, bool plug)
{
	struct vhost_scsi *vs = tpg->vhost_scsi;
	struct vhost_virtqueue *vq;
	u32 reason;

	if (!vs)
		return;

	mutex_lock(&vs->dev.mutex);

	if (plug)
		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
	else
		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;

	vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	mutex_lock(&vq->mutex);
	if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
		vhost_scsi_send_evt(vs, tpg, lun,
				   VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
	mutex_unlock(&vq->mutex);
	mutex_unlock(&vs->dev.mutex);
}

static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
{
	vhost_scsi_do_plug(tpg, lun, true);
}

static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
{
	vhost_scsi_do_plug(tpg, lun, false);
}

static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
			       struct se_lun *lun)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	mutex_lock(&vhost_scsi_mutex);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count++;
	mutex_unlock(&tpg->tv_tpg_mutex);

	vhost_scsi_hotplug(tpg, lun);

	mutex_unlock(&vhost_scsi_mutex);

	return 0;
}

static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
				  struct se_lun *lun)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	mutex_lock(&vhost_scsi_mutex);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count--;
	mutex_unlock(&tpg->tv_tpg_mutex);

	vhost_scsi_hotunplug(tpg, lun);

	mutex_unlock(&vhost_scsi_mutex);
}

static void vhost_scsi_free_cmd_map_res(struct se_session *se_sess)
{
	struct vhost_scsi_cmd *tv_cmd;
	unsigned int i;

	if (!se_sess->sess_cmd_map)
		return;

	for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
		tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];

		kfree(tv_cmd->tvc_sgl);
		kfree(tv_cmd->tvc_prot_sgl);
		kfree(tv_cmd->tvc_upages);
	}
}

static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
		struct config_item *item, const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	unsigned long val;
	int ret = kstrtoul(page, 0, &val);

	if (ret) {
		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
		return ret;
	}
	if (val != 0 && val != 1 && val != 3) {
		pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
		return -EINVAL;
	}
	tpg->tv_fabric_prot_type = val;

	return count;
}

static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
		struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	return sprintf(page, "%d\n", tpg->tv_fabric_prot_type);
}

CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);

static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
	&vhost_scsi_tpg_attrib_attr_fabric_prot_type,
	NULL,
};

static int vhost_scsi_nexus_cb(struct se_portal_group *se_tpg,
			       struct se_session *se_sess, void *p)
{
	struct vhost_scsi_cmd *tv_cmd;
	unsigned int i;

	for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
		tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];

		tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
					VHOST_SCSI_PREALLOC_SGLS, GFP_KERNEL);
		if (!tv_cmd->tvc_sgl) {
			pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
			goto out;
		}

		tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
					VHOST_SCSI_PREALLOC_UPAGES, GFP_KERNEL);
		if (!tv_cmd->tvc_upages) {
			pr_err("Unable to allocate tv_cmd->tvc_upages\n");
			goto out;
		}

		tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) *
					VHOST_SCSI_PREALLOC_PROT_SGLS, GFP_KERNEL);
		if (!tv_cmd->tvc_prot_sgl) {
			pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
			goto out;
		}
	}
	return 0;
out:
	vhost_scsi_free_cmd_map_res(se_sess);
	return -ENOMEM;
}

static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
				 const char *name)
{
	struct vhost_scsi_nexus *tv_nexus;

	mutex_lock(&tpg->tv_tpg_mutex);
	if (tpg->tpg_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_debug("tpg->tpg_nexus already exists\n");
		return -EEXIST;
	}

	tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to allocate struct vhost_scsi_nexus\n");
		return -ENOMEM;
	}
	/*
	 * Since we are running in 'demo mode' this call will generate a
	 * struct se_node_acl for the vhost_scsi struct se_portal_group with
	 * the SCSI Initiator port name of the passed configfs group 'name'.
	 */
	tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg,
					VHOST_SCSI_DEFAULT_TAGS,
					sizeof(struct vhost_scsi_cmd),
					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
					(unsigned char *)name, tv_nexus,
					vhost_scsi_nexus_cb);
	if (IS_ERR(tv_nexus->tvn_se_sess)) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		kfree(tv_nexus);
		return -ENOMEM;
	}
	tpg->tpg_nexus = tv_nexus;

	mutex_unlock(&tpg->tv_tpg_mutex);
	return 0;
}

static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
{
	struct se_session *se_sess;
	struct vhost_scsi_nexus *tv_nexus;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	se_sess = tv_nexus->tvn_se_sess;
	if (!se_sess) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	if (tpg->tv_tpg_port_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
			" active TPG port count: %d\n",
			tpg->tv_tpg_port_count);
		return -EBUSY;
	}

	if (tpg->tv_tpg_vhost_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
			" active TPG vhost count: %d\n",
			tpg->tv_tpg_vhost_count);
		return -EBUSY;
	}

	pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
		" %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
		tv_nexus->tvn_se_sess->se_node_acl->initiatorname);

	vhost_scsi_free_cmd_map_res(se_sess);
	/*
	 * Release the SCSI I_T Nexus to the emulated vhost Target Port
	 */
	transport_deregister_session(tv_nexus->tvn_se_sess);
	tpg->tpg_nexus = NULL;
	mutex_unlock(&tpg->tv_tpg_mutex);

	kfree(tv_nexus);
	return 0;
}

static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_nexus *tv_nexus;
	ssize_t ret;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}
	ret = snprintf(page, PAGE_SIZE, "%s\n",
			tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
	mutex_unlock(&tpg->tv_tpg_mutex);

	return ret;
}

static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tport *tport_wwn = tpg->tport;
	unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed..
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = vhost_scsi_drop_nexus(tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in vhost_scsi_make_tport(), and call
	 * vhost_scsi_make_nexus().
	 */
	if (strlen(page) >= VHOST_SCSI_NAMELEN) {
		pr_err("Emulated NAA Sas Address: %s, exceeds"
				" max: %d\n", page, VHOST_SCSI_NAMELEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port:"
			" %s\n", i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port)-1] == '\n')
		i_port[strlen(i_port)-1] = '\0';

	ret = vhost_scsi_make_nexus(tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}

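/*
 * Typical configfs usage (illustrative paths, not part of this file): writing
 * an initiator WWN such as "naa.6001405..." or an "iqn." string into
 * /sys/kernel/config/target/vhost/<wwpn>/tpgt_<N>/nexus creates the I_T
 * nexus; writing "NULL" tears it down via vhost_scsi_drop_nexus().
 */
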
CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);

static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
	&vhost_scsi_tpg_attr_nexus,
	NULL,
};

static struct se_portal_group *
vhost_scsi_make_tpg(struct se_wwn *wwn,
		    struct config_group *group,
		    const char *name)
{
	struct vhost_scsi_tport *tport = container_of(wwn,
			struct vhost_scsi_tport, tport_wwn);

	struct vhost_scsi_tpg *tpg;
	u16 tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
		return ERR_PTR(-EINVAL);

	tpg = kzalloc(sizeof(struct vhost_scsi_tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct vhost_scsi_tpg");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&tpg->tv_tpg_mutex);
	INIT_LIST_HEAD(&tpg->tv_tpg_list);
	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;

	ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
	if (ret < 0) {
		kfree(tpg);
		return NULL;
	}
	mutex_lock(&vhost_scsi_mutex);
	list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
	mutex_unlock(&vhost_scsi_mutex);

	return &tpg->se_tpg;
}

static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	mutex_lock(&vhost_scsi_mutex);
	list_del(&tpg->tv_tpg_list);
	mutex_unlock(&vhost_scsi_mutex);
	/*
	 * Release the virtual I_T Nexus for this vhost TPG
	 */
	vhost_scsi_drop_nexus(tpg);
	/*
	 * Deregister the se_tpg from TCM..
	 */
	core_tpg_deregister(se_tpg);
	kfree(tpg);
}

static struct se_wwn *
vhost_scsi_make_tport(struct target_fabric_configfs *tf,
		      struct config_group *group,
		      const char *name)
{
	struct vhost_scsi_tport *tport;
	char *ptr;
	u64 wwpn = 0;
	int off = 0;

	/* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
		return ERR_PTR(-EINVAL); */

	tport = kzalloc(sizeof(struct vhost_scsi_tport), GFP_KERNEL);
	if (!tport) {
		pr_err("Unable to allocate struct vhost_scsi_tport");
		return ERR_PTR(-ENOMEM);
	}
	tport->tport_wwpn = wwpn;
	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
		goto check_len;
	}

	pr_err("Unable to locate prefix for emulated Target Port:"
			" %s\n", name);
	kfree(tport);
	return ERR_PTR(-EINVAL);

check_len:
	if (strlen(name) >= VHOST_SCSI_NAMELEN) {
		pr_err("Emulated %s Address: %s, exceeds"
			" max: %d\n", vhost_scsi_dump_proto_id(tport), name,
			VHOST_SCSI_NAMELEN);
		kfree(tport);
		return ERR_PTR(-EINVAL);
	}
	snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);

	pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);

	return &tport->tport_wwn;
}

static void vhost_scsi_drop_tport(struct se_wwn *wwn)
{
	struct vhost_scsi_tport *tport = container_of(wwn,
				struct vhost_scsi_tport, tport_wwn);

	pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
		tport->tport_name);

	kfree(tport);
}

static ssize_t
vhost_scsi_wwn_version_show(struct config_item *item, char *page)
{
	return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
		" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
		utsname()->machine);
}

CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);

static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
	&vhost_scsi_wwn_attr_version,
	NULL,
};

static const struct target_core_fabric_ops vhost_scsi_ops = {
	.module				= THIS_MODULE,
	.name				= "vhost",
	.get_fabric_name		= vhost_scsi_get_fabric_name,
	.tpg_get_wwn			= vhost_scsi_get_fabric_wwn,
	.tpg_get_tag			= vhost_scsi_get_tpgt,
	.tpg_check_demo_mode		= vhost_scsi_check_true,
	.tpg_check_demo_mode_cache	= vhost_scsi_check_true,
	.tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
	.tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
	.tpg_check_prot_fabric_only	= vhost_scsi_check_prot_fabric_only,
	.tpg_get_inst_index		= vhost_scsi_tpg_get_inst_index,
	.release_cmd			= vhost_scsi_release_cmd,
	.check_stop_free		= vhost_scsi_check_stop_free,
	.sess_get_index			= vhost_scsi_sess_get_index,
	.sess_get_initiator_sid		= NULL,
	.write_pending			= vhost_scsi_write_pending,
	.write_pending_status		= vhost_scsi_write_pending_status,
	.set_default_node_attributes	= vhost_scsi_set_default_node_attrs,
	.get_cmd_state			= vhost_scsi_get_cmd_state,
	.queue_data_in			= vhost_scsi_queue_data_in,
	.queue_status			= vhost_scsi_queue_status,
	.queue_tm_rsp			= vhost_scsi_queue_tm_rsp,
	.aborted_task			= vhost_scsi_aborted_task,
	/*
	 * Setup callers for generic logic in target_core_fabric_configfs.c
	 */
	.fabric_make_wwn		= vhost_scsi_make_tport,
	.fabric_drop_wwn		= vhost_scsi_drop_tport,
	.fabric_make_tpg		= vhost_scsi_make_tpg,
	.fabric_drop_tpg		= vhost_scsi_drop_tpg,
	.fabric_post_link		= vhost_scsi_port_link,
	.fabric_pre_unlink		= vhost_scsi_port_unlink,

	.tfc_wwn_attrs			= vhost_scsi_wwn_attrs,
	.tfc_tpg_base_attrs		= vhost_scsi_tpg_attrs,
	.tfc_tpg_attrib_attrs		= vhost_scsi_tpg_attrib_attrs,
};

static int __init vhost_scsi_init(void)
{
	int ret = -ENOMEM;

	pr_debug("TCM_VHOST fabric module %s on %s/%s"
		" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
		utsname()->machine);

	/*
	 * Use our own dedicated workqueue for submitting I/O into
	 * target core to avoid contention within system_wq.
	 */
	vhost_scsi_workqueue = alloc_workqueue("vhost_scsi", 0, 0);
	if (!vhost_scsi_workqueue)
		goto out;

	ret = vhost_scsi_register();
	if (ret < 0)
		goto out_destroy_workqueue;

	ret = target_register_template(&vhost_scsi_ops);
	if (ret < 0)
		goto out_vhost_scsi_deregister;

	return 0;

out_vhost_scsi_deregister:
	vhost_scsi_deregister();
out_destroy_workqueue:
	destroy_workqueue(vhost_scsi_workqueue);
out:
	return ret;
}

static void vhost_scsi_exit(void)
{
	target_unregister_template(&vhost_scsi_ops);
	vhost_scsi_deregister();
	destroy_workqueue(vhost_scsi_workqueue);
}

MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
MODULE_ALIAS("tcm_vhost");
MODULE_LICENSE("GPL");
module_init(vhost_scsi_init);
module_exit(vhost_scsi_exit);