/*******************************************************************************
 * Vhost kernel TCM fabric driver for virtio SCSI initiators
 *
 * (C) Copyright 2010-2013 Datera, Inc.
 * (C) Copyright 2010-2012 IBM Corp.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Authors: Nicholas A. Bellinger <nab@daterainc.com>
 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 ****************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vmalloc.h>
#include <linux/miscdevice.h>
#include <asm/unaligned.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <linux/vhost.h>
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>

#include "vhost.h"
#define VHOST_SCSI_VERSION "v0.1"
#define VHOST_SCSI_NAMELEN 256
#define VHOST_SCSI_MAX_CDB_SIZE 32
#define VHOST_SCSI_DEFAULT_TAGS 256
#define VHOST_SCSI_PREALLOC_SGLS 2048
#define VHOST_SCSI_PREALLOC_UPAGES 2048
#define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
/* Max number of requests before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * requests.
 */
#define VHOST_SCSI_WEIGHT 256
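/*
 * VHOST_SCSI_WEIGHT is consumed via vhost_exceeds_weight() in the
 * request/control queue handlers below: after roughly this many
 * descriptors the handler returns and lets vhost requeue the work,
 * so the other virtqueues of the device get a turn.
 */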
struct vhost_scsi_inflight {
	/* Wait for the flush operation to finish */
	struct completion comp;
	/* Refcount for the inflight reqs */
	struct kref kref;
};
struct vhost_scsi_cmd {
	/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
	int tvc_vq_desc;
	/* virtio-scsi initiator task attribute */
	int tvc_task_attr;
	/* virtio-scsi response incoming iovecs */
	int tvc_in_iovs;
	/* virtio-scsi initiator data direction */
	enum dma_data_direction tvc_data_direction;
	/* Expected data transfer length from virtio-scsi header */
	u32 tvc_exp_data_len;
	/* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
	u64 tvc_tag;
	/* The number of scatterlists associated with this cmd */
	u32 tvc_sgl_count;
	u32 tvc_prot_sgl_count;
	/* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */
	u32 tvc_lun;
	/* Pointer to the SGL formatted memory from virtio-scsi */
	struct scatterlist *tvc_sgl;
	struct scatterlist *tvc_prot_sgl;
	struct page **tvc_upages;
	/* Pointer to response header iovec */
	struct iovec tvc_resp_iov;
	/* Pointer to vhost_scsi for our device */
	struct vhost_scsi *tvc_vhost;
	/* Pointer to vhost_virtqueue for the cmd */
	struct vhost_virtqueue *tvc_vq;
	/* Pointer to vhost nexus memory */
	struct vhost_scsi_nexus *tvc_nexus;
	/* The TCM I/O descriptor that is accessed via container_of() */
	struct se_cmd tvc_se_cmd;
	/* work item used for cmwq dispatch to vhost_scsi_submission_work() */
	struct work_struct work;
	/* Copy of the incoming SCSI command descriptor block (CDB) */
	unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
	/* Sense buffer that will be mapped into outgoing status */
	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
	/* Completed commands list, serviced from vhost worker thread */
	struct llist_node tvc_completion_list;
	/* Used to track inflight cmd */
	struct vhost_scsi_inflight *inflight;
};
struct vhost_scsi_nexus {
	/* Pointer to TCM session for I_T Nexus */
	struct se_session *tvn_se_sess;
};
struct vhost_scsi_tpg {
	/* Vhost port target portal group tag for TCM */
	u16 tport_tpgt;
	/* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
	int tv_tpg_port_count;
	/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
	int tv_tpg_vhost_count;
	/* Used for enabling T10-PI with legacy devices */
	int tv_fabric_prot_type;
	/* list for vhost_scsi_list */
	struct list_head tv_tpg_list;
	/* Used to protect access for tpg_nexus */
	struct mutex tv_tpg_mutex;
	/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
	struct vhost_scsi_nexus *tpg_nexus;
	/* Pointer back to vhost_scsi_tport */
	struct vhost_scsi_tport *tport;
	/* Returned by vhost_scsi_make_tpg() */
	struct se_portal_group se_tpg;
	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
	struct vhost_scsi *vhost_scsi;
};
struct vhost_scsi_tport {
	/* SCSI protocol the tport is providing */
	u16 tport_proto_id;
	/* Binary World Wide unique Port Name for Vhost Target port */
	u64 tport_wwpn;
	/* ASCII formatted WWPN for Vhost Target port */
	char tport_name[VHOST_SCSI_NAMELEN];
	/* Returned by vhost_scsi_make_tport() */
	struct se_wwn tport_wwn;
};
struct vhost_scsi_evt {
	/* event to be sent to guest */
	struct virtio_scsi_event event;
	/* event list, serviced from vhost worker thread */
	struct llist_node list;
};
enum {
	VHOST_SCSI_VQ_CTL = 0,
	VHOST_SCSI_VQ_EVT = 1,
	VHOST_SCSI_VQ_IO = 2,
};
/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
enum {
	VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
					       (1ULL << VIRTIO_SCSI_F_T10_PI)
};
#define VHOST_SCSI_MAX_TARGET 256
#define VHOST_SCSI_MAX_VQ 128
#define VHOST_SCSI_MAX_EVENT 128
struct vhost_scsi_virtqueue {
	struct vhost_virtqueue vq;
	/*
	 * Reference counting for inflight reqs, used for flush operation. At
	 * each time, one reference tracks new commands submitted, while we
	 * wait for another one to reach 0.
	 */
	struct vhost_scsi_inflight inflights[2];
	/*
	 * Indicate current inflight in use, protected by vq->mutex.
	 * Writers must also take dev mutex and flush under it.
	 */
	int inflight_idx;
};
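/*
 * Flush protocol, in short: vhost_scsi_flush() flips inflight_idx so
 * that new commands take a reference on the fresh inflights[] slot,
 * drops the initial kref of the old slot, and then sleeps on its
 * completion, which fires once every command that still holds the old
 * slot (see vhost_scsi_get_inflight/vhost_scsi_put_inflight) is done.
 */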
struct vhost_scsi {
	/* Protected by vhost_scsi->dev.mutex */
	struct vhost_scsi_tpg **vs_tpg;
	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];

	struct vhost_dev dev;
	struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];

	struct vhost_work vs_completion_work; /* cmd completion work item */
	struct llist_head vs_completion_list; /* cmd completion queue */

	struct vhost_work vs_event_work; /* evt injection work item */
	struct llist_head vs_event_list; /* evt injection queue */

	bool vs_events_missed; /* any missed events, protected by vq->mutex */
	int vs_events_nr; /* num of pending events, protected by vq->mutex */
};
/*
 * Context for processing request and control queue operations.
 */
struct vhost_scsi_ctx {
	int head;
	unsigned int out, in;
	size_t req_size, rsp_size;
	size_t out_size, in_size;
	u8 *target, *lunp;
	void *req;
	struct iov_iter out_iter;
};
static struct workqueue_struct *vhost_scsi_workqueue;

/* Global mutex to protect vhost_scsi TPG list for vhost IOCTL access */
static DEFINE_MUTEX(vhost_scsi_mutex);
static LIST_HEAD(vhost_scsi_list);
static void vhost_scsi_done_inflight(struct kref *kref)
{
	struct vhost_scsi_inflight *inflight;

	inflight = container_of(kref, struct vhost_scsi_inflight, kref);
	complete(&inflight->comp);
}
static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
				     struct vhost_scsi_inflight *old_inflight[])
{
	struct vhost_scsi_inflight *new_inflight;
	struct vhost_virtqueue *vq;
	int idx, i;

	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
		vq = &vs->vqs[i].vq;

		mutex_lock(&vq->mutex);

		/* store old inflight */
		idx = vs->vqs[i].inflight_idx;
		if (old_inflight)
			old_inflight[i] = &vs->vqs[i].inflights[idx];

		/* setup new inflight */
		vs->vqs[i].inflight_idx = idx ^ 1;
		new_inflight = &vs->vqs[i].inflights[idx ^ 1];
		kref_init(&new_inflight->kref);
		init_completion(&new_inflight->comp);

		mutex_unlock(&vq->mutex);
	}
}
static struct vhost_scsi_inflight *
vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
{
	struct vhost_scsi_inflight *inflight;
	struct vhost_scsi_virtqueue *svq;

	svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
	inflight = &svq->inflights[svq->inflight_idx];
	kref_get(&inflight->kref);

	return inflight;
}

static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
{
	kref_put(&inflight->kref, vhost_scsi_done_inflight);
}
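/*
 * Each command pairs one vhost_scsi_get_inflight() at allocation time
 * (vhost_scsi_get_tag) with one vhost_scsi_put_inflight() at release
 * time (vhost_scsi_release_cmd_res), so an inflight generation's kref
 * can only reach zero once all commands issued against it have been
 * completed and released.
 */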
static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	return tpg->tport_tpgt;
}

static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	return tpg->tv_fabric_prot_type;
}

static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}
static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
{
	struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
				struct vhost_scsi_cmd, tvc_se_cmd);
	struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
	int i;

	if (tv_cmd->tvc_sgl_count) {
		for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
			put_page(sg_page(&tv_cmd->tvc_sgl[i]));
	}
	if (tv_cmd->tvc_prot_sgl_count) {
		for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
			put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
	}

	vhost_scsi_put_inflight(tv_cmd->inflight);
	target_free_tag(se_sess, se_cmd);
}

static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
{
	struct vhost_scsi_cmd *cmd = container_of(se_cmd,
				struct vhost_scsi_cmd, tvc_se_cmd);
	struct vhost_scsi *vs = cmd->tvc_vhost;

	llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
	vhost_work_queue(&vs->dev, &vs->vs_completion_work);
}
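/*
 * Note that release runs in TCM callback context: instead of touching
 * the vring here, the command is pushed onto the lockless
 * vs_completion_list and vs_completion_work is queued, so the response
 * is written back from the vhost worker thread, which owns the mm of
 * the process that set up the device.
 */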
static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
{
	/* Go ahead and process the write immediately */
	target_execute_cmd(se_cmd);

	return 0;
}

static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
{
	return;
}

static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, 0);
	return 0;
}

static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, 0);
	return 0;
}

static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
{
	return;
}

static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
{
	return;
}
static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
	vs->vs_events_nr--;
	kfree(evt);
}

static struct vhost_scsi_evt *
vhost_scsi_allocate_evt(struct vhost_scsi *vs,
			u32 event, u32 reason)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct vhost_scsi_evt *evt;

	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
		vs->vs_events_missed = true;
		return NULL;
	}

	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
	if (!evt) {
		vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
		vs->vs_events_missed = true;
		return NULL;
	}

	evt->event.event = cpu_to_vhost32(vq, event);
	evt->event.reason = cpu_to_vhost32(vq, reason);
	vs->vs_events_nr++;

	return evt;
}
static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
{
	return target_put_sess_cmd(se_cmd);
}
static void
vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct virtio_scsi_event *event = &evt->event;
	struct virtio_scsi_event __user *eventp;
	unsigned out, in;
	int head, ret;

	if (!vq->private_data) {
		vs->vs_events_missed = true;
		return;
	}

again:
	vhost_disable_notify(&vs->dev, vq);
	head = vhost_get_vq_desc(vq, vq->iov,
			ARRAY_SIZE(vq->iov), &out, &in,
			NULL, NULL);
	if (head < 0) {
		vs->vs_events_missed = true;
		return;
	}
	if (head == vq->num) {
		if (vhost_enable_notify(&vs->dev, vq))
			goto again;
		vs->vs_events_missed = true;
		return;
	}

	if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
				vq->iov[out].iov_len);
		vs->vs_events_missed = true;
		return;
	}

	if (vs->vs_events_missed) {
		event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
		vs->vs_events_missed = false;
	}

	eventp = vq->iov[out].iov_base;
	ret = __copy_to_user(eventp, event, sizeof(*event));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		vq_err(vq, "Faulted on vhost_scsi_send_event\n");
}
static void vhost_scsi_evt_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_event_work);
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct vhost_scsi_evt *evt, *t;
	struct llist_node *llnode;

	mutex_lock(&vq->mutex);
	llnode = llist_del_all(&vs->vs_event_list);
	llist_for_each_entry_safe(evt, t, llnode, list) {
		vhost_scsi_do_evt_work(vs, evt);
		vhost_scsi_free_evt(vs, evt);
	}
	mutex_unlock(&vq->mutex);
}
/* Fill in status and signal that we are done processing this command
 *
 * This is scheduled in the vhost work queue so we are called with the owner
 * process mm and can access the vring.
 */
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_completion_work);
	DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
	struct virtio_scsi_cmd_resp v_rsp;
	struct vhost_scsi_cmd *cmd, *t;
	struct llist_node *llnode;
	struct se_cmd *se_cmd;
	struct iov_iter iov_iter;
	int ret, vq;

	bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
	llnode = llist_del_all(&vs->vs_completion_list);
	llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
		se_cmd = &cmd->tvc_se_cmd;

		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
			 cmd, se_cmd->residual_count, se_cmd->scsi_status);

		memset(&v_rsp, 0, sizeof(v_rsp));
		v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
		/* TODO is status_qualifier field needed? */
		v_rsp.status = se_cmd->scsi_status;
		v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
						 se_cmd->scsi_sense_length);
		memcpy(v_rsp.sense, cmd->tvc_sense_buf,
		       se_cmd->scsi_sense_length);

		iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov,
			      cmd->tvc_in_iovs, sizeof(v_rsp));
		ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
		if (likely(ret == sizeof(v_rsp))) {
			struct vhost_scsi_virtqueue *q;
			vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
			q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
			vq = q - vs->vqs;
			__set_bit(vq, signal);
		} else
			pr_err("Faulted on virtio_scsi_cmd_resp\n");

		vhost_scsi_release_cmd_res(se_cmd);
	}

	vq = -1;
	while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
		< VHOST_SCSI_MAX_VQ)
		vhost_signal(&vs->dev, &vs->vqs[vq].vq);
}
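/*
 * The virtio_scsi_cmd_resp writeback above is batched: used entries
 * are added per command with vhost_add_used(), while guest signaling
 * is coalesced in the "signal" bitmap and done once per virtqueue via
 * vhost_signal() after the completion list has been drained.
 */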
static struct vhost_scsi_cmd *
vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
		   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
		   u32 exp_data_len, int data_direction)
{
	struct vhost_scsi_cmd *cmd;
	struct vhost_scsi_nexus *tv_nexus;
	struct se_session *se_sess;
	struct scatterlist *sg, *prot_sg;
	struct page **pages;
	int tag, cpu;

	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		pr_err("Unable to locate active struct vhost_scsi_nexus\n");
		return ERR_PTR(-EIO);
	}
	se_sess = tv_nexus->tvn_se_sess;

	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
	if (tag < 0) {
		pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
		return ERR_PTR(-ENOMEM);
	}

	cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag];
	sg = cmd->tvc_sgl;
	prot_sg = cmd->tvc_prot_sgl;
	pages = cmd->tvc_upages;
	memset(cmd, 0, sizeof(*cmd));
	cmd->tvc_sgl = sg;
	cmd->tvc_prot_sgl = prot_sg;
	cmd->tvc_upages = pages;
	cmd->tvc_se_cmd.map_tag = tag;
	cmd->tvc_se_cmd.map_cpu = cpu;
	cmd->tvc_tag = scsi_tag;
	cmd->tvc_lun = lun;
	cmd->tvc_task_attr = task_attr;
	cmd->tvc_exp_data_len = exp_data_len;
	cmd->tvc_data_direction = data_direction;
	cmd->tvc_nexus = tv_nexus;
	cmd->inflight = vhost_scsi_get_inflight(vq);

	memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);

	return cmd;
}
/*
 * Map a user memory range into a scatterlist
 *
 * Returns the number of scatterlist entries used or -errno on error.
 */
static int
vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
		      struct iov_iter *iter,
		      struct scatterlist *sgl,
		      bool write)
{
	struct page **pages = cmd->tvc_upages;
	struct scatterlist *sg = sgl;
	ssize_t bytes;
	size_t offset;
	unsigned int npages = 0;

	bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
				VHOST_SCSI_PREALLOC_UPAGES, &offset);
	/* No pages were pinned */
	if (bytes <= 0)
		return bytes < 0 ? bytes : -EFAULT;

	iov_iter_advance(iter, bytes);

	while (bytes) {
		unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
		sg_set_page(sg++, pages[npages++], n, offset);
		bytes -= n;
		offset = 0;
	}
	return npages;
}
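/*
 * iov_iter_get_pages() pins at most VHOST_SCSI_PREALLOC_UPAGES user
 * pages per call and may stop short of the full iter, so the caller
 * (vhost_scsi_iov_to_sgl) keeps invoking this helper until the iter
 * is drained; each pinned page becomes one scatterlist entry covering
 * up to PAGE_SIZE - offset bytes.
 */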
static int
vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
{
	int sgl_count = 0;

	if (!iter || !iter->iov) {
		pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
		       " present\n", __func__, bytes);
		return -EINVAL;
	}

	sgl_count = iov_iter_npages(iter, 0xffff);
	if (sgl_count > max_sgls) {
		pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
		       " max_sgls: %d\n", __func__, sgl_count, max_sgls);
		return -EINVAL;
	}
	return sgl_count;
}
static int
vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
		      struct iov_iter *iter,
		      struct scatterlist *sg, int sg_count)
{
	struct scatterlist *p = sg;
	int ret;

	while (iov_iter_count(iter)) {
		ret = vhost_scsi_map_to_sgl(cmd, iter, sg, write);
		if (ret < 0) {
			while (p < sg) {
				struct page *page = sg_page(p++);
				if (page)
					put_page(page);
			}
			return ret;
		}
		sg += ret;
	}
	return 0;
}
static int
vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
		 size_t prot_bytes, struct iov_iter *prot_iter,
		 size_t data_bytes, struct iov_iter *data_iter)
{
	int sgl_count, ret;
	bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);

	if (prot_bytes) {
		sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
						 VHOST_SCSI_PREALLOC_PROT_SGLS);
		if (sgl_count < 0)
			return sgl_count;

		sg_init_table(cmd->tvc_prot_sgl, sgl_count);
		cmd->tvc_prot_sgl_count = sgl_count;
		pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
			 cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);

		ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
					    cmd->tvc_prot_sgl,
					    cmd->tvc_prot_sgl_count);
		if (ret < 0) {
			cmd->tvc_prot_sgl_count = 0;
			return ret;
		}
	}
	sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
					 VHOST_SCSI_PREALLOC_SGLS);
	if (sgl_count < 0)
		return sgl_count;

	sg_init_table(cmd->tvc_sgl, sgl_count);
	cmd->tvc_sgl_count = sgl_count;
	pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
		 cmd->tvc_sgl, cmd->tvc_sgl_count);

	ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
				    cmd->tvc_sgl, cmd->tvc_sgl_count);
	if (ret < 0) {
		cmd->tvc_sgl_count = 0;
		return ret;
	}
	return 0;
}
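/*
 * "write" above follows get_user_pages() semantics: DMA_FROM_DEVICE
 * means the backend will write into the guest buffer, so the pages
 * must be pinned writable; for DMA_TO_DEVICE they are only read.
 */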
static int vhost_scsi_to_tcm_attr(int attr)
{
	switch (attr) {
	case VIRTIO_SCSI_S_SIMPLE:
		return TCM_SIMPLE_TAG;
	case VIRTIO_SCSI_S_ORDERED:
		return TCM_ORDERED_TAG;
	case VIRTIO_SCSI_S_HEAD:
		return TCM_HEAD_TAG;
	case VIRTIO_SCSI_S_ACA:
		return TCM_ACA_TAG;
	default:
		break;
	}
	return TCM_SIMPLE_TAG;
}
static void vhost_scsi_submission_work(struct work_struct *work)
{
	struct vhost_scsi_cmd *cmd =
		container_of(work, struct vhost_scsi_cmd, work);
	struct vhost_scsi_nexus *tv_nexus;
	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
	struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
	int rc;

	/* FIXME: BIDI operation */
	if (cmd->tvc_sgl_count) {
		sg_ptr = cmd->tvc_sgl;

		if (cmd->tvc_prot_sgl_count)
			sg_prot_ptr = cmd->tvc_prot_sgl;
		else
			se_cmd->prot_pto = true;
	} else {
		sg_ptr = NULL;
	}
	tv_nexus = cmd->tvc_nexus;

	se_cmd->tag = 0;
	rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
			cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
			cmd->tvc_lun, cmd->tvc_exp_data_len,
			vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
			cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
			sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
			cmd->tvc_prot_sgl_count);
	if (rc < 0) {
		transport_send_check_condition_and_sense(se_cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
		transport_generic_free_cmd(se_cmd, 0);
	}
}
static void
vhost_scsi_send_bad_target(struct vhost_scsi *vs,
			   struct vhost_virtqueue *vq,
			   int head, unsigned out)
{
	struct virtio_scsi_cmd_resp __user *resp;
	struct virtio_scsi_cmd_resp rsp;
	int ret;

	memset(&rsp, 0, sizeof(rsp));
	rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
	resp = vq->iov[out].iov_base;
	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		pr_err("Faulted on virtio_scsi_cmd_resp\n");
}
static int
vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
		    struct vhost_scsi_ctx *vc)
{
	int ret = -ENXIO;

	vc->head = vhost_get_vq_desc(vq, vq->iov,
				     ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
				     NULL, NULL);

	pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
		 vc->head, vc->out, vc->in);

	/* On error, stop handling until the next kick. */
	if (unlikely(vc->head < 0))
		goto done;

	/* Nothing new? Wait for eventfd to tell us they refilled. */
	if (vc->head == vq->num) {
		if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
			vhost_disable_notify(&vs->dev, vq);
			ret = -EAGAIN;
		}
		goto done;
	}

	/*
	 * Get the size of request and response buffers.
	 * FIXME: Not correct for BIDI operation
	 */
	vc->out_size = iov_length(vq->iov, vc->out);
	vc->in_size = iov_length(&vq->iov[vc->out], vc->in);

	/*
	 * Copy over the virtio-scsi request header, which for a
	 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
	 * single iovec may contain both the header + outgoing
	 * WRITE payloads.
	 *
	 * copy_from_iter() will advance out_iter, so that it will
	 * point at the start of the outgoing WRITE payload, if
	 * DMA_TO_DEVICE is set.
	 */
	iov_iter_init(&vc->out_iter, WRITE, vq->iov, vc->out, vc->out_size);
	ret = 0;

done:
	return ret;
}
static int
vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
{
	if (unlikely(vc->in_size < vc->rsp_size)) {
		vq_err(vq,
		       "Response buf too small, need min %zu bytes got %zu",
		       vc->rsp_size, vc->in_size);
		return -EINVAL;
	} else if (unlikely(vc->out_size < vc->req_size)) {
		vq_err(vq,
		       "Request buf too small, need min %zu bytes got %zu",
		       vc->req_size, vc->out_size);
		return -EINVAL;
	}

	return 0;
}
static int
vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
		   struct vhost_scsi_tpg **tpgp)
{
	int ret = -EIO;

	if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
					  &vc->out_iter))) {
		vq_err(vq, "Faulted on copy_from_iter_full\n");
	} else if (unlikely(*vc->lunp != 1)) {
		/* virtio-scsi spec requires byte 0 of the lun to be 1 */
		vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
	} else {
		struct vhost_scsi_tpg **vs_tpg, *tpg;

		vs_tpg = vq->private_data; /* validated at handler entry */

		tpg = READ_ONCE(vs_tpg[*vc->target]);
		if (unlikely(!tpg)) {
			vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
		} else {
			if (tpgp)
				*tpgp = tpg;
			ret = 0;
		}
	}

	return ret;
}
static void
vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
	struct vhost_scsi_tpg **vs_tpg, *tpg;
	struct virtio_scsi_cmd_req v_req;
	struct virtio_scsi_cmd_req_pi v_req_pi;
	struct vhost_scsi_ctx vc;
	struct vhost_scsi_cmd *cmd;
	struct iov_iter in_iter, prot_iter, data_iter;
	u64 tag;
	u32 exp_data_len, data_direction;
	int ret, prot_bytes, c = 0;
	u16 lun;
	u8 task_attr;
	bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
	void *cdb;

	mutex_lock(&vq->mutex);
	/*
	 * We can handle the vq only after the endpoint is setup by calling the
	 * VHOST_SCSI_SET_ENDPOINT ioctl.
	 */
	vs_tpg = vq->private_data;
	if (!vs_tpg)
		goto out;

	memset(&vc, 0, sizeof(vc));
	vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp);

	vhost_disable_notify(&vs->dev, vq);

	do {
		ret = vhost_scsi_get_desc(vs, vq, &vc);
		if (ret)
			goto err;

		/*
		 * Setup pointers and values based upon different virtio-scsi
		 * request header if T10_PI is enabled in KVM guest.
		 */
		if (t10_pi) {
			vc.req = &v_req_pi;
			vc.req_size = sizeof(v_req_pi);
			vc.lunp = &v_req_pi.lun[0];
			vc.target = &v_req_pi.lun[1];
		} else {
			vc.req = &v_req;
			vc.req_size = sizeof(v_req);
			vc.lunp = &v_req.lun[0];
			vc.target = &v_req.lun[1];
		}

		/*
		 * Validate the size of request and response buffers.
		 * Check for a sane response buffer so we can report
		 * early errors back to the guest.
		 */
		ret = vhost_scsi_chk_size(vq, &vc);
		if (ret)
			goto err;

		ret = vhost_scsi_get_req(vq, &vc, &tpg);
		if (ret)
			goto err;

		ret = -EIO;	/* bad target on any error from here on */

		/*
		 * Determine data_direction by calculating the total outgoing
		 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
		 * response headers respectively.
		 *
		 * For DMA_TO_DEVICE this is out_iter, which is already pointing
		 * to the right place.
		 *
		 * For DMA_FROM_DEVICE, the iovec will be just past the end
		 * of the virtio-scsi response header in either the same
		 * or immediately following iovec.
		 *
		 * Any associated T10_PI bytes for the outgoing / incoming
		 * payloads are included in calculation of exp_data_len here.
		 */
		prot_bytes = 0;

		if (vc.out_size > vc.req_size) {
			data_direction = DMA_TO_DEVICE;
			exp_data_len = vc.out_size - vc.req_size;
			data_iter = vc.out_iter;
		} else if (vc.in_size > vc.rsp_size) {
			data_direction = DMA_FROM_DEVICE;
			exp_data_len = vc.in_size - vc.rsp_size;

			iov_iter_init(&in_iter, READ, &vq->iov[vc.out], vc.in,
				      vc.rsp_size + exp_data_len);
			iov_iter_advance(&in_iter, vc.rsp_size);
			data_iter = in_iter;
		} else {
			data_direction = DMA_NONE;
			exp_data_len = 0;
		}
		/*
		 * If T10_PI header + payload is present, setup prot_iter values
		 * and recalculate data_iter for vhost_scsi_mapal() mapping to
		 * host scatterlists via get_user_pages_fast().
		 */
		if (t10_pi) {
			if (v_req_pi.pi_bytesout) {
				if (data_direction != DMA_TO_DEVICE) {
					vq_err(vq, "Received non zero pi_bytesout,"
						" but wrong data_direction\n");
					goto err;
				}
				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
			} else if (v_req_pi.pi_bytesin) {
				if (data_direction != DMA_FROM_DEVICE) {
					vq_err(vq, "Received non zero pi_bytesin,"
						" but wrong data_direction\n");
					goto err;
				}
				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
			}
			/*
			 * Set prot_iter to data_iter and truncate it to
			 * prot_bytes, and advance data_iter past any
			 * preceding prot_bytes that may be present.
			 *
			 * Also fix up the exp_data_len to reflect only the
			 * actual data payload length.
			 */
			if (prot_bytes) {
				exp_data_len -= prot_bytes;
				prot_iter = data_iter;
				iov_iter_truncate(&prot_iter, prot_bytes);
				iov_iter_advance(&data_iter, prot_bytes);
			}
			tag = vhost64_to_cpu(vq, v_req_pi.tag);
			task_attr = v_req_pi.task_attr;
			cdb = &v_req_pi.cdb[0];
			lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF;
		} else {
			tag = vhost64_to_cpu(vq, v_req.tag);
			task_attr = v_req.task_attr;
			cdb = &v_req.cdb[0];
			lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
		}
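		/*
		 * Both branches above decode the 8-byte virtio-scsi LUN
		 * field: byte 0 must be 1 (checked in vhost_scsi_get_req),
		 * byte 1 is the target number, and bytes 2-3 carry the LUN
		 * in the SAM flat format whose 0x40 prefix is masked off
		 * here, leaving a 14-bit LUN.
		 */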
		/*
		 * Check that the received CDB size does not exceed our
		 * hardcoded max for vhost-scsi, then get a pre-allocated
		 * cmd descriptor for the new virtio-scsi tag.
		 *
		 * TODO what if cdb was too small for varlen cdb header?
		 */
		if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
			vq_err(vq, "Received SCSI CDB with command_size: %d that"
				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
				scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
			goto err;
		}
		cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
					 exp_data_len + prot_bytes,
					 data_direction);
		if (IS_ERR(cmd)) {
			vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
			       PTR_ERR(cmd));
			goto err;
		}
		cmd->tvc_vhost = vs;
		cmd->tvc_vq = vq;
		cmd->tvc_resp_iov = vq->iov[vc.out];
		cmd->tvc_in_iovs = vc.in;

		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
			 cmd->tvc_cdb[0], cmd->tvc_lun);
		pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
			 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);

		if (data_direction != DMA_NONE) {
			if (unlikely(vhost_scsi_mapal(cmd, prot_bytes,
						      &prot_iter, exp_data_len,
						      &data_iter))) {
				vq_err(vq, "Failed to map iov to sgl\n");
				vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
				goto err;
			}
		}
		/*
		 * Save the descriptor from vhost_get_vq_desc() to be used to
		 * complete the virtio-scsi request in TCM callback context via
		 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
		 */
		cmd->tvc_vq_desc = vc.head;
		/*
		 * Dispatch cmd descriptor for cmwq execution in process
		 * context provided by vhost_scsi_workqueue. This also ensures
		 * cmd is executed on the same kworker CPU as this vhost
		 * thread to gain positive L2 cache locality effects.
		 */
		INIT_WORK(&cmd->work, vhost_scsi_submission_work);
		queue_work(vhost_scsi_workqueue, &cmd->work);
		ret = 0;
err:
		/*
		 * ENXIO: No more requests, or read error, wait for next kick
		 * EINVAL: Invalid response buffer, drop the request
		 * EIO: Respond with bad target
		 * EAGAIN: Pending request
		 */
		if (ret == -ENXIO)
			break;
		else if (ret == -EIO)
			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
	mutex_unlock(&vq->mutex);
}
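/*
 * For reference, the request headers parsed above (from
 * include/uapi/linux/virtio_scsi.h) start with an 8-byte lun, a 64-bit
 * tag and the task_attr/prio/crn bytes, followed by the CDB;
 * virtio_scsi_cmd_req_pi additionally carries pi_bytesout/pi_bytesin
 * describing how much of the payload is T10-PI metadata.
 */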
static void
vhost_scsi_send_tmf_reject(struct vhost_scsi *vs,
			   struct vhost_virtqueue *vq,
			   struct vhost_scsi_ctx *vc)
{
	struct virtio_scsi_ctrl_tmf_resp rsp;
	struct iov_iter iov_iter;
	int ret;

	pr_debug("%s\n", __func__);
	memset(&rsp, 0, sizeof(rsp));
	rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;

	iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));

	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
	if (likely(ret == sizeof(rsp)))
		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
	else
		pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
}
static void
vhost_scsi_send_an_resp(struct vhost_scsi *vs,
			struct vhost_virtqueue *vq,
			struct vhost_scsi_ctx *vc)
{
	struct virtio_scsi_ctrl_an_resp rsp;
	struct iov_iter iov_iter;
	int ret;

	pr_debug("%s\n", __func__);
	memset(&rsp, 0, sizeof(rsp));	/* event_actual = 0 */
	rsp.response = VIRTIO_SCSI_S_OK;

	iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));

	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
	if (likely(ret == sizeof(rsp)))
		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
	else
		pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
}
static void
vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
	union {
		__virtio32 type;
		struct virtio_scsi_ctrl_an_req an;
		struct virtio_scsi_ctrl_tmf_req tmf;
	} v_req;
	struct vhost_scsi_ctx vc;
	size_t typ_size;
	int ret, c = 0;

	mutex_lock(&vq->mutex);
	/*
	 * We can handle the vq only after the endpoint is setup by calling the
	 * VHOST_SCSI_SET_ENDPOINT ioctl.
	 */
	if (!vq->private_data)
		goto out;

	memset(&vc, 0, sizeof(vc));

	vhost_disable_notify(&vs->dev, vq);

	do {
		ret = vhost_scsi_get_desc(vs, vq, &vc);
		if (ret)
			goto err;

		/*
		 * Get the request type first in order to setup
		 * other parameters dependent on the type.
		 */
		vc.req = &v_req.type;
		typ_size = sizeof(v_req.type);

		if (unlikely(!copy_from_iter_full(vc.req, typ_size,
						  &vc.out_iter))) {
			vq_err(vq, "Faulted on copy_from_iter tmf type\n");
			/*
			 * The size of the response buffer depends on the
			 * request type and must be validated against it.
			 * Since the request type is not known, don't send
			 * a response.
			 */
			continue;
		}

		switch (vhost32_to_cpu(vq, v_req.type)) {
		case VIRTIO_SCSI_T_TMF:
			vc.req = &v_req.tmf;
			vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
			vc.lunp = &v_req.tmf.lun[0];
			vc.target = &v_req.tmf.lun[1];
			break;
		case VIRTIO_SCSI_T_AN_QUERY:
		case VIRTIO_SCSI_T_AN_SUBSCRIBE:
			vc.req = &v_req.an;
			vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req);
			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
			vc.lunp = &v_req.an.lun[0];
			vc.target = NULL;
			break;
		default:
			vq_err(vq, "Unknown control request %d", v_req.type);
			continue;
		}

		/*
		 * Validate the size of request and response buffers.
		 * Check for a sane response buffer so we can report
		 * early errors back to the guest.
		 */
		ret = vhost_scsi_chk_size(vq, &vc);
		if (ret)
			goto err;

		/*
		 * Get the rest of the request now that its size is known.
		 */
		vc.req += typ_size;
		vc.req_size -= typ_size;

		ret = vhost_scsi_get_req(vq, &vc, NULL);
		if (ret)
			goto err;

		if (v_req.type == VIRTIO_SCSI_T_TMF)
			vhost_scsi_send_tmf_reject(vs, vq, &vc);
		else
			vhost_scsi_send_an_resp(vs, vq, &vc);
err:
		/*
		 * ENXIO: No more requests, or read error, wait for next kick
		 * EINVAL: Invalid response buffer, drop the request
		 * EIO: Respond with bad target
		 * EAGAIN: Pending request
		 */
		if (ret == -ENXIO)
			break;
		else if (ret == -EIO)
			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
	mutex_unlock(&vq->mutex);
}
static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	pr_debug("%s: The handling func for control queue.\n", __func__);
	vhost_scsi_ctl_handle_vq(vs, vq);
}
static void
vhost_scsi_send_evt(struct vhost_scsi *vs,
		    struct vhost_scsi_tpg *tpg,
		    struct se_lun *lun,
		    u32 event, u32 reason)
{
	struct vhost_scsi_evt *evt;

	evt = vhost_scsi_allocate_evt(vs, event, reason);
	if (!evt)
		return;

	if (tpg && lun) {
		/* TODO: share lun setup code with virtio-scsi.ko */
		/*
		 * Note: evt->event is zeroed when we allocate it and
		 * lun[4-7] need to be zero according to virtio-scsi spec.
		 */
		evt->event.lun[0] = 0x01;
		evt->event.lun[1] = tpg->tport_tpgt;
		if (lun->unpacked_lun >= 256)
			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
	}

	llist_add(&evt->list, &vs->vs_event_list);
	vhost_work_queue(&vs->dev, &vs->vs_event_work);
}
static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	mutex_lock(&vq->mutex);
	if (!vq->private_data)
		goto out;

	if (vs->vs_events_missed)
		vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
out:
	mutex_unlock(&vq->mutex);
}

static void vhost_scsi_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	vhost_scsi_handle_vq(vs, vq);
}
static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
{
	vhost_poll_flush(&vs->vqs[index].vq.poll);
}

/* Callers must hold dev mutex */
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
	struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
	int i;

	/* Init new inflight and remember the old inflight */
	vhost_scsi_init_inflight(vs, old_inflight);

	/*
	 * The inflight->kref was initialized to 1. We decrement it here to
	 * indicate the start of the flush operation so that it will reach 0
	 * when all the reqs are finished.
	 */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);

	/* Flush both the vhost poll and vhost work */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		vhost_scsi_flush_vq(vs, i);
	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
	vhost_work_flush(&vs->dev, &vs->vs_event_work);

	/* Wait for all reqs issued before the flush to be finished */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		wait_for_completion(&old_inflight[i]->comp);
}
/*
 * Called from vhost_scsi_ioctl() context to walk the list of available
 * vhost_scsi_tpg with an active struct vhost_scsi_nexus
 *
 * The lock nesting rule is:
 *    vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
 */
static int
vhost_scsi_set_endpoint(struct vhost_scsi *vs,
			struct vhost_scsi_target *t)
{
	struct se_portal_group *se_tpg;
	struct vhost_scsi_tport *tv_tport;
	struct vhost_scsi_tpg *tpg;
	struct vhost_scsi_tpg **vs_tpg;
	struct vhost_virtqueue *vq;
	int index, ret, i, len;
	bool match = false;

	mutex_lock(&vhost_scsi_mutex);
	mutex_lock(&vs->dev.mutex);

	/* Verify that ring has been setup correctly. */
	for (index = 0; index < vs->dev.nvqs; ++index) {
		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
			ret = -EFAULT;
			goto out;
		}
	}

	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
	vs_tpg = kzalloc(len, GFP_KERNEL);
	if (!vs_tpg) {
		ret = -ENOMEM;
		goto out;
	}
	if (vs->vs_tpg)
		memcpy(vs_tpg, vs->vs_tpg, len);

	list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
		mutex_lock(&tpg->tv_tpg_mutex);
		if (!tpg->tpg_nexus) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			continue;
		}
		if (tpg->tv_tpg_vhost_count != 0) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			continue;
		}
		tv_tport = tpg->tport;

		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
			if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
				kfree(vs_tpg);
				mutex_unlock(&tpg->tv_tpg_mutex);
				ret = -EEXIST;
				goto out;
			}
			/*
			 * In order to ensure individual vhost-scsi configfs
			 * groups cannot be removed while in use by vhost ioctl,
			 * go ahead and take an explicit se_tpg->tpg_group.cg_item
			 * configfs reference now.
			 */
			se_tpg = &tpg->se_tpg;
			ret = target_depend_item(&se_tpg->tpg_group.cg_item);
			if (ret) {
				pr_warn("target_depend_item() failed: %d\n", ret);
				kfree(vs_tpg);
				mutex_unlock(&tpg->tv_tpg_mutex);
				goto out;
			}
			tpg->tv_tpg_vhost_count++;
			tpg->vhost_scsi = vs;
			vs_tpg[tpg->tport_tpgt] = tpg;
			match = true;
		}
		mutex_unlock(&tpg->tv_tpg_mutex);
	}

	if (match) {
		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
		       sizeof(vs->vs_vhost_wwpn));
		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
			vq = &vs->vqs[i].vq;
			mutex_lock(&vq->mutex);
			vq->private_data = vs_tpg;
			vhost_vq_init_access(vq);
			mutex_unlock(&vq->mutex);
		}
		ret = 0;
	} else {
		ret = -EEXIST;
	}

	/*
	 * Act as synchronize_rcu to make sure access to
	 * old vs->vs_tpg is finished.
	 */
	vhost_scsi_flush(vs);
	kfree(vs->vs_tpg);
	vs->vs_tpg = vs_tpg;

out:
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&vhost_scsi_mutex);
	return ret;
}
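/*
 * Sketch of the userspace side (hypothetical values): the VMM opens
 * /dev/vhost-scsi, issues VHOST_SET_OWNER and the usual
 * VHOST_SET_VRING_* setup, and then passes a vhost_scsi_target with
 * e.g. vhost_wwpn = "naa.600140554cf3a18e" to VHOST_SCSI_SET_ENDPOINT
 * to bind every virtqueue to the matching configfs-created tport.
 */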
static int
vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
			  struct vhost_scsi_target *t)
{
	struct se_portal_group *se_tpg;
	struct vhost_scsi_tport *tv_tport;
	struct vhost_scsi_tpg *tpg;
	struct vhost_virtqueue *vq;
	bool match = false;
	int index, ret, i;
	u8 target;

	mutex_lock(&vhost_scsi_mutex);
	mutex_lock(&vs->dev.mutex);
	/* Verify that ring has been setup correctly. */
	for (index = 0; index < vs->dev.nvqs; ++index) {
		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
			ret = -EFAULT;
			goto err_dev;
		}
	}

	if (!vs->vs_tpg) {
		ret = 0;
		goto err_dev;
	}

	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
		target = i;
		tpg = vs->vs_tpg[target];
		if (!tpg)
			continue;

		mutex_lock(&tpg->tv_tpg_mutex);
		tv_tport = tpg->tport;
		if (!tv_tport) {
			ret = -ENODEV;
			goto err_tpg;
		}

		if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
			pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
				" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
				tv_tport->tport_name, tpg->tport_tpgt,
				t->vhost_wwpn, t->vhost_tpgt);
			ret = -EINVAL;
			goto err_tpg;
		}
		tpg->tv_tpg_vhost_count--;
		tpg->vhost_scsi = NULL;
		vs->vs_tpg[target] = NULL;
		match = true;
		mutex_unlock(&tpg->tv_tpg_mutex);
		/*
		 * Release se_tpg->tpg_group.cg_item configfs dependency now
		 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
		 */
		se_tpg = &tpg->se_tpg;
		target_undepend_item(&se_tpg->tpg_group.cg_item);
	}
	if (match) {
		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
			vq = &vs->vqs[i].vq;
			mutex_lock(&vq->mutex);
			vq->private_data = NULL;
			mutex_unlock(&vq->mutex);
		}
	}
	/*
	 * Act as synchronize_rcu to make sure access to
	 * old vs->vs_tpg is finished.
	 */
	vhost_scsi_flush(vs);
	kfree(vs->vs_tpg);
	vs->vs_tpg = NULL;
	WARN_ON(vs->vs_events_nr);
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&vhost_scsi_mutex);
	return 0;

err_tpg:
	mutex_unlock(&tpg->tv_tpg_mutex);
err_dev:
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&vhost_scsi_mutex);
	return ret;
}
static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_SCSI_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vs->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vs->dev)) {
		mutex_unlock(&vs->dev.mutex);
		return -EFAULT;
	}

	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
		vq = &vs->vqs[i].vq;
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vs->dev.mutex);
	return 0;
}
static int vhost_scsi_open(struct inode *inode, struct file *f)
{
	struct vhost_scsi *vs;
	struct vhost_virtqueue **vqs;
	int i;

	vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL);
	if (!vs) {
		vs = vzalloc(sizeof(*vs));
		if (!vs)
			goto err_vs;
	}

	vqs = kmalloc_array(VHOST_SCSI_MAX_VQ, sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		goto err_vqs;

	vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
	vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);

	vs->vs_events_nr = 0;
	vs->vs_events_missed = false;

	vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
	vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
	vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
	for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
		vqs[i] = &vs->vqs[i].vq;
		vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
	}
	vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV,
		       VHOST_SCSI_WEIGHT, 0);

	vhost_scsi_init_inflight(vs, NULL);

	f->private_data = vs;
	return 0;

err_vqs:
	kvfree(vs);
err_vs:
	return -ENOMEM;
}
static int vhost_scsi_release(struct inode *inode, struct file *f)
{
	struct vhost_scsi *vs = f->private_data;
	struct vhost_scsi_target t;

	mutex_lock(&vs->dev.mutex);
	memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
	mutex_unlock(&vs->dev.mutex);
	vhost_scsi_clear_endpoint(vs, &t);
	vhost_dev_stop(&vs->dev);
	vhost_dev_cleanup(&vs->dev);
	/* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
	vhost_scsi_flush(vs);
	kfree(vs->dev.vqs);
	kvfree(vs);
	return 0;
}
static long
vhost_scsi_ioctl(struct file *f,
		 unsigned int ioctl,
		 unsigned long arg)
{
	struct vhost_scsi *vs = f->private_data;
	struct vhost_scsi_target backend;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u32 __user *eventsp = argp;
	u64 features;
	u32 events_missed;
	int r, abi_version = VHOST_SCSI_ABI_VERSION;
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;

	switch (ioctl) {
	case VHOST_SCSI_SET_ENDPOINT:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		if (backend.reserved != 0)
			return -EOPNOTSUPP;

		return vhost_scsi_set_endpoint(vs, &backend);
	case VHOST_SCSI_CLEAR_ENDPOINT:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		if (backend.reserved != 0)
			return -EOPNOTSUPP;

		return vhost_scsi_clear_endpoint(vs, &backend);
	case VHOST_SCSI_GET_ABI_VERSION:
		if (copy_to_user(argp, &abi_version, sizeof abi_version))
			return -EFAULT;
		return 0;
	case VHOST_SCSI_SET_EVENTS_MISSED:
		if (get_user(events_missed, eventsp))
			return -EFAULT;
		mutex_lock(&vq->mutex);
		vs->vs_events_missed = events_missed;
		mutex_unlock(&vq->mutex);
		return 0;
	case VHOST_SCSI_GET_EVENTS_MISSED:
		mutex_lock(&vq->mutex);
		events_missed = vs->vs_events_missed;
		mutex_unlock(&vq->mutex);
		if (put_user(events_missed, eventsp))
			return -EFAULT;
		return 0;
	case VHOST_GET_FEATURES:
		features = VHOST_SCSI_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		return vhost_scsi_set_features(vs, features);
	default:
		mutex_lock(&vs->dev.mutex);
		r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
		/* TODO: flush backend after dev ioctl. */
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
		mutex_unlock(&vs->dev.mutex);
		return r;
	}
}
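/*
 * Typical ioctl ordering from userspace: VHOST_SET_OWNER (handled by
 * vhost_dev_ioctl() in the default branch above), VHOST_SET_FEATURES,
 * per-ring VHOST_SET_VRING_NUM/ADDR/BASE/KICK/CALL, and finally
 * VHOST_SCSI_SET_ENDPOINT; VHOST_SCSI_CLEAR_ENDPOINT undoes the
 * binding before the rings are torn down.
 */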
#ifdef CONFIG_COMPAT
static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
				    unsigned long arg)
{
	return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_scsi_fops = {
	.owner = THIS_MODULE,
	.release = vhost_scsi_release,
	.unlocked_ioctl = vhost_scsi_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = vhost_scsi_compat_ioctl,
#endif
	.open = vhost_scsi_open,
	.llseek = noop_llseek,
};
static struct miscdevice vhost_scsi_misc = {
	MISC_DYNAMIC_MINOR,
	"vhost-scsi",
	&vhost_scsi_fops,
};

static int __init vhost_scsi_register(void)
{
	return misc_register(&vhost_scsi_misc);
}

static void vhost_scsi_deregister(void)
{
	misc_deregister(&vhost_scsi_misc);
}
static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
{
	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}
static void
vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
		   struct se_lun *lun, bool plug)
{
	struct vhost_scsi *vs = tpg->vhost_scsi;
	struct vhost_virtqueue *vq;
	u32 reason;

	if (!vs)
		return;

	mutex_lock(&vs->dev.mutex);

	if (plug)
		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
	else
		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;

	vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	mutex_lock(&vq->mutex);
	if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
		vhost_scsi_send_evt(vs, tpg, lun,
				    VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
	mutex_unlock(&vq->mutex);
	mutex_unlock(&vs->dev.mutex);
}

static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
{
	vhost_scsi_do_plug(tpg, lun, true);
}

static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
{
	vhost_scsi_do_plug(tpg, lun, false);
}
static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
				struct se_lun *lun)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	mutex_lock(&vhost_scsi_mutex);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count++;
	mutex_unlock(&tpg->tv_tpg_mutex);

	vhost_scsi_hotplug(tpg, lun);

	mutex_unlock(&vhost_scsi_mutex);

	return 0;
}

static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
				   struct se_lun *lun)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	mutex_lock(&vhost_scsi_mutex);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count--;
	mutex_unlock(&tpg->tv_tpg_mutex);

	vhost_scsi_hotunplug(tpg, lun);

	mutex_unlock(&vhost_scsi_mutex);
}
static void vhost_scsi_free_cmd_map_res(struct se_session *se_sess)
{
	struct vhost_scsi_cmd *tv_cmd;
	unsigned int i;

	if (!se_sess->sess_cmd_map)
		return;

	for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
		tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];

		kfree(tv_cmd->tvc_sgl);
		kfree(tv_cmd->tvc_prot_sgl);
		kfree(tv_cmd->tvc_upages);
	}
}
static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
		struct config_item *item, const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	unsigned long val;
	int ret = kstrtoul(page, 0, &val);

	if (ret) {
		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
		return ret;
	}
	if (val != 0 && val != 1 && val != 3) {
		pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
		return -EINVAL;
	}
	tpg->tv_fabric_prot_type = val;

	return count;
}

static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
		struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	return sprintf(page, "%d\n", tpg->tv_fabric_prot_type);
}

CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);

static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
	&vhost_scsi_tpg_attrib_attr_fabric_prot_type,
	NULL,
};
static int vhost_scsi_nexus_cb(struct se_portal_group *se_tpg,
			       struct se_session *se_sess, void *p)
{
	struct vhost_scsi_cmd *tv_cmd;
	unsigned int i;

	for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
		tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];

		tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
					  sizeof(struct scatterlist),
					  GFP_KERNEL);
		if (!tv_cmd->tvc_sgl) {
			pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
			goto out;
		}

		tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
					     sizeof(struct page *),
					     GFP_KERNEL);
		if (!tv_cmd->tvc_upages) {
			pr_err("Unable to allocate tv_cmd->tvc_upages\n");
			goto out;
		}

		tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
					       sizeof(struct scatterlist),
					       GFP_KERNEL);
		if (!tv_cmd->tvc_prot_sgl) {
			pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
			goto out;
		}
	}
	return 0;
out:
	vhost_scsi_free_cmd_map_res(se_sess);
	return -ENOMEM;
}
static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
				 const char *name)
{
	struct vhost_scsi_nexus *tv_nexus;

	mutex_lock(&tpg->tv_tpg_mutex);
	if (tpg->tpg_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_debug("tpg->tpg_nexus already exists\n");
		return -EEXIST;
	}

	tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to allocate struct vhost_scsi_nexus\n");
		return -ENOMEM;
	}
	/*
	 * Since we are running in 'demo mode' this call will generate a
	 * struct se_node_acl for the vhost_scsi struct se_portal_group with
	 * the SCSI Initiator port name of the passed configfs group 'name'.
	 */
	tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg,
					VHOST_SCSI_DEFAULT_TAGS,
					sizeof(struct vhost_scsi_cmd),
					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
					(unsigned char *)name, tv_nexus,
					vhost_scsi_nexus_cb);
	if (IS_ERR(tv_nexus->tvn_se_sess)) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		kfree(tv_nexus);
		return -ENOMEM;
	}
	tpg->tpg_nexus = tv_nexus;

	mutex_unlock(&tpg->tv_tpg_mutex);
	return 0;
}
static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
{
	struct se_session *se_sess;
	struct vhost_scsi_nexus *tv_nexus;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	se_sess = tv_nexus->tvn_se_sess;
	if (!se_sess) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	if (tpg->tv_tpg_port_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
		       " active TPG port count: %d\n",
		       tpg->tv_tpg_port_count);
		return -EBUSY;
	}

	if (tpg->tv_tpg_vhost_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
		       " active TPG vhost count: %d\n",
		       tpg->tv_tpg_vhost_count);
		return -EBUSY;
	}

	pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
		 " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
		 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);

	vhost_scsi_free_cmd_map_res(se_sess);
	/*
	 * Release the SCSI I_T Nexus to the emulated vhost Target Port
	 */
	target_remove_session(se_sess);
	tpg->tpg_nexus = NULL;
	mutex_unlock(&tpg->tv_tpg_mutex);

	kfree(tv_nexus);
	return 0;
}
static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_nexus *tv_nexus;
	ssize_t ret;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}
	ret = snprintf(page, PAGE_SIZE, "%s\n",
		       tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
	mutex_unlock(&tpg->tv_tpg_mutex);

	return ret;
}
static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
					  const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tport *tport_wwn = tpg->tport;
	unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed.
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = vhost_scsi_drop_nexus(tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in vhost_scsi_make_tport(), and call
	 * vhost_scsi_make_nexus().
	 */
	if (strlen(page) >= VHOST_SCSI_NAMELEN) {
		pr_err("Emulated NAA SAS Address: %s, exceeds"
		       " max: %d\n", page, VHOST_SCSI_NAMELEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not"
			       " match target port protoid: %s\n", i_port,
			       vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not"
			       " match target port protoid: %s\n", i_port,
			       vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not"
			       " match target port protoid: %s\n", i_port,
			       vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port:"
	       " %s\n", i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port)-1] == '\n')
		i_port[strlen(i_port)-1] = '\0';

	ret = vhost_scsi_make_nexus(tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}

CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);

static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
	&vhost_scsi_tpg_attr_nexus,
	NULL,
};
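/*
 * Configfs usage sketch (paths are illustrative): after
 * "mkdir -p /sys/kernel/config/target/vhost/naa.<wwpn>/tpgt_1",
 * writing "naa.<wwpn>" into that TPG's "nexus" attribute calls
 * vhost_scsi_tpg_nexus_store() above and creates the I_T nexus;
 * writing "NULL" drops it again via vhost_scsi_drop_nexus().
 */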
static struct se_portal_group *
vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
{
	struct vhost_scsi_tport *tport = container_of(wwn,
			struct vhost_scsi_tport, tport_wwn);
	struct vhost_scsi_tpg *tpg;
	u16 tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
		return ERR_PTR(-EINVAL);

	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct vhost_scsi_tpg");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&tpg->tv_tpg_mutex);
	INIT_LIST_HEAD(&tpg->tv_tpg_list);
	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;

	ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
	if (ret < 0) {
		kfree(tpg);
		return NULL;
	}
	mutex_lock(&vhost_scsi_mutex);
	list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
	mutex_unlock(&vhost_scsi_mutex);

	return &tpg->se_tpg;
}
static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	mutex_lock(&vhost_scsi_mutex);
	list_del(&tpg->tv_tpg_list);
	mutex_unlock(&vhost_scsi_mutex);
	/*
	 * Release the virtual I_T Nexus for this vhost TPG
	 */
	vhost_scsi_drop_nexus(tpg);
	/*
	 * Deregister the se_tpg from TCM.
	 */
	core_tpg_deregister(se_tpg);
	kfree(tpg);
}
static struct se_wwn *
vhost_scsi_make_tport(struct target_fabric_configfs *tf,
		      struct config_group *group,
		      const char *name)
{
	struct vhost_scsi_tport *tport;
	char *ptr;
	u64 wwpn = 0;
	int off = 0;

	/* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
		return ERR_PTR(-EINVAL); */

	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
	if (!tport) {
		pr_err("Unable to allocate struct vhost_scsi_tport");
		return ERR_PTR(-ENOMEM);
	}
	tport->tport_wwpn = wwpn;
	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
		goto check_len;
	}

	pr_err("Unable to locate prefix for emulated Target Port:"
	       " %s\n", name);
	kfree(tport);
	return ERR_PTR(-EINVAL);

check_len:
	if (strlen(name) >= VHOST_SCSI_NAMELEN) {
		pr_err("Emulated %s Address: %s, exceeds"
		       " max: %d\n", vhost_scsi_dump_proto_id(tport), name,
		       VHOST_SCSI_NAMELEN);
		kfree(tport);
		return ERR_PTR(-EINVAL);
	}
	snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);

	pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
		 " %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);

	return &tport->tport_wwn;
}
static void vhost_scsi_drop_tport(struct se_wwn *wwn)
{
	struct vhost_scsi_tport *tport = container_of(wwn,
				struct vhost_scsi_tport, tport_wwn);

	pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
		 " %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
		 tport->tport_name);

	kfree(tport);
}
static ssize_t
vhost_scsi_wwn_version_show(struct config_item *item, char *page)
{
	return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
		" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
		utsname()->machine);
}

CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);

static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
	&vhost_scsi_wwn_attr_version,
	NULL,
};
static const struct target_core_fabric_ops vhost_scsi_ops = {
	.module = THIS_MODULE,
	.fabric_name = "vhost",
	.max_data_sg_nents = VHOST_SCSI_PREALLOC_SGLS,
	.tpg_get_wwn = vhost_scsi_get_fabric_wwn,
	.tpg_get_tag = vhost_scsi_get_tpgt,
	.tpg_check_demo_mode = vhost_scsi_check_true,
	.tpg_check_demo_mode_cache = vhost_scsi_check_true,
	.tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
	.tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
	.tpg_check_prot_fabric_only = vhost_scsi_check_prot_fabric_only,
	.tpg_get_inst_index = vhost_scsi_tpg_get_inst_index,
	.release_cmd = vhost_scsi_release_cmd,
	.check_stop_free = vhost_scsi_check_stop_free,
	.sess_get_index = vhost_scsi_sess_get_index,
	.sess_get_initiator_sid = NULL,
	.write_pending = vhost_scsi_write_pending,
	.set_default_node_attributes = vhost_scsi_set_default_node_attrs,
	.get_cmd_state = vhost_scsi_get_cmd_state,
	.queue_data_in = vhost_scsi_queue_data_in,
	.queue_status = vhost_scsi_queue_status,
	.queue_tm_rsp = vhost_scsi_queue_tm_rsp,
	.aborted_task = vhost_scsi_aborted_task,
	/*
	 * Setup callers for generic logic in target_core_fabric_configfs.c
	 */
	.fabric_make_wwn = vhost_scsi_make_tport,
	.fabric_drop_wwn = vhost_scsi_drop_tport,
	.fabric_make_tpg = vhost_scsi_make_tpg,
	.fabric_drop_tpg = vhost_scsi_drop_tpg,
	.fabric_post_link = vhost_scsi_port_link,
	.fabric_pre_unlink = vhost_scsi_port_unlink,

	.tfc_wwn_attrs = vhost_scsi_wwn_attrs,
	.tfc_tpg_base_attrs = vhost_scsi_tpg_attrs,
	.tfc_tpg_attrib_attrs = vhost_scsi_tpg_attrib_attrs,
};
static int __init vhost_scsi_init(void)
{
	int ret = -ENOMEM;

	pr_debug("TCM_VHOST fabric module %s on %s/%s"
		" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
		utsname()->machine);

	/*
	 * Use our own dedicated workqueue for submitting I/O into
	 * target core to avoid contention within system_wq.
	 */
	vhost_scsi_workqueue = alloc_workqueue("vhost_scsi", 0, 0);
	if (!vhost_scsi_workqueue)
		goto out;

	ret = vhost_scsi_register();
	if (ret < 0)
		goto out_destroy_workqueue;

	ret = target_register_template(&vhost_scsi_ops);
	if (ret < 0)
		goto out_vhost_scsi_deregister;

	return 0;

out_vhost_scsi_deregister:
	vhost_scsi_deregister();
out_destroy_workqueue:
	destroy_workqueue(vhost_scsi_workqueue);
out:
	return ret;
}
static void vhost_scsi_exit(void)
{
	target_unregister_template(&vhost_scsi_ops);
	vhost_scsi_deregister();
	destroy_workqueue(vhost_scsi_workqueue);
}
MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
MODULE_ALIAS("tcm_vhost");
MODULE_LICENSE("GPL");
module_init(vhost_scsi_init);
module_exit(vhost_scsi_exit);