1 /*******************************************************************************
2  * Vhost kernel TCM fabric driver for virtio SCSI initiators
3  *
4  * (C) Copyright 2010-2013 Datera, Inc.
5  * (C) Copyright 2010-2012 IBM Corp.
6  *
7  * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8  *
9  * Authors: Nicholas A. Bellinger <nab@daterainc.com>
10  *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation; either version 2 of the License, or
15  * (at your option) any later version.
16  *
17  * This program is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  * GNU General Public License for more details.
21  *
22  ****************************************************************************/
23
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <generated/utsrelease.h>
27 #include <linux/utsname.h>
28 #include <linux/init.h>
29 #include <linux/slab.h>
30 #include <linux/kthread.h>
31 #include <linux/types.h>
32 #include <linux/string.h>
33 #include <linux/configfs.h>
34 #include <linux/ctype.h>
35 #include <linux/compat.h>
36 #include <linux/eventfd.h>
37 #include <linux/fs.h>
38 #include <linux/vmalloc.h>
39 #include <linux/miscdevice.h>
40 #include <asm/unaligned.h>
41 #include <scsi/scsi_common.h>
42 #include <scsi/scsi_proto.h>
43 #include <target/target_core_base.h>
44 #include <target/target_core_fabric.h>
45 #include <linux/vhost.h>
46 #include <linux/virtio_scsi.h>
47 #include <linux/llist.h>
48 #include <linux/bitmap.h>
49
50 #include "vhost.h"
51
52 #define VHOST_SCSI_VERSION  "v0.1"
53 #define VHOST_SCSI_NAMELEN 256
54 #define VHOST_SCSI_MAX_CDB_SIZE 32
55 #define VHOST_SCSI_DEFAULT_TAGS 256
56 #define VHOST_SCSI_PREALLOC_SGLS 2048
57 #define VHOST_SCSI_PREALLOC_UPAGES 2048
58 #define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
59
60 /* Max number of requests before requeueing the job.
61  * Using this limit prevents one virtqueue from starving the others
62  * with requests.
63  */
64 #define VHOST_SCSI_WEIGHT 256
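/*
 * A minimal sketch of how this weight is applied (see the queue handlers
 * below): each handler loop bumps a local count and breaks out once
 * vhost_exceeds_weight() reports the budget as spent, i.e.
 *
 *	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
 *
 * so one busy virtqueue yields back to the vhost worker instead of
 * monopolizing it.
 */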
65
66 struct vhost_scsi_inflight {
67         /* Wait for the flush operation to finish */
68         struct completion comp;
69         /* Refcount for the inflight reqs */
70         struct kref kref;
71 };
72
73 struct vhost_scsi_cmd {
74         /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
75         int tvc_vq_desc;
76         /* virtio-scsi initiator task attribute */
77         int tvc_task_attr;
78         /* virtio-scsi response incoming iovecs */
79         int tvc_in_iovs;
80         /* virtio-scsi initiator data direction */
81         enum dma_data_direction tvc_data_direction;
82         /* Expected data transfer length from virtio-scsi header */
83         u32 tvc_exp_data_len;
84         /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
85         u64 tvc_tag;
86         /* The number of scatterlists associated with this cmd */
87         u32 tvc_sgl_count;
88         u32 tvc_prot_sgl_count;
89         /* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */
90         u32 tvc_lun;
91         /* Pointer to the SGL formatted memory from virtio-scsi */
92         struct scatterlist *tvc_sgl;
93         struct scatterlist *tvc_prot_sgl;
94         struct page **tvc_upages;
95         /* Response header iovec, copied from the vring descriptor */
96         struct iovec tvc_resp_iov;
97         /* Pointer to vhost_scsi for our device */
98         struct vhost_scsi *tvc_vhost;
99         /* Pointer to vhost_virtqueue for the cmd */
100         struct vhost_virtqueue *tvc_vq;
101         /* Pointer to vhost nexus memory */
102         struct vhost_scsi_nexus *tvc_nexus;
103         /* The TCM I/O descriptor that is accessed via container_of() */
104         struct se_cmd tvc_se_cmd;
105         /* work item used for cmwq dispatch to vhost_scsi_submission_work() */
106         struct work_struct work;
107         /* Copy of the incoming SCSI command descriptor block (CDB) */
108         unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
109         /* Sense buffer that will be mapped into outgoing status */
110         unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
111         /* Completed commands list, serviced from vhost worker thread */
112         struct llist_node tvc_completion_list;
113         /* Used to track inflight cmd */
114         struct vhost_scsi_inflight *inflight;
115 };
116
117 struct vhost_scsi_nexus {
118         /* Pointer to TCM session for I_T Nexus */
119         struct se_session *tvn_se_sess;
120 };
121
122 struct vhost_scsi_tpg {
123         /* Vhost port target portal group tag for TCM */
124         u16 tport_tpgt;
125         /* Used to track the number of TPG Port/LUN links with regard to explicit I_T Nexus shutdown */
126         int tv_tpg_port_count;
127         /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
128         int tv_tpg_vhost_count;
129         /* Used for enabling T10-PI with legacy devices */
130         int tv_fabric_prot_type;
131         /* list for vhost_scsi_list */
132         struct list_head tv_tpg_list;
133         /* Used to protect access for tpg_nexus */
134         struct mutex tv_tpg_mutex;
135         /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
136         struct vhost_scsi_nexus *tpg_nexus;
137         /* Pointer back to vhost_scsi_tport */
138         struct vhost_scsi_tport *tport;
139         /* Returned by vhost_scsi_make_tpg() */
140         struct se_portal_group se_tpg;
141         /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
142         struct vhost_scsi *vhost_scsi;
143 };
144
145 struct vhost_scsi_tport {
146         /* SCSI protocol the tport is providing */
147         u8 tport_proto_id;
148         /* Binary World Wide unique Port Name for Vhost Target port */
149         u64 tport_wwpn;
150         /* ASCII formatted WWPN for Vhost Target port */
151         char tport_name[VHOST_SCSI_NAMELEN];
152         /* Returned by vhost_scsi_make_tport() */
153         struct se_wwn tport_wwn;
154 };
155
156 struct vhost_scsi_evt {
157         /* event to be sent to guest */
158         struct virtio_scsi_event event;
159         /* event list, serviced from vhost worker thread */
160         struct llist_node list;
161 };
162
163 enum {
164         VHOST_SCSI_VQ_CTL = 0,
165         VHOST_SCSI_VQ_EVT = 1,
166         VHOST_SCSI_VQ_IO = 2,
167 };
168
169 /* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
170 enum {
171         VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
172                                                (1ULL << VIRTIO_SCSI_F_T10_PI)
173 };
174
175 #define VHOST_SCSI_MAX_TARGET   256
176 #define VHOST_SCSI_MAX_VQ       128
177 #define VHOST_SCSI_MAX_EVENT    128
178
179 struct vhost_scsi_virtqueue {
180         struct vhost_virtqueue vq;
181         /*
182          * Reference counting for inflight reqs, used for the flush operation.
183          * At any time one counter tracks newly submitted commands, while we
184          * wait for the other to drain to zero.
185          */
186         struct vhost_scsi_inflight inflights[2];
187         /*
188          * Indicate current inflight in use, protected by vq->mutex.
189          * Writers must also take dev mutex and flush under it.
190          */
191         int inflight_idx;
192 };
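/*
 * Roughly, the two inflights[] slots above act as a double-buffered
 * counter: new commands take a reference on inflights[inflight_idx],
 * and a flush flips inflight_idx and drains the old slot:
 *
 *	vhost_scsi_init_inflight(vs, old);	 flip, remember old slots
 *	kref_put(&old[i]->kref, ...);		 drop the initial reference
 *	wait_for_completion(&old[i]->comp);	 wait for outstanding cmds
 *
 * (this is the sequence vhost_scsi_flush() below follows).
 */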
193
194 struct vhost_scsi {
195         /* Protected by vhost_scsi->dev.mutex */
196         struct vhost_scsi_tpg **vs_tpg;
197         char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
198
199         struct vhost_dev dev;
200         struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
201
202         struct vhost_work vs_completion_work; /* cmd completion work item */
203         struct llist_head vs_completion_list; /* cmd completion queue */
204
205         struct vhost_work vs_event_work; /* evt injection work item */
206         struct llist_head vs_event_list; /* evt injection queue */
207
208         bool vs_events_missed; /* any missed events, protected by vq->mutex */
209         int vs_events_nr; /* num of pending events, protected by vq->mutex */
210 };
211
212 /*
213  * Context for processing request and control queue operations.
214  */
215 struct vhost_scsi_ctx {
216         int head;
217         unsigned int out, in;
218         size_t req_size, rsp_size;
219         size_t out_size, in_size;
220         u8 *target, *lunp;
221         void *req;
222         struct iov_iter out_iter;
223 };
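/*
 * Typical lifecycle of a vhost_scsi_ctx in the handlers below:
 * vhost_scsi_get_desc() fills head/out/in and positions out_iter at the
 * request header, vhost_scsi_chk_size() validates out_size/in_size
 * against req_size/rsp_size, and vhost_scsi_get_req() copies the header
 * and resolves the target via the lunp/target pointers.
 */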
224
225 static struct workqueue_struct *vhost_scsi_workqueue;
226
227 /* Global mutex protecting the vhost_scsi TPG list for vhost ioctl access */
228 static DEFINE_MUTEX(vhost_scsi_mutex);
229 static LIST_HEAD(vhost_scsi_list);
230
231 static void vhost_scsi_done_inflight(struct kref *kref)
232 {
233         struct vhost_scsi_inflight *inflight;
234
235         inflight = container_of(kref, struct vhost_scsi_inflight, kref);
236         complete(&inflight->comp);
237 }
238
239 static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
240                                     struct vhost_scsi_inflight *old_inflight[])
241 {
242         struct vhost_scsi_inflight *new_inflight;
243         struct vhost_virtqueue *vq;
244         int idx, i;
245
246         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
247                 vq = &vs->vqs[i].vq;
248
249                 mutex_lock(&vq->mutex);
250
251                 /* store the old inflight */
252                 idx = vs->vqs[i].inflight_idx;
253                 if (old_inflight)
254                         old_inflight[i] = &vs->vqs[i].inflights[idx];
255
256                 /* set up the new inflight */
257                 vs->vqs[i].inflight_idx = idx ^ 1;
258                 new_inflight = &vs->vqs[i].inflights[idx ^ 1];
259                 kref_init(&new_inflight->kref);
260                 init_completion(&new_inflight->comp);
261
262                 mutex_unlock(&vq->mutex);
263         }
264 }
265
266 static struct vhost_scsi_inflight *
267 vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
268 {
269         struct vhost_scsi_inflight *inflight;
270         struct vhost_scsi_virtqueue *svq;
271
272         svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
273         inflight = &svq->inflights[svq->inflight_idx];
274         kref_get(&inflight->kref);
275
276         return inflight;
277 }
278
279 static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
280 {
281         kref_put(&inflight->kref, vhost_scsi_done_inflight);
282 }
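/*
 * Lifecycle note: vhost_scsi_get_tag() takes one inflight reference per
 * command via vhost_scsi_get_inflight(); the reference is dropped in
 * vhost_scsi_release_cmd_res() once the response has been queued back
 * to the guest, so a flush can tell when all older commands are done.
 */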
283
284 static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
285 {
286         return 1;
287 }
288
289 static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
290 {
291         return 0;
292 }
293
294 static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
295 {
296         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
297                                 struct vhost_scsi_tpg, se_tpg);
298         struct vhost_scsi_tport *tport = tpg->tport;
299
300         return &tport->tport_name[0];
301 }
302
303 static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
304 {
305         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
306                                 struct vhost_scsi_tpg, se_tpg);
307         return tpg->tport_tpgt;
308 }
309
310 static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
311 {
312         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
313                                 struct vhost_scsi_tpg, se_tpg);
314
315         return tpg->tv_fabric_prot_type;
316 }
317
318 static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
319 {
320         return 1;
321 }
322
323 static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
324 {
325         struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
326                                 struct vhost_scsi_cmd, tvc_se_cmd);
327         struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
328         int i;
329
330         if (tv_cmd->tvc_sgl_count) {
331                 for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
332                         put_page(sg_page(&tv_cmd->tvc_sgl[i]));
333         }
334         if (tv_cmd->tvc_prot_sgl_count) {
335                 for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
336                         put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
337         }
338
339         vhost_scsi_put_inflight(tv_cmd->inflight);
340         target_free_tag(se_sess, se_cmd);
341 }
342
343 static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
344 {
345         struct vhost_scsi_cmd *cmd = container_of(se_cmd,
346                                         struct vhost_scsi_cmd, tvc_se_cmd);
347         struct vhost_scsi *vs = cmd->tvc_vhost;
348
349         llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
350         vhost_work_queue(&vs->dev, &vs->vs_completion_work);
351 }
352
353 static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
354 {
355         return 0;
356 }
357
358 static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
359 {
360         /* Go ahead and process the write immediately */
361         target_execute_cmd(se_cmd);
362         return 0;
363 }
364
365 static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
366 {
367         return;
368 }
369
370 static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
371 {
372         return 0;
373 }
374
375 static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
376 {
377         transport_generic_free_cmd(se_cmd, 0);
378         return 0;
379 }
380
381 static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
382 {
383         transport_generic_free_cmd(se_cmd, 0);
384         return 0;
385 }
386
387 static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
388 {
389         return;
390 }
391
392 static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
393 {
394         return;
395 }
396
397 static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
398 {
399         vs->vs_events_nr--;
400         kfree(evt);
401 }
402
403 static struct vhost_scsi_evt *
404 vhost_scsi_allocate_evt(struct vhost_scsi *vs,
405                        u32 event, u32 reason)
406 {
407         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
408         struct vhost_scsi_evt *evt;
409
410         if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
411                 vs->vs_events_missed = true;
412                 return NULL;
413         }
414
415         evt = kzalloc(sizeof(*evt), GFP_KERNEL);
416         if (!evt) {
417                 vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
418                 vs->vs_events_missed = true;
419                 return NULL;
420         }
421
422         evt->event.event = cpu_to_vhost32(vq, event);
423         evt->event.reason = cpu_to_vhost32(vq, reason);
424         vs->vs_events_nr++;
425
426         return evt;
427 }
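/*
 * Event accounting, roughly: vs_events_nr is bounded by
 * VHOST_SCSI_MAX_EVENT; beyond that (or on allocation failure) the
 * event is dropped and vs_events_missed is set, so a later event is
 * delivered with VIRTIO_SCSI_T_EVENTS_MISSED or'ed into event.event
 * (see vhost_scsi_do_evt_work() below).
 */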
428
429 static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
430 {
431         return target_put_sess_cmd(se_cmd);
432 }
433
434 static void
435 vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
436 {
437         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
438         struct virtio_scsi_event *event = &evt->event;
439         struct virtio_scsi_event __user *eventp;
440         unsigned out, in;
441         int head, ret;
442
443         if (!vq->private_data) {
444                 vs->vs_events_missed = true;
445                 return;
446         }
447
448 again:
449         vhost_disable_notify(&vs->dev, vq);
450         head = vhost_get_vq_desc(vq, vq->iov,
451                         ARRAY_SIZE(vq->iov), &out, &in,
452                         NULL, NULL);
453         if (head < 0) {
454                 vs->vs_events_missed = true;
455                 return;
456         }
457         if (head == vq->num) {
458                 if (vhost_enable_notify(&vs->dev, vq))
459                         goto again;
460                 vs->vs_events_missed = true;
461                 return;
462         }
463
464         if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
465                 vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
466                                 vq->iov[out].iov_len);
467                 vs->vs_events_missed = true;
468                 return;
469         }
470
471         if (vs->vs_events_missed) {
472                 event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
473                 vs->vs_events_missed = false;
474         }
475
476         eventp = vq->iov[out].iov_base;
477         ret = __copy_to_user(eventp, event, sizeof(*event));
478         if (!ret)
479                 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
480         else
481                 vq_err(vq, "Faulted on vhost_scsi_send_event\n");
482 }
483
484 static void vhost_scsi_evt_work(struct vhost_work *work)
485 {
486         struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
487                                         vs_event_work);
488         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
489         struct vhost_scsi_evt *evt, *t;
490         struct llist_node *llnode;
491
492         mutex_lock(&vq->mutex);
493         llnode = llist_del_all(&vs->vs_event_list);
494         llist_for_each_entry_safe(evt, t, llnode, list) {
495                 vhost_scsi_do_evt_work(vs, evt);
496                 vhost_scsi_free_evt(vs, evt);
497         }
498         mutex_unlock(&vq->mutex);
499 }
500
501 /* Fill in status and signal that we are done processing this command
502  *
503  * This is scheduled in the vhost work queue so we are called with the owner
504  * process mm and can access the vring.
505  */
506 static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
507 {
508         struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
509                                         vs_completion_work);
510         DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
511         struct virtio_scsi_cmd_resp v_rsp;
512         struct vhost_scsi_cmd *cmd, *t;
513         struct llist_node *llnode;
514         struct se_cmd *se_cmd;
515         struct iov_iter iov_iter;
516         int ret, vq;
517
518         bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
519         llnode = llist_del_all(&vs->vs_completion_list);
520         llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
521                 se_cmd = &cmd->tvc_se_cmd;
522
523                 pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
524                         cmd, se_cmd->residual_count, se_cmd->scsi_status);
525
526                 memset(&v_rsp, 0, sizeof(v_rsp));
527                 v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
528                 /* TODO is status_qualifier field needed? */
529                 v_rsp.status = se_cmd->scsi_status;
530                 v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
531                                                  se_cmd->scsi_sense_length);
532                 memcpy(v_rsp.sense, cmd->tvc_sense_buf,
533                        se_cmd->scsi_sense_length);
534
535                 iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov,
536                               cmd->tvc_in_iovs, sizeof(v_rsp));
537                 ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
538                 if (likely(ret == sizeof(v_rsp))) {
539                         struct vhost_scsi_virtqueue *q;
540                         vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
541                         q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
542                         vq = q - vs->vqs;
543                         __set_bit(vq, signal);
544                 } else
545                         pr_err("Faulted on virtio_scsi_cmd_resp\n");
546
547                 vhost_scsi_release_cmd_res(se_cmd);
548         }
549
550         vq = -1;
551         while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
552                 < VHOST_SCSI_MAX_VQ)
553                 vhost_signal(&vs->dev, &vs->vqs[vq].vq);
554 }
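/*
 * Completion batching, in outline: TCM completes commands from arbitrary
 * contexts, vhost_scsi_release_cmd() pushes them onto the lock-free
 * vs_completion_list, and this worker drains the whole list in one pass,
 * setting one bit per virtqueue in 'signal' so each vq's guest is kicked
 * at most once per batch rather than once per command.
 */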
555
556 static struct vhost_scsi_cmd *
557 vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
558                    unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
559                    u32 exp_data_len, int data_direction)
560 {
561         struct vhost_scsi_cmd *cmd;
562         struct vhost_scsi_nexus *tv_nexus;
563         struct se_session *se_sess;
564         struct scatterlist *sg, *prot_sg;
565         struct page **pages;
566         int tag, cpu;
567
568         tv_nexus = tpg->tpg_nexus;
569         if (!tv_nexus) {
570                 pr_err("Unable to locate active struct vhost_scsi_nexus\n");
571                 return ERR_PTR(-EIO);
572         }
573         se_sess = tv_nexus->tvn_se_sess;
574
575         tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
576         if (tag < 0) {
577                 pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
578                 return ERR_PTR(-ENOMEM);
579         }
580
581         cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag];
582         sg = cmd->tvc_sgl;
583         prot_sg = cmd->tvc_prot_sgl;
584         pages = cmd->tvc_upages;
585         memset(cmd, 0, sizeof(*cmd));
586         cmd->tvc_sgl = sg;
587         cmd->tvc_prot_sgl = prot_sg;
588         cmd->tvc_upages = pages;
589         cmd->tvc_se_cmd.map_tag = tag;
590         cmd->tvc_se_cmd.map_cpu = cpu;
591         cmd->tvc_tag = scsi_tag;
592         cmd->tvc_lun = lun;
593         cmd->tvc_task_attr = task_attr;
594         cmd->tvc_exp_data_len = exp_data_len;
595         cmd->tvc_data_direction = data_direction;
596         cmd->tvc_nexus = tv_nexus;
597         cmd->inflight = vhost_scsi_get_inflight(vq);
598
599         memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);
600
601         return cmd;
602 }
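/*
 * A note on the reuse pattern above: each tag indexes a preallocated
 * vhost_scsi_cmd in se_sess->sess_cmd_map, so the sgl/prot_sgl/upages
 * pointers are saved across the memset() rather than reallocated:
 *
 *	sg = cmd->tvc_sgl;
 *	memset(cmd, 0, sizeof(*cmd));
 *	cmd->tvc_sgl = sg;
 *
 * keeping command setup allocation-free on the fast path.
 */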
603
604 /*
605  * Map a user memory range into a scatterlist
606  *
607  * Returns the number of scatterlist entries used or -errno on error.
608  */
609 static int
610 vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
611                       struct iov_iter *iter,
612                       struct scatterlist *sgl,
613                       bool write)
614 {
615         struct page **pages = cmd->tvc_upages;
616         struct scatterlist *sg = sgl;
617         ssize_t bytes;
618         size_t offset;
619         unsigned int npages = 0;
620
621         bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
622                                 VHOST_SCSI_PREALLOC_UPAGES, &offset);
623         /* No pages were pinned */
624         if (bytes <= 0)
625                 return bytes < 0 ? bytes : -EFAULT;
626
627         iov_iter_advance(iter, bytes);
628
629         while (bytes) {
630                 unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
631                 sg_set_page(sg++, pages[npages++], n, offset);
632                 bytes -= n;
633                 offset = 0;
634         }
635         return npages;
636 }
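/*
 * Illustrative example (numbers not from the source): a 9216-byte iovec
 * starting at page offset 512 pins three pages and produces three
 * scatterlist entries of 3584, 4096 and 1536 bytes; 'offset' applies
 * only to the first entry, which is why it is zeroed after the first
 * loop iteration.
 */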
637
638 static int
639 vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
640 {
641         int sgl_count = 0;
642
643         if (!iter || !iter->iov) {
644                 pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
645                        " present\n", __func__, bytes);
646                 return -EINVAL;
647         }
648
649         sgl_count = iov_iter_npages(iter, 0xffff);
650         if (sgl_count > max_sgls) {
651                 pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
652                        " max_sgls: %d\n", __func__, sgl_count, max_sgls);
653                 return -EINVAL;
654         }
655         return sgl_count;
656 }
657
658 static int
659 vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
660                       struct iov_iter *iter,
661                       struct scatterlist *sg, int sg_count)
662 {
663         struct scatterlist *p = sg;
664         int ret;
665
666         while (iov_iter_count(iter)) {
667                 ret = vhost_scsi_map_to_sgl(cmd, iter, sg, write);
668                 if (ret < 0) {
669                         while (p < sg) {
670                                 struct page *page = sg_page(p++);
671                                 if (page)
672                                         put_page(page);
673                         }
674                         return ret;
675                 }
676                 sg += ret;
677         }
678         return 0;
679 }
680
681 static int
682 vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
683                  size_t prot_bytes, struct iov_iter *prot_iter,
684                  size_t data_bytes, struct iov_iter *data_iter)
685 {
686         int sgl_count, ret;
687         bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);
688
689         if (prot_bytes) {
690                 sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
691                                                  VHOST_SCSI_PREALLOC_PROT_SGLS);
692                 if (sgl_count < 0)
693                         return sgl_count;
694
695                 sg_init_table(cmd->tvc_prot_sgl, sgl_count);
696                 cmd->tvc_prot_sgl_count = sgl_count;
697                 pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
698                          cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);
699
700                 ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
701                                             cmd->tvc_prot_sgl,
702                                             cmd->tvc_prot_sgl_count);
703                 if (ret < 0) {
704                         cmd->tvc_prot_sgl_count = 0;
705                         return ret;
706                 }
707         }
708         sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
709                                          VHOST_SCSI_PREALLOC_SGLS);
710         if (sgl_count < 0)
711                 return sgl_count;
712
713         sg_init_table(cmd->tvc_sgl, sgl_count);
714         cmd->tvc_sgl_count = sgl_count;
715         pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
716                   cmd->tvc_sgl, cmd->tvc_sgl_count);
717
718         ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
719                                     cmd->tvc_sgl, cmd->tvc_sgl_count);
720         if (ret < 0) {
721                 cmd->tvc_sgl_count = 0;
722                 return ret;
723         }
724         return 0;
725 }
726
727 static int vhost_scsi_to_tcm_attr(int attr)
728 {
729         switch (attr) {
730         case VIRTIO_SCSI_S_SIMPLE:
731                 return TCM_SIMPLE_TAG;
732         case VIRTIO_SCSI_S_ORDERED:
733                 return TCM_ORDERED_TAG;
734         case VIRTIO_SCSI_S_HEAD:
735                 return TCM_HEAD_TAG;
736         case VIRTIO_SCSI_S_ACA:
737                 return TCM_ACA_TAG;
738         default:
739                 break;
740         }
741         return TCM_SIMPLE_TAG;
742 }
743
744 static void vhost_scsi_submission_work(struct work_struct *work)
745 {
746         struct vhost_scsi_cmd *cmd =
747                 container_of(work, struct vhost_scsi_cmd, work);
748         struct vhost_scsi_nexus *tv_nexus;
749         struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
750         struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
751         int rc;
752
753         /* FIXME: BIDI operation */
754         if (cmd->tvc_sgl_count) {
755                 sg_ptr = cmd->tvc_sgl;
756
757                 if (cmd->tvc_prot_sgl_count)
758                         sg_prot_ptr = cmd->tvc_prot_sgl;
759                 else
760                         se_cmd->prot_pto = true;
761         } else {
762                 sg_ptr = NULL;
763         }
764         tv_nexus = cmd->tvc_nexus;
765
766         se_cmd->tag = 0;
767         rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
768                         cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
769                         cmd->tvc_lun, cmd->tvc_exp_data_len,
770                         vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
771                         cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
772                         sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
773                         cmd->tvc_prot_sgl_count);
774         if (rc < 0) {
775                 transport_send_check_condition_and_sense(se_cmd,
776                                 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
777                 transport_generic_free_cmd(se_cmd, 0);
778         }
779 }
780
781 static void
782 vhost_scsi_send_bad_target(struct vhost_scsi *vs,
783                            struct vhost_virtqueue *vq,
784                            int head, unsigned out)
785 {
786         struct virtio_scsi_cmd_resp __user *resp;
787         struct virtio_scsi_cmd_resp rsp;
788         int ret;
789
790         memset(&rsp, 0, sizeof(rsp));
791         rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
792         resp = vq->iov[out].iov_base;
793         ret = __copy_to_user(resp, &rsp, sizeof(rsp));
794         if (!ret)
795                 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
796         else
797                 pr_err("Faulted on virtio_scsi_cmd_resp\n");
798 }
799
800 static int
801 vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
802                     struct vhost_scsi_ctx *vc)
803 {
804         int ret = -ENXIO;
805
806         vc->head = vhost_get_vq_desc(vq, vq->iov,
807                                      ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
808                                      NULL, NULL);
809
810         pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
811                  vc->head, vc->out, vc->in);
812
813         /* On error, stop handling until the next kick. */
814         if (unlikely(vc->head < 0))
815                 goto done;
816
817         /* Nothing new?  Wait for eventfd to tell us they refilled. */
818         if (vc->head == vq->num) {
819                 if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
820                         vhost_disable_notify(&vs->dev, vq);
821                         ret = -EAGAIN;
822                 }
823                 goto done;
824         }
825
826         /*
827          * Get the size of request and response buffers.
828          * FIXME: Not correct for BIDI operation
829          */
830         vc->out_size = iov_length(vq->iov, vc->out);
831         vc->in_size = iov_length(&vq->iov[vc->out], vc->in);
832
833         /*
834          * Copy over the virtio-scsi request header, which for a
835          * ANY_LAYOUT enabled guest may span multiple iovecs, or a
836          * single iovec may contain both the header + outgoing
837          * WRITE payloads.
838          *
839          * copy_from_iter() will advance out_iter, so that it will
840          * point at the start of the outgoing WRITE payload, if
841          * DMA_TO_DEVICE is set.
842          */
843         iov_iter_init(&vc->out_iter, WRITE, vq->iov, vc->out, vc->out_size);
844         ret = 0;
845
846 done:
847         return ret;
848 }
849
850 static int
851 vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
852 {
853         if (unlikely(vc->in_size < vc->rsp_size)) {
854                 vq_err(vq,
855                        "Response buf too small, need min %zu bytes got %zu",
856                        vc->rsp_size, vc->in_size);
857                 return -EINVAL;
858         } else if (unlikely(vc->out_size < vc->req_size)) {
859                 vq_err(vq,
860                        "Request buf too small, need min %zu bytes got %zu",
861                        vc->req_size, vc->out_size);
862                 return -EIO;
863         }
864
865         return 0;
866 }
867
868 static int
869 vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
870                    struct vhost_scsi_tpg **tpgp)
871 {
872         int ret = -EIO;
873
874         if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
875                                           &vc->out_iter))) {
876                 vq_err(vq, "Faulted on copy_from_iter_full\n");
877         } else if (unlikely(*vc->lunp != 1)) {
878                 /* virtio-scsi spec requires byte 0 of the lun to be 1 */
879                 vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
880         } else {
881                 struct vhost_scsi_tpg **vs_tpg, *tpg;
882
883                 vs_tpg = vq->private_data;      /* validated at handler entry */
884
885                 tpg = READ_ONCE(vs_tpg[*vc->target]);
886                 if (unlikely(!tpg)) {
887                         vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
888                 } else {
889                         if (tpgp)
890                                 *tpgp = tpg;
891                         ret = 0;
892                 }
893         }
894
895         return ret;
896 }
897
898 static void
899 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
900 {
901         struct vhost_scsi_tpg **vs_tpg, *tpg;
902         struct virtio_scsi_cmd_req v_req;
903         struct virtio_scsi_cmd_req_pi v_req_pi;
904         struct vhost_scsi_ctx vc;
905         struct vhost_scsi_cmd *cmd;
906         struct iov_iter in_iter, prot_iter, data_iter;
907         u64 tag;
908         u32 exp_data_len, data_direction;
909         int ret, prot_bytes, c = 0;
910         u16 lun;
911         u8 task_attr;
912         bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
913         void *cdb;
914
915         mutex_lock(&vq->mutex);
916         /*
917          * We can handle the vq only after the endpoint is setup by calling the
918          * VHOST_SCSI_SET_ENDPOINT ioctl.
919          */
920         vs_tpg = vq->private_data;
921         if (!vs_tpg)
922                 goto out;
923
924         memset(&vc, 0, sizeof(vc));
925         vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp);
926
927         vhost_disable_notify(&vs->dev, vq);
928
929         do {
930                 ret = vhost_scsi_get_desc(vs, vq, &vc);
931                 if (ret)
932                         goto err;
933
934                 /*
935                  * Set up pointers and values based on the alternate virtio-scsi
936                  * request header when T10_PI is enabled in the KVM guest.
937                  */
938                 if (t10_pi) {
939                         vc.req = &v_req_pi;
940                         vc.req_size = sizeof(v_req_pi);
941                         vc.lunp = &v_req_pi.lun[0];
942                         vc.target = &v_req_pi.lun[1];
943                 } else {
944                         vc.req = &v_req;
945                         vc.req_size = sizeof(v_req);
946                         vc.lunp = &v_req.lun[0];
947                         vc.target = &v_req.lun[1];
948                 }
949
950                 /*
951                  * Validate the size of request and response buffers.
952                  * Check for a sane response buffer so we can report
953                  * early errors back to the guest.
954                  */
955                 ret = vhost_scsi_chk_size(vq, &vc);
956                 if (ret)
957                         goto err;
958
959                 ret = vhost_scsi_get_req(vq, &vc, &tpg);
960                 if (ret)
961                         goto err;
962
963                 ret = -EIO;     /* bad target on any error from here on */
964
965                 /*
966                  * Determine data_direction by calculating the total outgoing
967                  * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
968                  * response headers respectively.
969                  *
970                  * For DMA_TO_DEVICE this is out_iter, which is already pointing
971                  * to the right place.
972                  *
973                  * For DMA_FROM_DEVICE, the iovec will be just past the end
974                  * of the virtio-scsi response header in either the same
975                  * or immediately following iovec.
976                  *
977                  * Any associated T10_PI bytes for the outgoing / incoming
978                  * payloads are included in calculation of exp_data_len here.
979                  */
980                 prot_bytes = 0;
981
982                 if (vc.out_size > vc.req_size) {
983                         data_direction = DMA_TO_DEVICE;
984                         exp_data_len = vc.out_size - vc.req_size;
985                         data_iter = vc.out_iter;
986                 } else if (vc.in_size > vc.rsp_size) {
987                         data_direction = DMA_FROM_DEVICE;
988                         exp_data_len = vc.in_size - vc.rsp_size;
989
990                         iov_iter_init(&in_iter, READ, &vq->iov[vc.out], vc.in,
991                                       vc.rsp_size + exp_data_len);
992                         iov_iter_advance(&in_iter, vc.rsp_size);
993                         data_iter = in_iter;
994                 } else {
995                         data_direction = DMA_NONE;
996                         exp_data_len = 0;
997                 }
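                /*
                 * Worked example with hypothetical numbers: for a 4 KiB
                 * guest WRITE, out_size = req_size + 4096 and in_size =
                 * rsp_size, so the first branch selects DMA_TO_DEVICE
                 * with exp_data_len = 4096; data_iter is out_iter, which
                 * copy_from_iter_full() already advanced past the header.
                 */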
998                 /*
999                  * If T10_PI header + payload is present, setup prot_iter values
1000                  * and recalculate data_iter for vhost_scsi_mapal() mapping to
1001                  * host scatterlists via get_user_pages_fast().
1002                  */
1003                 if (t10_pi) {
1004                         if (v_req_pi.pi_bytesout) {
1005                                 if (data_direction != DMA_TO_DEVICE) {
1006                                         vq_err(vq, "Received non zero pi_bytesout,"
1007                                                 " but wrong data_direction\n");
1008                                         goto err;
1009                                 }
1010                                 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
1011                         } else if (v_req_pi.pi_bytesin) {
1012                                 if (data_direction != DMA_FROM_DEVICE) {
1013                                         vq_err(vq, "Received non zero pi_bytesin,"
1014                                                 " but wrong data_direction\n");
1015                                         goto err;
1016                                 }
1017                                 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
1018                         }
1019                         /*
1020                          * Set prot_iter to data_iter and truncate it to
1021                          * prot_bytes, and advance data_iter past any
1022                          * preceding prot_bytes that may be present.
1023                          *
1024                          * Also fix up the exp_data_len to reflect only the
1025                          * actual data payload length.
1026                          */
1027                         if (prot_bytes) {
1028                                 exp_data_len -= prot_bytes;
1029                                 prot_iter = data_iter;
1030                                 iov_iter_truncate(&prot_iter, prot_bytes);
1031                                 iov_iter_advance(&data_iter, prot_bytes);
1032                         }
1033                         tag = vhost64_to_cpu(vq, v_req_pi.tag);
1034                         task_attr = v_req_pi.task_attr;
1035                         cdb = &v_req_pi.cdb[0];
1036                         lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF;
1037                 } else {
1038                         tag = vhost64_to_cpu(vq, v_req.tag);
1039                         task_attr = v_req.task_attr;
1040                         cdb = &v_req.cdb[0];
1041                         lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
1042                 }
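                /*
                 * LUN decode, for reference: the virtio-scsi spec uses
                 * lun[0] = 1, lun[1] = target, and lun[2..3] as a SAM-2
                 * flat-space LUN (0x40 | hi, lo); masking with 0x3FFF
                 * recovers the 14-bit LUN. E.g. a hypothetical
                 * lun[] = { 1, 0, 0x40, 0x05 } addresses target 0, LUN 5.
                 */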
1043                 /*
1044                  * Check that the received CDB size does not exceed our
1045                  * hardcoded max for vhost-scsi, then get a pre-allocated
1046                  * cmd descriptor for the new virtio-scsi tag.
1047                  *
1048                  * TODO what if cdb was too small for varlen cdb header?
1049                  */
1050                 if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
1051                         vq_err(vq, "Received SCSI CDB with command_size: %d that"
1052                                 " exceeds VHOST_SCSI_MAX_CDB_SIZE: %d\n",
1053                                 scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
1054                         goto err;
1055                 }
1056                 cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
1057                                          exp_data_len + prot_bytes,
1058                                          data_direction);
1059                 if (IS_ERR(cmd)) {
1060                         vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
1061                                PTR_ERR(cmd));
1062                         goto err;
1063                 }
1064                 cmd->tvc_vhost = vs;
1065                 cmd->tvc_vq = vq;
1066                 cmd->tvc_resp_iov = vq->iov[vc.out];
1067                 cmd->tvc_in_iovs = vc.in;
1068
1069                 pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
1070                          cmd->tvc_cdb[0], cmd->tvc_lun);
1071                 pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
1072                          " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
1073
1074                 if (data_direction != DMA_NONE) {
1075                         if (unlikely(vhost_scsi_mapal(cmd, prot_bytes,
1076                                                       &prot_iter, exp_data_len,
1077                                                       &data_iter))) {
1078                                 vq_err(vq, "Failed to map iov to sgl\n");
1079                                 vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
1080                                 goto err;
1081                         }
1082                 }
1083                 /*
1084                  * Save the descriptor from vhost_get_vq_desc() to be used to
1085                  * complete the virtio-scsi request in TCM callback context via
1086                  * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
1087                  */
1088                 cmd->tvc_vq_desc = vc.head;
1089                 /*
1090                  * Dispatch cmd descriptor for cmwq execution in process
1091                  * context provided by vhost_scsi_workqueue.  This also ensures
1092                  * cmd is executed on the same kworker CPU as this vhost
1093                  * thread to gain positive L2 cache locality effects.
1094                  */
1095                 INIT_WORK(&cmd->work, vhost_scsi_submission_work);
1096                 queue_work(vhost_scsi_workqueue, &cmd->work);
1097                 ret = 0;
1098 err:
1099                 /*
1100                  * ENXIO:  No more requests, or read error, wait for next kick
1101                  * EINVAL: Invalid response buffer, drop the request
1102                  * EIO:    Respond with bad target
1103                  * EAGAIN: Pending request
1104                  */
1105                 if (ret == -ENXIO)
1106                         break;
1107                 else if (ret == -EIO)
1108                         vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
1109         } while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
1110 out:
1111         mutex_unlock(&vq->mutex);
1112 }
1113
1114 static void
1115 vhost_scsi_send_tmf_reject(struct vhost_scsi *vs,
1116                            struct vhost_virtqueue *vq,
1117                            struct vhost_scsi_ctx *vc)
1118 {
1119         struct virtio_scsi_ctrl_tmf_resp rsp;
1120         struct iov_iter iov_iter;
1121         int ret;
1122
1123         pr_debug("%s\n", __func__);
1124         memset(&rsp, 0, sizeof(rsp));
1125         rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
1126
1127         iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
1128
1129         ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
1130         if (likely(ret == sizeof(rsp)))
1131                 vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
1132         else
1133                 pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
1134 }
1135
1136 static void
1137 vhost_scsi_send_an_resp(struct vhost_scsi *vs,
1138                         struct vhost_virtqueue *vq,
1139                         struct vhost_scsi_ctx *vc)
1140 {
1141         struct virtio_scsi_ctrl_an_resp rsp;
1142         struct iov_iter iov_iter;
1143         int ret;
1144
1145         pr_debug("%s\n", __func__);
1146         memset(&rsp, 0, sizeof(rsp));   /* event_actual = 0 */
1147         rsp.response = VIRTIO_SCSI_S_OK;
1148
1149         iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
1150
1151         ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
1152         if (likely(ret == sizeof(rsp)))
1153                 vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
1154         else
1155                 pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
1156 }
1157
1158 static void
1159 vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1160 {
1161         union {
1162                 __virtio32 type;
1163                 struct virtio_scsi_ctrl_an_req an;
1164                 struct virtio_scsi_ctrl_tmf_req tmf;
1165         } v_req;
1166         struct vhost_scsi_ctx vc;
1167         size_t typ_size;
1168         int ret, c = 0;
1169
1170         mutex_lock(&vq->mutex);
1171         /*
1172          * We can handle the vq only after the endpoint is setup by calling the
1173          * VHOST_SCSI_SET_ENDPOINT ioctl.
1174          */
1175         if (!vq->private_data)
1176                 goto out;
1177
1178         memset(&vc, 0, sizeof(vc));
1179
1180         vhost_disable_notify(&vs->dev, vq);
1181
1182         do {
1183                 ret = vhost_scsi_get_desc(vs, vq, &vc);
1184                 if (ret)
1185                         goto err;
1186
1187                 /*
1188                  * Get the request type first in order to set up the
1189                  * other parameters, which depend on it.
1190                  */
1191                 vc.req = &v_req.type;
1192                 typ_size = sizeof(v_req.type);
1193
1194                 if (unlikely(!copy_from_iter_full(vc.req, typ_size,
1195                                                   &vc.out_iter))) {
1196                         vq_err(vq, "Faulted on copy_from_iter tmf type\n");
1197                         /*
1198                          * The size of the response buffer depends on the
1199                          * request type and must be validated against it.
1200                          * Since the request type is not known, don't send
1201                          * a response.
1202                          */
1203                         continue;
1204                 }
1205
1206                 switch (vhost32_to_cpu(vq, v_req.type)) {
1207                 case VIRTIO_SCSI_T_TMF:
1208                         vc.req = &v_req.tmf;
1209                         vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
1210                         vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
1211                         vc.lunp = &v_req.tmf.lun[0];
1212                         vc.target = &v_req.tmf.lun[1];
1213                         break;
1214                 case VIRTIO_SCSI_T_AN_QUERY:
1215                 case VIRTIO_SCSI_T_AN_SUBSCRIBE:
1216                         vc.req = &v_req.an;
1217                         vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req);
1218                         vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
1219                         vc.lunp = &v_req.an.lun[0];
1220                         vc.target = NULL;
1221                         break;
1222                 default:
1223                         vq_err(vq, "Unknown control request %d\n", v_req.type);
1224                         continue;
1225                 }
1226
1227                 /*
1228                  * Validate the size of request and response buffers.
1229                  * Check for a sane response buffer so we can report
1230                  * early errors back to the guest.
1231                  */
1232                 ret = vhost_scsi_chk_size(vq, &vc);
1233                 if (ret)
1234                         goto err;
1235
1236                 /*
1237                  * Get the rest of the request now that its size is known.
1238                  */
1239                 vc.req += typ_size;
1240                 vc.req_size -= typ_size;
1241
1242                 ret = vhost_scsi_get_req(vq, &vc, NULL);
1243                 if (ret)
1244                         goto err;
1245
1246                 if (v_req.type == VIRTIO_SCSI_T_TMF)
1247                         vhost_scsi_send_tmf_reject(vs, vq, &vc);
1248                 else
1249                         vhost_scsi_send_an_resp(vs, vq, &vc);
1250 err:
1251                 /*
1252                  * ENXIO:  No more requests, or read error, wait for next kick
1253                  * EINVAL: Invalid response buffer, drop the request
1254                  * EIO:    Respond with bad target
1255                  * EAGAIN: Pending request
1256                  */
1257                 if (ret == -ENXIO)
1258                         break;
1259                 else if (ret == -EIO)
1260                         vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
1261         } while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
1262 out:
1263         mutex_unlock(&vq->mutex);
1264 }
1265
1266 static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
1267 {
1268         struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1269                                                 poll.work);
1270         struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1271
1272         pr_debug("%s: The handling func for control queue.\n", __func__);
1273         vhost_scsi_ctl_handle_vq(vs, vq);
1274 }
1275
1276 static void
1277 vhost_scsi_send_evt(struct vhost_scsi *vs,
1278                    struct vhost_scsi_tpg *tpg,
1279                    struct se_lun *lun,
1280                    u32 event,
1281                    u32 reason)
1282 {
1283         struct vhost_scsi_evt *evt;
1284
1285         evt = vhost_scsi_allocate_evt(vs, event, reason);
1286         if (!evt)
1287                 return;
1288
1289         if (tpg && lun) {
1290                 /* TODO: share lun setup code with virtio-scsi.ko */
1291                 /*
1292                  * Note: evt->event is zeroed when we allocate it and
1293                  * lun[4-7] need to be zero according to virtio-scsi spec.
1294                  */
1295                 evt->event.lun[0] = 0x01;
1296                 evt->event.lun[1] = tpg->tport_tpgt;
1297                 if (lun->unpacked_lun >= 256)
1298                         evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
1299                 evt->event.lun[3] = lun->unpacked_lun & 0xFF;
1300         }
1301
1302         llist_add(&evt->list, &vs->vs_event_list);
1303         vhost_work_queue(&vs->dev, &vs->vs_event_work);
1304 }
1305
1306 static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
1307 {
1308         struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1309                                                 poll.work);
1310         struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1311
1312         mutex_lock(&vq->mutex);
1313         if (!vq->private_data)
1314                 goto out;
1315
1316         if (vs->vs_events_missed)
1317                 vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
1318 out:
1319         mutex_unlock(&vq->mutex);
1320 }
1321
1322 static void vhost_scsi_handle_kick(struct vhost_work *work)
1323 {
1324         struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1325                                                 poll.work);
1326         struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1327
1328         vhost_scsi_handle_vq(vs, vq);
1329 }
1330
1331 static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
1332 {
1333         vhost_poll_flush(&vs->vqs[index].vq.poll);
1334 }
1335
1336 /* Callers must hold dev mutex */
1337 static void vhost_scsi_flush(struct vhost_scsi *vs)
1338 {
1339         struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
1340         int i;
1341
1342         /* Init new inflight and remember the old inflight */
1343         vhost_scsi_init_inflight(vs, old_inflight);
1344
1345         /*
1346          * The inflight->kref was initialized to 1. We decrement it here to
1347          * indicate the start of the flush operation so that it will reach 0
1348          * when all the reqs are finished.
1349          */
1350         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1351                 kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
1352
1353         /* Flush both the vhost poll and vhost work */
1354         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1355                 vhost_scsi_flush_vq(vs, i);
1356         vhost_work_flush(&vs->dev, &vs->vs_completion_work);
1357         vhost_work_flush(&vs->dev, &vs->vs_event_work);
1358
1359         /* Wait for all reqs issued before the flush to be finished */
1360         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1361                 wait_for_completion(&old_inflight[i]->comp);
1362 }
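/*
 * Ordering sketch for the flush above: (1) flip every vq to a fresh
 * inflight counter so new commands are tracked separately, (2) drop the
 * initial kref on each old counter, (3) flush the poll and work items so
 * no handler can still be taking references on the old counters, then
 * (4) wait on each old counter's completion, which fires once its kref
 * reaches zero.
 */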
1363
1364 /*
1365  * Called from vhost_scsi_ioctl() context to walk the list of available
1366  * vhost_scsi_tpg with an active struct vhost_scsi_nexus
1367  *
1368  *  The lock nesting rule is:
1369  *    vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
1370  */
1371 static int
1372 vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1373                         struct vhost_scsi_target *t)
1374 {
1375         struct se_portal_group *se_tpg;
1376         struct vhost_scsi_tport *tv_tport;
1377         struct vhost_scsi_tpg *tpg;
1378         struct vhost_scsi_tpg **vs_tpg;
1379         struct vhost_virtqueue *vq;
1380         int index, ret, i, len;
1381         bool match = false;
1382
1383         mutex_lock(&vhost_scsi_mutex);
1384         mutex_lock(&vs->dev.mutex);
1385
1386         /* Verify that the rings have been set up correctly. */
1387         for (index = 0; index < vs->dev.nvqs; ++index) {
1389                 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1390                         ret = -EFAULT;
1391                         goto out;
1392                 }
1393         }
1394
1395         len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
1396         vs_tpg = kzalloc(len, GFP_KERNEL);
1397         if (!vs_tpg) {
1398                 ret = -ENOMEM;
1399                 goto out;
1400         }
1401         if (vs->vs_tpg)
1402                 memcpy(vs_tpg, vs->vs_tpg, len);
1403
1404         list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
1405                 mutex_lock(&tpg->tv_tpg_mutex);
1406                 if (!tpg->tpg_nexus) {
1407                         mutex_unlock(&tpg->tv_tpg_mutex);
1408                         continue;
1409                 }
1410                 if (tpg->tv_tpg_vhost_count != 0) {
1411                         mutex_unlock(&tpg->tv_tpg_mutex);
1412                         continue;
1413                 }
1414                 tv_tport = tpg->tport;
1415
1416                 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1417                         if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
1418                                 kfree(vs_tpg);
1419                                 mutex_unlock(&tpg->tv_tpg_mutex);
1420                                 ret = -EEXIST;
1421                                 goto out;
1422                         }
1423                         /*
1424                          * In order to ensure individual vhost-scsi configfs
1425                          * groups cannot be removed while in use by vhost ioctl,
1426                          * go ahead and take an explicit se_tpg->tpg_group.cg_item
1427                          * dependency now.
1428                          */
1429                         se_tpg = &tpg->se_tpg;
1430                         ret = target_depend_item(&se_tpg->tpg_group.cg_item);
1431                         if (ret) {
1432                                 pr_warn("target_depend_item() failed: %d\n", ret);
1433                                 kfree(vs_tpg);
1434                                 mutex_unlock(&tpg->tv_tpg_mutex);
1435                                 goto out;
1436                         }
1437                         tpg->tv_tpg_vhost_count++;
1438                         tpg->vhost_scsi = vs;
1439                         vs_tpg[tpg->tport_tpgt] = tpg;
1440                         match = true;
1441                 }
1442                 mutex_unlock(&tpg->tv_tpg_mutex);
1443         }
1444
1445         if (match) {
1446                 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1447                        sizeof(vs->vs_vhost_wwpn));
1448                 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1449                         vq = &vs->vqs[i].vq;
1450                         mutex_lock(&vq->mutex);
1451                         vq->private_data = vs_tpg;
1452                         vhost_vq_init_access(vq);
1453                         mutex_unlock(&vq->mutex);
1454                 }
1455                 ret = 0;
1456         } else {
1457                 ret = -EEXIST;
1458         }
1459
1460         /*
1461          * Act like synchronize_rcu(): wait until all in-flight requests
1462          * that may still reference the old vs->vs_tpg have finished.
1463          */
1464         vhost_scsi_flush(vs);
1465         kfree(vs->vs_tpg);
1466         vs->vs_tpg = vs_tpg;
1467
1468 out:
1469         mutex_unlock(&vs->dev.mutex);
1470         mutex_unlock(&vhost_scsi_mutex);
1471         return ret;
1472 }
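
     /*
      * Illustrative userspace sketch (not part of this driver): a VMM binds
      * an open vhost-scsi device to a configured target port by WWPN. The
      * WWN below is a made-up example, and the memory table plus vrings
      * must already have been set up through the generic vhost ioctls or
      * the vhost_vq_access_ok() check above fails with -EFAULT.
      *
      *	struct vhost_scsi_target backend = { 0 };
      *	int fd = open("/dev/vhost-scsi", O_RDWR);
      *
      *	ioctl(fd, VHOST_SET_OWNER, NULL);
      *	... VHOST_SET_MEM_TABLE, VHOST_SET_VRING_* setup ...
      *	snprintf(backend.vhost_wwpn, sizeof(backend.vhost_wwpn),
      *		 "naa.600140554cf3a18e");
      *	ioctl(fd, VHOST_SCSI_SET_ENDPOINT, &backend);
      */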
1473
1474 static int
1475 vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1476                           struct vhost_scsi_target *t)
1477 {
1478         struct se_portal_group *se_tpg;
1479         struct vhost_scsi_tport *tv_tport;
1480         struct vhost_scsi_tpg *tpg;
1481         struct vhost_virtqueue *vq;
1482         bool match = false;
1483         int index, ret, i;
1484         u8 target;
1485
1486         mutex_lock(&vhost_scsi_mutex);
1487         mutex_lock(&vs->dev.mutex);
1488         /* Verify that ring has been setup correctly. */
1489         for (index = 0; index < vs->dev.nvqs; ++index) {
1490                 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1491                         ret = -EFAULT;
1492                         goto err_dev;
1493                 }
1494         }
1495
1496         if (!vs->vs_tpg) {
1497                 ret = 0;
1498                 goto err_dev;
1499         }
1500
1501         for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1502                 target = i;
1503                 tpg = vs->vs_tpg[target];
1504                 if (!tpg)
1505                         continue;
1506
1507                 mutex_lock(&tpg->tv_tpg_mutex);
1508                 tv_tport = tpg->tport;
1509                 if (!tv_tport) {
1510                         ret = -ENODEV;
1511                         goto err_tpg;
1512                 }
1513
1514                 if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1515                         pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
1516                                 " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
1517                                 tv_tport->tport_name, tpg->tport_tpgt,
1518                                 t->vhost_wwpn, t->vhost_tpgt);
1519                         ret = -EINVAL;
1520                         goto err_tpg;
1521                 }
1522                 tpg->tv_tpg_vhost_count--;
1523                 tpg->vhost_scsi = NULL;
1524                 vs->vs_tpg[target] = NULL;
1525                 match = true;
1526                 mutex_unlock(&tpg->tv_tpg_mutex);
1527                 /*
1528                  * Release se_tpg->tpg_group.cg_item configfs dependency now
1529                  * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
1530                  */
1531                 se_tpg = &tpg->se_tpg;
1532                 target_undepend_item(&se_tpg->tpg_group.cg_item);
1533         }
1534         if (match) {
1535                 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1536                         vq = &vs->vqs[i].vq;
1537                         mutex_lock(&vq->mutex);
1538                         vq->private_data = NULL;
1539                         mutex_unlock(&vq->mutex);
1540                 }
1541         }
1542         /*
1543          * Act like synchronize_rcu(): wait until all in-flight requests
1544          * that may still reference the old vs->vs_tpg have finished.
1545          */
1546         vhost_scsi_flush(vs);
1547         kfree(vs->vs_tpg);
1548         vs->vs_tpg = NULL;
1549         WARN_ON(vs->vs_events_nr);
1550         mutex_unlock(&vs->dev.mutex);
1551         mutex_unlock(&vhost_scsi_mutex);
1552         return 0;
1553
1554 err_tpg:
1555         mutex_unlock(&tpg->tv_tpg_mutex);
1556 err_dev:
1557         mutex_unlock(&vs->dev.mutex);
1558         mutex_unlock(&vhost_scsi_mutex);
1559         return ret;
1560 }
1561
1562 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1563 {
1564         struct vhost_virtqueue *vq;
1565         int i;
1566
1567         if (features & ~VHOST_SCSI_FEATURES)
1568                 return -EOPNOTSUPP;
1569
1570         mutex_lock(&vs->dev.mutex);
1571         if ((features & (1 << VHOST_F_LOG_ALL)) &&
1572             !vhost_log_access_ok(&vs->dev)) {
1573                 mutex_unlock(&vs->dev.mutex);
1574                 return -EFAULT;
1575         }
1576
1577         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1578                 vq = &vs->vqs[i].vq;
1579                 mutex_lock(&vq->mutex);
1580                 vq->acked_features = features;
1581                 mutex_unlock(&vq->mutex);
1582         }
1583         mutex_unlock(&vs->dev.mutex);
1584         return 0;
1585 }
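
     /*
      * Feature negotiation follows the generic vhost pattern. An
      * illustrative userspace sketch (fd is an open /dev/vhost-scsi
      * descriptor), e.g. clearing VIRTIO_SCSI_F_T10_PI before acking:
      *
      *	__u64 features;
      *
      *	ioctl(fd, VHOST_GET_FEATURES, &features);
      *	features &= ~(1ULL << VIRTIO_SCSI_F_T10_PI);
      *	ioctl(fd, VHOST_SET_FEATURES, &features);
      *
      * Acking any bit outside VHOST_SCSI_FEATURES is rejected above with
      * -EOPNOTSUPP.
      */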
1586
1587 static int vhost_scsi_open(struct inode *inode, struct file *f)
1588 {
1589         struct vhost_scsi *vs;
1590         struct vhost_virtqueue **vqs;
1591         int r = -ENOMEM, i;
1592
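             /*
              * struct vhost_scsi is large, so try a physically contiguous
              * allocation first without retries or OOM warnings, and fall
              * back to vmalloc()ed memory; kvfree() handles either case.
              */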
1593         vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL);
1594         if (!vs) {
1595                 vs = vzalloc(sizeof(*vs));
1596                 if (!vs)
1597                         goto err_vs;
1598         }
1599
1600         vqs = kmalloc_array(VHOST_SCSI_MAX_VQ, sizeof(*vqs), GFP_KERNEL);
1601         if (!vqs)
1602                 goto err_vqs;
1603
1604         vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1605         vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
1606
1607         vs->vs_events_nr = 0;
1608         vs->vs_events_missed = false;
1609
1610         vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
1611         vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1612         vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1613         vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1614         for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1615                 vqs[i] = &vs->vqs[i].vq;
1616                 vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1617         }
1618         vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV,
1619                        VHOST_SCSI_WEIGHT, 0);
1620
1621         vhost_scsi_init_inflight(vs, NULL);
1622
1623         f->private_data = vs;
1624         return 0;
1625
1626 err_vqs:
1627         kvfree(vs);
1628 err_vs:
1629         return r;
1630 }
1631
1632 static int vhost_scsi_release(struct inode *inode, struct file *f)
1633 {
1634         struct vhost_scsi *vs = f->private_data;
1635         struct vhost_scsi_target t;
1636
1637         mutex_lock(&vs->dev.mutex);
1638         memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
1639         mutex_unlock(&vs->dev.mutex);
1640         vhost_scsi_clear_endpoint(vs, &t);
1641         vhost_dev_stop(&vs->dev);
1642         vhost_dev_cleanup(&vs->dev);
1643         /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
1644         vhost_scsi_flush(vs);
1645         kfree(vs->dev.vqs);
1646         kvfree(vs);
1647         return 0;
1648 }
1649
1650 static long
1651 vhost_scsi_ioctl(struct file *f,
1652                  unsigned int ioctl,
1653                  unsigned long arg)
1654 {
1655         struct vhost_scsi *vs = f->private_data;
1656         struct vhost_scsi_target backend;
1657         void __user *argp = (void __user *)arg;
1658         u64 __user *featurep = argp;
1659         u32 __user *eventsp = argp;
1660         u32 events_missed;
1661         u64 features;
1662         int r, abi_version = VHOST_SCSI_ABI_VERSION;
1663         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1664
1665         switch (ioctl) {
1666         case VHOST_SCSI_SET_ENDPOINT:
1667                 if (copy_from_user(&backend, argp, sizeof(backend)))
1668                         return -EFAULT;
1669                 if (backend.reserved != 0)
1670                         return -EOPNOTSUPP;
1671
1672                 return vhost_scsi_set_endpoint(vs, &backend);
1673         case VHOST_SCSI_CLEAR_ENDPOINT:
1674                 if (copy_from_user(&backend, argp, sizeof(backend)))
1675                         return -EFAULT;
1676                 if (backend.reserved != 0)
1677                         return -EOPNOTSUPP;
1678
1679                 return vhost_scsi_clear_endpoint(vs, &backend);
1680         case VHOST_SCSI_GET_ABI_VERSION:
1681                 if (copy_to_user(argp, &abi_version, sizeof(abi_version)))
1682                         return -EFAULT;
1683                 return 0;
1684         case VHOST_SCSI_SET_EVENTS_MISSED:
1685                 if (get_user(events_missed, eventsp))
1686                         return -EFAULT;
1687                 mutex_lock(&vq->mutex);
1688                 vs->vs_events_missed = events_missed;
1689                 mutex_unlock(&vq->mutex);
1690                 return 0;
1691         case VHOST_SCSI_GET_EVENTS_MISSED:
1692                 mutex_lock(&vq->mutex);
1693                 events_missed = vs->vs_events_missed;
1694                 mutex_unlock(&vq->mutex);
1695                 if (put_user(events_missed, eventsp))
1696                         return -EFAULT;
1697                 return 0;
1698         case VHOST_GET_FEATURES:
1699                 features = VHOST_SCSI_FEATURES;
1700                 if (copy_to_user(featurep, &features, sizeof(features)))
1701                         return -EFAULT;
1702                 return 0;
1703         case VHOST_SET_FEATURES:
1704                 if (copy_from_user(&features, featurep, sizeof(features)))
1705                         return -EFAULT;
1706                 return vhost_scsi_set_features(vs, features);
1707         default:
1708                 mutex_lock(&vs->dev.mutex);
1709                 r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
1710                 /* TODO: flush backend after dev ioctl. */
1711                 if (r == -ENOIOCTLCMD)
1712                         r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
1713                 mutex_unlock(&vs->dev.mutex);
1714                 return r;
1715         }
1716 }
1717
1718 #ifdef CONFIG_COMPAT
1719 static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
1720                                 unsigned long arg)
1721 {
1722         return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
1723 }
1724 #endif
1725
1726 static const struct file_operations vhost_scsi_fops = {
1727         .owner          = THIS_MODULE,
1728         .release        = vhost_scsi_release,
1729         .unlocked_ioctl = vhost_scsi_ioctl,
1730 #ifdef CONFIG_COMPAT
1731         .compat_ioctl   = vhost_scsi_compat_ioctl,
1732 #endif
1733         .open           = vhost_scsi_open,
1734         .llseek         = noop_llseek,
1735 };
1736
1737 static struct miscdevice vhost_scsi_misc = {
1738         MISC_DYNAMIC_MINOR,
1739         "vhost-scsi",
1740         &vhost_scsi_fops,
1741 };
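
     /*
      * Registering this miscdevice creates the /dev/vhost-scsi character
      * device node (dynamic minor) that userspace VMMs open; all further
      * interaction goes through vhost_scsi_fops above.
      */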
1742
1743 static int __init vhost_scsi_register(void)
1744 {
1745         return misc_register(&vhost_scsi_misc);
1746 }
1747
1748 static void vhost_scsi_deregister(void)
1749 {
1750         misc_deregister(&vhost_scsi_misc);
1751 }
1752
1753 static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
1754 {
1755         switch (tport->tport_proto_id) {
1756         case SCSI_PROTOCOL_SAS:
1757                 return "SAS";
1758         case SCSI_PROTOCOL_FCP:
1759                 return "FCP";
1760         case SCSI_PROTOCOL_ISCSI:
1761                 return "iSCSI";
1762         default:
1763                 break;
1764         }
1765
1766         return "Unknown";
1767 }
1768
1769 static void
1770 vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
1771                   struct se_lun *lun, bool plug)
1772 {
1774         struct vhost_scsi *vs = tpg->vhost_scsi;
1775         struct vhost_virtqueue *vq;
1776         u32 reason;
1777
1778         if (!vs)
1779                 return;
1780
1781         mutex_lock(&vs->dev.mutex);
1782
1783         if (plug)
1784                 reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
1785         else
1786                 reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
1787
1788         vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1789         mutex_lock(&vq->mutex);
1790         if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
1791                 vhost_scsi_send_evt(vs, tpg, lun,
1792                                    VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
1793         mutex_unlock(&vq->mutex);
1794         mutex_unlock(&vs->dev.mutex);
1795 }
1796
1797 static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
1798 {
1799         vhost_scsi_do_plug(tpg, lun, true);
1800 }
1801
1802 static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
1803 {
1804         vhost_scsi_do_plug(tpg, lun, false);
1805 }
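
     /*
      * The plug/unplug helpers above are driven by the configfs port
      * link/unlink callbacks below. A guest that negotiated
      * VIRTIO_SCSI_F_HOTPLUG receives a VIRTIO_SCSI_T_TRANSPORT_RESET
      * event with reason RESCAN (LUN added) or REMOVED (LUN going away)
      * and can rescan the bus; without the feature the event is simply
      * not sent, as seen in vhost_scsi_do_plug().
      */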
1806
1807 static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
1808                                struct se_lun *lun)
1809 {
1810         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1811                                 struct vhost_scsi_tpg, se_tpg);
1812
1813         mutex_lock(&vhost_scsi_mutex);
1814
1815         mutex_lock(&tpg->tv_tpg_mutex);
1816         tpg->tv_tpg_port_count++;
1817         mutex_unlock(&tpg->tv_tpg_mutex);
1818
1819         vhost_scsi_hotplug(tpg, lun);
1820
1821         mutex_unlock(&vhost_scsi_mutex);
1822
1823         return 0;
1824 }
1825
1826 static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
1827                                   struct se_lun *lun)
1828 {
1829         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1830                                 struct vhost_scsi_tpg, se_tpg);
1831
1832         mutex_lock(&vhost_scsi_mutex);
1833
1834         mutex_lock(&tpg->tv_tpg_mutex);
1835         tpg->tv_tpg_port_count--;
1836         mutex_unlock(&tpg->tv_tpg_mutex);
1837
1838         vhost_scsi_hotunplug(tpg, lun);
1839
1840         mutex_unlock(&vhost_scsi_mutex);
1841 }
1842
1843 static void vhost_scsi_free_cmd_map_res(struct se_session *se_sess)
1844 {
1845         struct vhost_scsi_cmd *tv_cmd;
1846         unsigned int i;
1847
1848         if (!se_sess->sess_cmd_map)
1849                 return;
1850
1851         for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
1852                 tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
1853
1854                 kfree(tv_cmd->tvc_sgl);
1855                 kfree(tv_cmd->tvc_prot_sgl);
1856                 kfree(tv_cmd->tvc_upages);
1857         }
1858 }
1859
1860 static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
1861                 struct config_item *item, const char *page, size_t count)
1862 {
1863         struct se_portal_group *se_tpg = attrib_to_tpg(item);
1864         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1865                                 struct vhost_scsi_tpg, se_tpg);
1866         unsigned long val;
1867         int ret = kstrtoul(page, 0, &val);
1868
1869         if (ret) {
1870                 pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
1871                 return ret;
1872         }
1873         if (val != 0 && val != 1 && val != 3) {
1874                 pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
1875                 return -EINVAL;
1876         }
1877         tpg->tv_fabric_prot_type = val;
1878
1879         return count;
1880 }
1881
1882 static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
1883                 struct config_item *item, char *page)
1884 {
1885         struct se_portal_group *se_tpg = attrib_to_tpg(item);
1886         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1887                                 struct vhost_scsi_tpg, se_tpg);
1888
1889         return sprintf(page, "%d\n", tpg->tv_fabric_prot_type);
1890 }
1891
1892 CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);
1893
1894 static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
1895         &vhost_scsi_tpg_attrib_attr_fabric_prot_type,
1896         NULL,
1897 };
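
     /*
      * fabric_prot_type is exposed through configfs; with the usual LIO
      * layout it would typically appear at a path like (shown only for
      * illustration):
      *
      *	/sys/kernel/config/target/vhost/<wwpn>/tpgt_<n>/attrib/fabric_prot_type
      *
      * The store handler above accepts 0, 1 or 3, i.e. no fabric
      * protection or DIF TYPE1/TYPE3 pass-through.
      */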
1898
1899 static int vhost_scsi_nexus_cb(struct se_portal_group *se_tpg,
1900                                struct se_session *se_sess, void *p)
1901 {
1902         struct vhost_scsi_cmd *tv_cmd;
1903         unsigned int i;
1904
1905         for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
1906                 tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
1907
1908                 tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
1909                                           sizeof(struct scatterlist),
1910                                           GFP_KERNEL);
1911                 if (!tv_cmd->tvc_sgl) {
1912                         pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
1913                         goto out;
1914                 }
1915
1916                 tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
1917                                              sizeof(struct page *),
1918                                              GFP_KERNEL);
1919                 if (!tv_cmd->tvc_upages) {
1920                         pr_err("Unable to allocate tv_cmd->tvc_upages\n");
1921                         goto out;
1922                 }
1923
1924                 tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
1925                                                sizeof(struct scatterlist),
1926                                                GFP_KERNEL);
1927                 if (!tv_cmd->tvc_prot_sgl) {
1928                         pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
1929                         goto out;
1930                 }
1931         }
1932         return 0;
1933 out:
1934         vhost_scsi_free_cmd_map_res(se_sess);
1935         return -ENOMEM;
1936 }
1937
1938 static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
1939                                 const char *name)
1940 {
1941         struct vhost_scsi_nexus *tv_nexus;
1942
1943         mutex_lock(&tpg->tv_tpg_mutex);
1944         if (tpg->tpg_nexus) {
1945                 mutex_unlock(&tpg->tv_tpg_mutex);
1946                 pr_debug("tpg->tpg_nexus already exists\n");
1947                 return -EEXIST;
1948         }
1949
1950         tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
1951         if (!tv_nexus) {
1952                 mutex_unlock(&tpg->tv_tpg_mutex);
1953                 pr_err("Unable to allocate struct vhost_scsi_nexus\n");
1954                 return -ENOMEM;
1955         }
1956         /*
1957          * Since we are running in 'demo mode' this call will generate a
1958          * struct se_node_acl for the vhost_scsi struct se_portal_group with
1959          * the SCSI Initiator port name of the passed configfs group 'name'.
1960          */
1961         tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg,
1962                                         VHOST_SCSI_DEFAULT_TAGS,
1963                                         sizeof(struct vhost_scsi_cmd),
1964                                         TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
1965                                         (unsigned char *)name, tv_nexus,
1966                                         vhost_scsi_nexus_cb);
1967         if (IS_ERR(tv_nexus->tvn_se_sess)) {
1968                 mutex_unlock(&tpg->tv_tpg_mutex);
1969                 kfree(tv_nexus);
1970                 return -ENOMEM;
1971         }
1972         tpg->tpg_nexus = tv_nexus;
1973
1974         mutex_unlock(&tpg->tv_tpg_mutex);
1975         return 0;
1976 }
1977
1978 static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
1979 {
1980         struct se_session *se_sess;
1981         struct vhost_scsi_nexus *tv_nexus;
1982
1983         mutex_lock(&tpg->tv_tpg_mutex);
1984         tv_nexus = tpg->tpg_nexus;
1985         if (!tv_nexus) {
1986                 mutex_unlock(&tpg->tv_tpg_mutex);
1987                 return -ENODEV;
1988         }
1989
1990         se_sess = tv_nexus->tvn_se_sess;
1991         if (!se_sess) {
1992                 mutex_unlock(&tpg->tv_tpg_mutex);
1993                 return -ENODEV;
1994         }
1995
1996         if (tpg->tv_tpg_port_count != 0) {
1997                 mutex_unlock(&tpg->tv_tpg_mutex);
1998                 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1999                         " active TPG port count: %d\n",
2000                         tpg->tv_tpg_port_count);
2001                 return -EBUSY;
2002         }
2003
2004         if (tpg->tv_tpg_vhost_count != 0) {
2005                 mutex_unlock(&tpg->tv_tpg_mutex);
2006                 pr_err("Unable to remove TCM_vhost I_T Nexus with"
2007                         " active TPG vhost count: %d\n",
2008                         tpg->tv_tpg_vhost_count);
2009                 return -EBUSY;
2010         }
2011
2012         pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
2013                 " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
2014                 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
2015
2016         vhost_scsi_free_cmd_map_res(se_sess);
2017         /*
2018          * Release the SCSI I_T Nexus to the emulated vhost Target Port
2019          */
2020         target_remove_session(se_sess);
2021         tpg->tpg_nexus = NULL;
2022         mutex_unlock(&tpg->tv_tpg_mutex);
2023
2024         kfree(tv_nexus);
2025         return 0;
2026 }
2027
2028 static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
2029 {
2030         struct se_portal_group *se_tpg = to_tpg(item);
2031         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2032                                 struct vhost_scsi_tpg, se_tpg);
2033         struct vhost_scsi_nexus *tv_nexus;
2034         ssize_t ret;
2035
2036         mutex_lock(&tpg->tv_tpg_mutex);
2037         tv_nexus = tpg->tpg_nexus;
2038         if (!tv_nexus) {
2039                 mutex_unlock(&tpg->tv_tpg_mutex);
2040                 return -ENODEV;
2041         }
2042         ret = snprintf(page, PAGE_SIZE, "%s\n",
2043                         tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
2044         mutex_unlock(&tpg->tv_tpg_mutex);
2045
2046         return ret;
2047 }
2048
2049 static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
2050                 const char *page, size_t count)
2051 {
2052         struct se_portal_group *se_tpg = to_tpg(item);
2053         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2054                                 struct vhost_scsi_tpg, se_tpg);
2055         struct vhost_scsi_tport *tport_wwn = tpg->tport;
2056         unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
2057         int ret;
2058         /*
2059          * Shut down the active I_T nexus if 'NULL' is passed.
2060          */
2061         if (!strncmp(page, "NULL", 4)) {
2062                 ret = vhost_scsi_drop_nexus(tpg);
2063                 return (!ret) ? count : ret;
2064         }
2065         /*
2066          * Otherwise make sure the passed virtual Initiator port WWN matches
2067          * the fabric protocol_id set in vhost_scsi_make_tport(), and call
2068          * vhost_scsi_make_nexus().
2069          */
2070         if (strlen(page) >= VHOST_SCSI_NAMELEN) {
2071                 pr_err("Emulated NAA SAS Address: %s, exceeds"
2072                                 " max: %d\n", page, VHOST_SCSI_NAMELEN);
2073                 return -EINVAL;
2074         }
2075         snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
2076
2077         ptr = strstr(i_port, "naa.");
2078         if (ptr) {
2079                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
2080                         pr_err("Passed SAS Initiator Port %s does not"
2081                                 " match target port protoid: %s\n", i_port,
2082                                 vhost_scsi_dump_proto_id(tport_wwn));
2083                         return -EINVAL;
2084                 }
2085                 port_ptr = &i_port[0];
2086                 goto check_newline;
2087         }
2088         ptr = strstr(i_port, "fc.");
2089         if (ptr) {
2090                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
2091                         pr_err("Passed FCP Initiator Port %s does not"
2092                                 " match target port protoid: %s\n", i_port,
2093                                 vhost_scsi_dump_proto_id(tport_wwn));
2094                         return -EINVAL;
2095                 }
2096                 port_ptr = &i_port[3]; /* Skip over "fc." */
2097                 goto check_newline;
2098         }
2099         ptr = strstr(i_port, "iqn.");
2100         if (ptr) {
2101                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
2102                         pr_err("Passed iSCSI Initiator Port %s does not"
2103                                 " match target port protoid: %s\n", i_port,
2104                                 vhost_scsi_dump_proto_id(tport_wwn));
2105                         return -EINVAL;
2106                 }
2107                 port_ptr = &i_port[0];
2108                 goto check_newline;
2109         }
2110         pr_err("Unable to locate prefix for emulated Initiator Port:"
2111                         " %s\n", i_port);
2112         return -EINVAL;
2113         /*
2114          * Clear any trailing newline for the NAA WWN
2115          */
2116 check_newline:
2117         if (i_port[strlen(i_port)-1] == '\n')
2118                 i_port[strlen(i_port)-1] = '\0';
2119
2120         ret = vhost_scsi_make_nexus(tpg, port_ptr);
2121         if (ret < 0)
2122                 return ret;
2123
2124         return count;
2125 }
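
     /*
      * Accepted initiator-port names mirror the prefixes parsed in
      * vhost_scsi_make_tport() below: for example (made-up identifiers)
      * "naa.600140554cf3a18e" for SAS, "fc.8086.87654321" for FCP (the
      * "fc." prefix itself is skipped), or "iqn.2023-01.org.example:init"
      * for iSCSI. Writing "NULL" tears the active nexus down instead.
      */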
2126
2127 CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);
2128
2129 static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
2130         &vhost_scsi_tpg_attr_nexus,
2131         NULL,
2132 };
2133
2134 static struct se_portal_group *
2135 vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
2136 {
2137         struct vhost_scsi_tport *tport = container_of(wwn,
2138                         struct vhost_scsi_tport, tport_wwn);
2139
2140         struct vhost_scsi_tpg *tpg;
2141         u16 tpgt;
2142         int ret;
2143
2144         if (strstr(name, "tpgt_") != name)
2145                 return ERR_PTR(-EINVAL);
2146         if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
2147                 return ERR_PTR(-EINVAL);
2148
2149         tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
2150         if (!tpg) {
2151                 pr_err("Unable to allocate struct vhost_scsi_tpg\n");
2152                 return ERR_PTR(-ENOMEM);
2153         }
2154         mutex_init(&tpg->tv_tpg_mutex);
2155         INIT_LIST_HEAD(&tpg->tv_tpg_list);
2156         tpg->tport = tport;
2157         tpg->tport_tpgt = tpgt;
2158
2159         ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
2160         if (ret < 0) {
2161                 kfree(tpg);
2162                 return NULL;
2163         }
2164         mutex_lock(&vhost_scsi_mutex);
2165         list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
2166         mutex_unlock(&vhost_scsi_mutex);
2167
2168         return &tpg->se_tpg;
2169 }
2170
2171 static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
2172 {
2173         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2174                                 struct vhost_scsi_tpg, se_tpg);
2175
2176         mutex_lock(&vhost_scsi_mutex);
2177         list_del(&tpg->tv_tpg_list);
2178         mutex_unlock(&vhost_scsi_mutex);
2179         /*
2180          * Release the virtual I_T Nexus for this vhost TPG
2181          */
2182         vhost_scsi_drop_nexus(tpg);
2183         /*
2184          * Deregister the se_tpg from TCM.
2185          */
2186         core_tpg_deregister(se_tpg);
2187         kfree(tpg);
2188 }
2189
2190 static struct se_wwn *
2191 vhost_scsi_make_tport(struct target_fabric_configfs *tf,
2192                      struct config_group *group,
2193                      const char *name)
2194 {
2195         struct vhost_scsi_tport *tport;
2196         char *ptr;
2197         u64 wwpn = 0;
2198         int off = 0;
2199
2200         /* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
2201                 return ERR_PTR(-EINVAL); */
2202
2203         tport = kzalloc(sizeof(*tport), GFP_KERNEL);
2204         if (!tport) {
2205                 pr_err("Unable to allocate struct vhost_scsi_tport\n");
2206                 return ERR_PTR(-ENOMEM);
2207         }
2208         tport->tport_wwpn = wwpn;
2209         /*
2210          * Determine the emulated Protocol Identifier and Target Port Name
2211          * based on the incoming configfs directory name.
2212          */
2213         ptr = strstr(name, "naa.");
2214         if (ptr) {
2215                 tport->tport_proto_id = SCSI_PROTOCOL_SAS;
2216                 goto check_len;
2217         }
2218         ptr = strstr(name, "fc.");
2219         if (ptr) {
2220                 tport->tport_proto_id = SCSI_PROTOCOL_FCP;
2221                 off = 3; /* Skip over "fc." */
2222                 goto check_len;
2223         }
2224         ptr = strstr(name, "iqn.");
2225         if (ptr) {
2226                 tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
2227                 goto check_len;
2228         }
2229
2230         pr_err("Unable to locate prefix for emulated Target Port:"
2231                         " %s\n", name);
2232         kfree(tport);
2233         return ERR_PTR(-EINVAL);
2234
2235 check_len:
2236         if (strlen(name) >= VHOST_SCSI_NAMELEN) {
2237                 pr_err("Emulated %s Address: %s, exceeds"
2238                         " max: %d\n", vhost_scsi_dump_proto_id(tport), name,
2239                         VHOST_SCSI_NAMELEN);
2240                 kfree(tport);
2241                 return ERR_PTR(-EINVAL);
2242         }
2243         snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
2244
2245         pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
2246                 " %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
2247
2248         return &tport->tport_wwn;
2249 }
2250
2251 static void vhost_scsi_drop_tport(struct se_wwn *wwn)
2252 {
2253         struct vhost_scsi_tport *tport = container_of(wwn,
2254                                 struct vhost_scsi_tport, tport_wwn);
2255
2256         pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2257                 " %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
2258                 tport->tport_name);
2259
2260         kfree(tport);
2261 }
2262
2263 static ssize_t
2264 vhost_scsi_wwn_version_show(struct config_item *item, char *page)
2265 {
2266         return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
2267                 " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2268                 utsname()->machine);
2269 }
2270
2271 CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);
2272
2273 static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
2274         &vhost_scsi_wwn_attr_version,
2275         NULL,
2276 };
2277
2278 static const struct target_core_fabric_ops vhost_scsi_ops = {
2279         .module                         = THIS_MODULE,
2280         .fabric_name                    = "vhost",
2281         .max_data_sg_nents              = VHOST_SCSI_PREALLOC_SGLS,
2282         .tpg_get_wwn                    = vhost_scsi_get_fabric_wwn,
2283         .tpg_get_tag                    = vhost_scsi_get_tpgt,
2284         .tpg_check_demo_mode            = vhost_scsi_check_true,
2285         .tpg_check_demo_mode_cache      = vhost_scsi_check_true,
2286         .tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
2287         .tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
2288         .tpg_check_prot_fabric_only     = vhost_scsi_check_prot_fabric_only,
2289         .tpg_get_inst_index             = vhost_scsi_tpg_get_inst_index,
2290         .release_cmd                    = vhost_scsi_release_cmd,
2291         .check_stop_free                = vhost_scsi_check_stop_free,
2292         .sess_get_index                 = vhost_scsi_sess_get_index,
2293         .sess_get_initiator_sid         = NULL,
2294         .write_pending                  = vhost_scsi_write_pending,
2295         .set_default_node_attributes    = vhost_scsi_set_default_node_attrs,
2296         .get_cmd_state                  = vhost_scsi_get_cmd_state,
2297         .queue_data_in                  = vhost_scsi_queue_data_in,
2298         .queue_status                   = vhost_scsi_queue_status,
2299         .queue_tm_rsp                   = vhost_scsi_queue_tm_rsp,
2300         .aborted_task                   = vhost_scsi_aborted_task,
2301         /*
2302          * Setup callers for generic logic in target_core_fabric_configfs.c
2303          */
2304         .fabric_make_wwn                = vhost_scsi_make_tport,
2305         .fabric_drop_wwn                = vhost_scsi_drop_tport,
2306         .fabric_make_tpg                = vhost_scsi_make_tpg,
2307         .fabric_drop_tpg                = vhost_scsi_drop_tpg,
2308         .fabric_post_link               = vhost_scsi_port_link,
2309         .fabric_pre_unlink              = vhost_scsi_port_unlink,
2310
2311         .tfc_wwn_attrs                  = vhost_scsi_wwn_attrs,
2312         .tfc_tpg_base_attrs             = vhost_scsi_tpg_attrs,
2313         .tfc_tpg_attrib_attrs           = vhost_scsi_tpg_attrib_attrs,
2314 };
2315
2316 static int __init vhost_scsi_init(void)
2317 {
2318         int ret = -ENOMEM;
2319
2320         pr_debug("TCM_VHOST fabric module %s on %s/%s"
2321                 " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2322                 utsname()->machine);
2323
2324         /*
2325          * Use our own dedicated workqueue for submitting I/O into
2326          * target core to avoid contention within system_wq.
2327          */
2328         vhost_scsi_workqueue = alloc_workqueue("vhost_scsi", 0, 0);
2329         if (!vhost_scsi_workqueue)
2330                 goto out;
2331
2332         ret = vhost_scsi_register();
2333         if (ret < 0)
2334                 goto out_destroy_workqueue;
2335
2336         ret = target_register_template(&vhost_scsi_ops);
2337         if (ret < 0)
2338                 goto out_vhost_scsi_deregister;
2339
2340         return 0;
2341
2342 out_vhost_scsi_deregister:
2343         vhost_scsi_deregister();
2344 out_destroy_workqueue:
2345         destroy_workqueue(vhost_scsi_workqueue);
2346 out:
2347         return ret;
2348 }
2349
2350 static void vhost_scsi_exit(void)
2351 {
2352         target_unregister_template(&vhost_scsi_ops);
2353         vhost_scsi_deregister();
2354         destroy_workqueue(vhost_scsi_workqueue);
2355 }
2356
2357 MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2358 MODULE_ALIAS("tcm_vhost");
2359 MODULE_LICENSE("GPL");
2360 module_init(vhost_scsi_init);
2361 module_exit(vhost_scsi_exit);