GNU Linux-libre 4.14.332-gnu1
drivers/scsi/sg.c
/*
 *  History:
 *  Started: Aug 9 by Lawrence Foard (entropy@world.std.com),
 *           to allow user process control of SCSI devices.
 *  Development Sponsored by Killy Corp. NY NY
 *
 * Original driver (sg.c):
 *        Copyright (C) 1992 Lawrence Foard
 * Version 2 and 3 extensions to driver:
 *        Copyright (C) 1998 - 2014 Douglas Gilbert
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 */

static int sg_version_num = 30536;      /* 2 digits for each component */
#define SG_VERSION_STR "3.5.36"

/*
 *  D. P. Gilbert (dgilbert@interlog.com), notes:
 *      - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
 *        the kernel/module needs to be built with CONFIG_SCSI_LOGGING
 *        (otherwise the macros compile to empty statements).
 *
 */
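/*
 * Usage sketch (assumes a CONFIG_SCSI_LOGGING kernel): the messages
 * emitted via SCSI_LOG_TIMEOUT() below are gated by the global SCSI
 * logging mask, which userspace can raise at run time, e.g.:
 *
 *   # echo <mask> > /proc/sys/dev/scsi/logging_level
 *
 * The mask packs one level per facility (see scsi_logging.h); level 3
 * for the timeout facility enables most of the traces in this file.
 * The exact mask value is left symbolic here rather than guessed.
 */
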
#include <linux/module.h>

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/mtio.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/seq_file.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/mutex.h>
#include <linux/atomic.h>
#include <linux/ratelimit.h>
#include <linux/uio.h>
#include <linux/cred.h> /* for sg_check_file_access() */

#include "scsi.h"
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/sg.h>

#include "scsi_logging.h"

#ifdef CONFIG_SCSI_PROC_FS
#include <linux/proc_fs.h>
static char *sg_version_date = "20140603";

static int sg_proc_init(void);
static void sg_proc_cleanup(void);
#endif

#define SG_ALLOW_DIO_DEF 0

#define SG_MAX_DEVS 32768

/* SG_MAX_CDB_SIZE should be 260 (spc4r37 section 3.1.30) however the type
 * of sg_io_hdr::cmd_len can only represent 255. All SCSI commands greater
 * than 16 bytes are "variable length" whose length is a multiple of 4
 */
#define SG_MAX_CDB_SIZE 252

#define SG_DEFAULT_TIMEOUT mult_frac(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)

int sg_big_buff = SG_DEF_RESERVED_SIZE;
/* N.B. This variable is readable and writeable via
   /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
   of this size (or less if there is not enough memory) will be reserved
   for use by this file descriptor. [Deprecated usage: this variable is also
   readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into
   the kernel (i.e. it is not a module).] */
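/*
 * Usage sketch (userspace shell, illustrative values): inspect and raise
 * the default reserve size named above. Only fds opened afterwards pick
 * up the new default; an existing fd resizes its own reserve buffer via
 * ioctl(SG_SET_RESERVED_SIZE):
 *
 *   $ cat /proc/scsi/sg/def_reserved_size
 *   32768
 *   # echo 131072 > /proc/scsi/sg/def_reserved_size
 */
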
static int def_reserved_size = -1;      /* picks up init parameter */
static int sg_allow_dio = SG_ALLOW_DIO_DEF;

static int scatter_elem_sz = SG_SCATTER_SZ;
static int scatter_elem_sz_prev = SG_SCATTER_SZ;

#define SG_SECTOR_SZ 512

static int sg_add_device(struct device *, struct class_interface *);
static void sg_remove_device(struct device *, struct class_interface *);

static DEFINE_IDR(sg_index_idr);
static DEFINE_RWLOCK(sg_index_lock);    /* Also used to lock
                                           file descriptor list for device */

static struct class_interface sg_interface = {
        .add_dev        = sg_add_device,
        .remove_dev     = sg_remove_device,
};

typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
        unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
        unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */
        unsigned bufflen;       /* Size of (aggregate) data buffer */
        struct page **pages;
        int page_order;
        char dio_in_use;        /* 0->indirect IO (or mmap), 1->dio */
        unsigned char cmd_opcode; /* first byte of command */
} Sg_scatter_hold;

struct sg_device;               /* forward declarations */
struct sg_fd;

typedef struct sg_request {     /* SG_MAX_QUEUE requests outstanding per file */
        struct list_head entry; /* list entry */
        struct sg_fd *parentfp; /* NULL -> not in use */
        Sg_scatter_hold data;   /* hold buffer, perhaps scatter list */
        sg_io_hdr_t header;     /* scsi command+info, see <scsi/sg.h> */
        unsigned char sense_b[SCSI_SENSE_BUFFERSIZE];
        char res_used;          /* 1 -> using reserve buffer, 0 -> not ... */
        char orphan;            /* 1 -> drop on sight, 0 -> normal */
        char sg_io_owned;       /* 1 -> packet belongs to SG_IO */
        /* done protected by rq_list_lock */
        char done;              /* 0->before bh, 1->before read, 2->read */
        struct request *rq;
        struct bio *bio;
        struct execute_work ew;
} Sg_request;

typedef struct sg_fd {          /* holds the state of a file descriptor */
        struct list_head sfd_siblings;  /* protected by device's sfd_lock */
        struct sg_device *parentdp;     /* owning device */
        wait_queue_head_t read_wait;    /* queue read until command done */
        rwlock_t rq_list_lock;  /* protect access to list in req_arr */
        struct mutex f_mutex;   /* protect against changes in this fd */
        int timeout;            /* defaults to SG_DEFAULT_TIMEOUT      */
        int timeout_user;       /* defaults to SG_DEFAULT_TIMEOUT_USER */
        Sg_scatter_hold reserve;        /* buffer held for this file descriptor */
        struct list_head rq_list; /* head of request list */
        struct fasync_struct *async_qp; /* used by asynchronous notification */
        Sg_request req_arr[SG_MAX_QUEUE];       /* used as singly-linked list */
        char force_packid;      /* 1 -> pack_id input to read(), 0 -> ignored */
        char cmd_q;             /* 1 -> allow command queuing, 0 -> don't */
        unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */
        char keep_orphan;       /* 0 -> drop orphan (def), 1 -> keep for read() */
        char mmap_called;       /* 0 -> mmap() never called on this fd */
        char res_in_use;        /* 1 -> 'reserve' array in use */
        struct kref f_ref;
        struct execute_work ew;
} Sg_fd;

typedef struct sg_device { /* holds the state of each scsi generic device */
        struct scsi_device *device;
        wait_queue_head_t open_wait;    /* queue open() when O_EXCL present */
        struct mutex open_rel_lock;     /* held when in open() or release() */
        int sg_tablesize;       /* adapter's max scatter-gather table size */
        u32 index;              /* device index number */
        struct list_head sfds;
        rwlock_t sfd_lock;      /* protect access to sfd list */
        atomic_t detaching;     /* 0->device usable, 1->device detaching */
        bool exclude;           /* 1->open(O_EXCL) succeeded and is active */
        int open_cnt;           /* count of opens (perhaps < num(sfds) ) */
        char sgdebug;           /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
        struct gendisk *disk;
        struct cdev * cdev;     /* char_dev [sysfs: /sys/cdev/major/sg<n>] */
        struct kref d_ref;
} Sg_device;

/* tasklet or soft irq callback */
static void sg_rq_end_io(struct request *rq, blk_status_t status);
static int sg_start_req(Sg_request *srp, unsigned char *cmd);
static int sg_finish_rem_req(Sg_request * srp);
static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
                           Sg_request * srp);
static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
                        const char __user *buf, size_t count, int blocking,
                        int read_only, int sg_io_owned, Sg_request **o_srp);
static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
                           unsigned char *cmnd, int timeout, int blocking);
static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
static void sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp);
static void sg_build_reserve(Sg_fd * sfp, int req_size);
static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
static Sg_fd *sg_add_sfp(Sg_device * sdp);
static void sg_remove_sfp(struct kref *);
static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id, bool *busy);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
static Sg_device *sg_get_dev(int dev);
static void sg_device_destroy(struct kref *kref);

#define SZ_SG_HEADER sizeof(struct sg_header)
#define SZ_SG_IO_HDR sizeof(sg_io_hdr_t)
#define SZ_SG_IOVEC sizeof(sg_iovec_t)
#define SZ_SG_REQ_INFO sizeof(sg_req_info_t)

#define sg_printk(prefix, sdp, fmt, a...) \
        sdev_prefix_printk(prefix, (sdp)->device,               \
                           (sdp)->disk->disk_name, fmt, ##a)

/*
 * The SCSI interfaces that use read() and write() as an asynchronous variant of
 * ioctl(..., SG_IO, ...) are fundamentally unsafe, since there are lots of ways
 * to trigger read() and write() calls from various contexts with elevated
 * privileges. This can lead to kernel memory corruption (e.g. if these
 * interfaces are called through splice()) and privilege escalation inside
 * userspace (e.g. if a process with access to such a device passes a file
 * descriptor to a SUID binary as stdin/stdout/stderr).
 *
 * This function provides protection for the legacy API by restricting the
 * calling context.
 */
static int sg_check_file_access(struct file *filp, const char *caller)
{
        if (filp->f_cred != current_real_cred()) {
                pr_err_once("%s: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
                        caller, task_tgid_vnr(current), current->comm);
                return -EPERM;
        }
        if (uaccess_kernel()) {
                pr_err_once("%s: process %d (%s) called from kernel context, this is not allowed.\n",
                        caller, task_tgid_vnr(current), current->comm);
                return -EACCES;
        }
        return 0;
}
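
/*
 * Userspace consequence (illustrative sketch; fd and io_hdr are assumed
 * set up by the caller): keep one security context per sg fd and prefer
 * the synchronous ioctl over the legacy read()/write() pair:
 *
 *   int fd = open("/dev/sg0", O_RDWR);     // plain, same-context usage
 *   if (ioctl(fd, SG_IO, &io_hdr) < 0)
 *           perror("SG_IO");
 *
 * Handing such an fd to a setuid helper that then write()s to it trips
 * the f_cred check above and fails with EPERM.
 */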

static int sg_allow_access(struct file *filp, unsigned char *cmd)
{
        struct sg_fd *sfp = filp->private_data;

        if (sfp->parentdp->device->type == TYPE_SCANNER)
                return 0;

        return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
}

static int
open_wait(Sg_device *sdp, int flags)
{
        int retval = 0;

        if (flags & O_EXCL) {
                while (sdp->open_cnt > 0) {
                        mutex_unlock(&sdp->open_rel_lock);
                        retval = wait_event_interruptible(sdp->open_wait,
                                        (atomic_read(&sdp->detaching) ||
                                         !sdp->open_cnt));
                        mutex_lock(&sdp->open_rel_lock);

                        if (retval) /* -ERESTARTSYS */
                                return retval;
                        if (atomic_read(&sdp->detaching))
                                return -ENODEV;
                }
        } else {
                while (sdp->exclude) {
                        mutex_unlock(&sdp->open_rel_lock);
                        retval = wait_event_interruptible(sdp->open_wait,
                                        (atomic_read(&sdp->detaching) ||
                                         !sdp->exclude));
                        mutex_lock(&sdp->open_rel_lock);

                        if (retval) /* -ERESTARTSYS */
                                return retval;
                        if (atomic_read(&sdp->detaching))
                                return -ENODEV;
                }
        }

        return retval;
}

/* Returns 0 on success, else a negated errno value */
static int
sg_open(struct inode *inode, struct file *filp)
{
        int dev = iminor(inode);
        int flags = filp->f_flags;
        struct request_queue *q;
        Sg_device *sdp;
        Sg_fd *sfp;
        int retval;

        nonseekable_open(inode, filp);
        if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE)))
                return -EPERM; /* Can't lock it with read only access */
        sdp = sg_get_dev(dev);
        if (IS_ERR(sdp))
                return PTR_ERR(sdp);

        SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
                                      "sg_open: flags=0x%x\n", flags));

        /* This driver's module count bumped by fops_get in <linux/fs.h> */
        /* Prevent the device driver from vanishing while we sleep */
        retval = scsi_device_get(sdp->device);
        if (retval)
                goto sg_put;

        retval = scsi_autopm_get_device(sdp->device);
        if (retval)
                goto sdp_put;

        /* scsi_block_when_processing_errors() may block so bypass
         * check if O_NONBLOCK. Permits SCSI commands to be issued
         * during error recovery. Tread carefully. */
        if (!((flags & O_NONBLOCK) ||
              scsi_block_when_processing_errors(sdp->device))) {
                retval = -ENXIO;
                /* we are in error recovery for this device */
                goto error_out;
        }

        mutex_lock(&sdp->open_rel_lock);
        if (flags & O_NONBLOCK) {
                if (flags & O_EXCL) {
                        if (sdp->open_cnt > 0) {
                                retval = -EBUSY;
                                goto error_mutex_locked;
                        }
                } else {
                        if (sdp->exclude) {
                                retval = -EBUSY;
                                goto error_mutex_locked;
                        }
                }
        } else {
                retval = open_wait(sdp, flags);
                if (retval) /* -ERESTARTSYS or -ENODEV */
                        goto error_mutex_locked;
        }

        /* N.B. at this point we are holding the open_rel_lock */
        if (flags & O_EXCL)
                sdp->exclude = true;

        if (sdp->open_cnt < 1) {  /* no existing opens */
                sdp->sgdebug = 0;
                q = sdp->device->request_queue;
                sdp->sg_tablesize = queue_max_segments(q);
        }
        sfp = sg_add_sfp(sdp);
        if (IS_ERR(sfp)) {
                retval = PTR_ERR(sfp);
                goto out_undo;
        }

        filp->private_data = sfp;
        sdp->open_cnt++;
        mutex_unlock(&sdp->open_rel_lock);

        retval = 0;
sg_put:
        kref_put(&sdp->d_ref, sg_device_destroy);
        return retval;

out_undo:
        if (flags & O_EXCL) {
                sdp->exclude = false;   /* undo if error */
                wake_up_interruptible(&sdp->open_wait);
        }
error_mutex_locked:
        mutex_unlock(&sdp->open_rel_lock);
error_out:
        scsi_autopm_put_device(sdp->device);
sdp_put:
        scsi_device_put(sdp->device);
        goto sg_put;
}
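
/*
 * Usage sketch (hypothetical device node): how the O_EXCL/O_NONBLOCK
 * combinations handled above look from userspace:
 *
 *   int fd = open("/dev/sg1", O_RDWR | O_EXCL | O_NONBLOCK);
 *   if (fd < 0 && errno == EBUSY)
 *           ;   // already open elsewhere; O_NONBLOCK avoided sleeping
 *
 * Without O_NONBLOCK the caller sleeps in open_wait() until the other
 * opens are released; a signal yields -ERESTARTSYS and a detaching
 * device -ENODEV. O_EXCL combined with O_RDONLY is rejected with -EPERM.
 */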

/* Release resources associated with a successful sg_open()
 * Returns 0 on success, else a negated errno value */
static int
sg_release(struct inode *inode, struct file *filp)
{
        Sg_device *sdp;
        Sg_fd *sfp;

        if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
                return -ENXIO;
        SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_release\n"));

        mutex_lock(&sdp->open_rel_lock);
        scsi_autopm_put_device(sdp->device);
        kref_put(&sfp->f_ref, sg_remove_sfp);
        sdp->open_cnt--;

        /* possibly many open()s waiting on exclude clearing, start many;
         * only open(O_EXCL)s wait on 0==open_cnt so only start one */
        if (sdp->exclude) {
                sdp->exclude = false;
                wake_up_interruptible_all(&sdp->open_wait);
        } else if (0 == sdp->open_cnt) {
                wake_up_interruptible(&sdp->open_wait);
        }
        mutex_unlock(&sdp->open_rel_lock);
        return 0;
}

static ssize_t
sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
{
        Sg_device *sdp;
        Sg_fd *sfp;
        Sg_request *srp;
        int req_pack_id = -1;
        bool busy;
        sg_io_hdr_t *hp;
        struct sg_header *old_hdr = NULL;
        int retval = 0;

        /*
         * This could cause a response to be stranded. Close the associated
         * file descriptor to free up any resources being held.
         */
        retval = sg_check_file_access(filp, __func__);
        if (retval)
                return retval;

        if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
                return -ENXIO;
        SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
                                      "sg_read: count=%d\n", (int) count));

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;
        if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
                old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
                if (!old_hdr)
                        return -ENOMEM;
                if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) {
                        retval = -EFAULT;
                        goto free_old_hdr;
                }
                if (old_hdr->reply_len < 0) {
                        if (count >= SZ_SG_IO_HDR) {
                                sg_io_hdr_t *new_hdr;
                                new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL);
                                if (!new_hdr) {
                                        retval = -ENOMEM;
                                        goto free_old_hdr;
                                }
                                retval = __copy_from_user
                                    (new_hdr, buf, SZ_SG_IO_HDR);
                                req_pack_id = new_hdr->pack_id;
                                kfree(new_hdr);
                                if (retval) {
                                        retval = -EFAULT;
                                        goto free_old_hdr;
                                }
                        }
                } else
                        req_pack_id = old_hdr->pack_id;
        }
        srp = sg_get_rq_mark(sfp, req_pack_id, &busy);
        if (!srp) {             /* now wait on packet to arrive */
                if (filp->f_flags & O_NONBLOCK) {
                        retval = -EAGAIN;
                        goto free_old_hdr;
                }
                retval = wait_event_interruptible(sfp->read_wait,
                        ((srp = sg_get_rq_mark(sfp, req_pack_id, &busy)) ||
                        (!busy && atomic_read(&sdp->detaching))));
                if (!srp) {
                        /* signal or detaching */
                        if (!retval)
                                retval = -ENODEV;
                        goto free_old_hdr;
                }
        }
        if (srp->header.interface_id != '\0') {
                retval = sg_new_read(sfp, buf, count, srp);
                goto free_old_hdr;
        }

        hp = &srp->header;
        if (old_hdr == NULL) {
                old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
                if (! old_hdr) {
                        retval = -ENOMEM;
                        goto free_old_hdr;
                }
        }
        memset(old_hdr, 0, SZ_SG_HEADER);
        old_hdr->reply_len = (int) hp->timeout;
        old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */
        old_hdr->pack_id = hp->pack_id;
        old_hdr->twelve_byte =
            ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0;
        old_hdr->target_status = hp->masked_status;
        old_hdr->host_status = hp->host_status;
        old_hdr->driver_status = hp->driver_status;
        if ((CHECK_CONDITION & hp->masked_status) ||
            (DRIVER_SENSE & hp->driver_status))
                memcpy(old_hdr->sense_buffer, srp->sense_b,
                       sizeof (old_hdr->sense_buffer));
        switch (hp->host_status) {
        /* This setup of 'result' is for backward compatibility and is best
           ignored by the user who should use target, host + driver status */
        case DID_OK:
        case DID_PASSTHROUGH:
        case DID_SOFT_ERROR:
                old_hdr->result = 0;
                break;
        case DID_NO_CONNECT:
        case DID_BUS_BUSY:
        case DID_TIME_OUT:
                old_hdr->result = EBUSY;
                break;
        case DID_BAD_TARGET:
        case DID_ABORT:
        case DID_PARITY:
        case DID_RESET:
        case DID_BAD_INTR:
                old_hdr->result = EIO;
                break;
        case DID_ERROR:
                old_hdr->result = (srp->sense_b[0] == 0 &&
                                  hp->masked_status == GOOD) ? 0 : EIO;
                break;
        default:
                old_hdr->result = EIO;
                break;
        }

        /* Now copy the result back to the user buffer.  */
        if (count >= SZ_SG_HEADER) {
                if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
                        retval = -EFAULT;
                        goto free_old_hdr;
                }
                buf += SZ_SG_HEADER;
                if (count > old_hdr->reply_len)
                        count = old_hdr->reply_len;
                if (count > SZ_SG_HEADER) {
                        if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) {
                                retval = -EFAULT;
                                goto free_old_hdr;
                        }
                }
        } else
                count = (old_hdr->result == 0) ? 0 : -EIO;
        sg_finish_rem_req(srp);
        sg_remove_request(sfp, srp);
        retval = count;
free_old_hdr:
        kfree(old_hdr);
        return retval;
}

static ssize_t
sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
{
        sg_io_hdr_t *hp = &srp->header;
        int err = 0, err2;
        int len;

        if (count < SZ_SG_IO_HDR) {
                err = -EINVAL;
                goto err_out;
        }
        hp->sb_len_wr = 0;
        if ((hp->mx_sb_len > 0) && hp->sbp) {
                if ((CHECK_CONDITION & hp->masked_status) ||
                    (DRIVER_SENSE & hp->driver_status)) {
                        int sb_len = SCSI_SENSE_BUFFERSIZE;
                        sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len;
                        len = 8 + (int) srp->sense_b[7];        /* Additional sense length field */
                        len = (len > sb_len) ? sb_len : len;
                        if (copy_to_user(hp->sbp, srp->sense_b, len)) {
                                err = -EFAULT;
                                goto err_out;
                        }
                        hp->sb_len_wr = len;
                }
        }
        if (hp->masked_status || hp->host_status || hp->driver_status)
                hp->info |= SG_INFO_CHECK;
        if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) {
                err = -EFAULT;
                goto err_out;
        }
err_out:
        err2 = sg_finish_rem_req(srp);
        sg_remove_request(sfp, srp);
        return err ? : err2 ? : count;
}

static ssize_t
sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
{
        int mxsize, cmd_size, k;
        int input_size, blocking;
        unsigned char opcode;
        Sg_device *sdp;
        Sg_fd *sfp;
        Sg_request *srp;
        struct sg_header old_hdr;
        sg_io_hdr_t *hp;
        unsigned char cmnd[SG_MAX_CDB_SIZE];
        int retval;

        retval = sg_check_file_access(filp, __func__);
        if (retval)
                return retval;

        if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
                return -ENXIO;
        SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
                                      "sg_write: count=%d\n", (int) count));
        if (atomic_read(&sdp->detaching))
                return -ENODEV;
        if (!((filp->f_flags & O_NONBLOCK) ||
              scsi_block_when_processing_errors(sdp->device)))
                return -ENXIO;

        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT; /* protects following copy_from_user()s + get_user()s */
        if (count < SZ_SG_HEADER)
                return -EIO;
        if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
                return -EFAULT;
        blocking = !(filp->f_flags & O_NONBLOCK);
        if (old_hdr.reply_len < 0)
                return sg_new_write(sfp, filp, buf, count,
                                    blocking, 0, 0, NULL);
        if (count < (SZ_SG_HEADER + 6))
                return -EIO;    /* The minimum scsi command length is 6 bytes. */

        if (!(srp = sg_add_request(sfp))) {
                SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sdp,
                                              "sg_write: queue full\n"));
                return -EDOM;
        }
        buf += SZ_SG_HEADER;
        __get_user(opcode, buf);
        mutex_lock(&sfp->f_mutex);
        if (sfp->next_cmd_len > 0) {
                cmd_size = sfp->next_cmd_len;
                sfp->next_cmd_len = 0;  /* reset so only this write() affected */
        } else {
                cmd_size = COMMAND_SIZE(opcode);        /* based on SCSI command group */
                if ((opcode >= 0xc0) && old_hdr.twelve_byte)
                        cmd_size = 12;
        }
        mutex_unlock(&sfp->f_mutex);
        SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
                "sg_write:   scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
/* Determine buffer size.  */
        input_size = count - cmd_size;
        mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len;
        mxsize -= SZ_SG_HEADER;
        input_size -= SZ_SG_HEADER;
        if (input_size < 0) {
                sg_remove_request(sfp, srp);
                return -EIO;    /* User did not pass enough bytes for this command. */
        }
        hp = &srp->header;
        hp->interface_id = '\0';        /* indicator of old interface tunnelled */
        hp->cmd_len = (unsigned char) cmd_size;
        hp->iovec_count = 0;
        hp->mx_sb_len = 0;
        if (input_size > 0)
                hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ?
                    SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV;
        else
                hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
        hp->dxfer_len = mxsize;
        if ((hp->dxfer_direction == SG_DXFER_TO_DEV) ||
            (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV))
                hp->dxferp = (char __user *)buf + cmd_size;
        else
                hp->dxferp = NULL;
        hp->sbp = NULL;
        hp->timeout = old_hdr.reply_len;        /* structure abuse ... */
        hp->flags = input_size; /* structure abuse ... */
        hp->pack_id = old_hdr.pack_id;
        hp->usr_ptr = NULL;
        if (__copy_from_user(cmnd, buf, cmd_size)) {
                sg_remove_request(sfp, srp);
                return -EFAULT;
        }
        /*
         * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
         * but it is possible that the app intended SG_DXFER_TO_DEV, because there
         * is a non-zero input_size, so emit a warning.
         */
        if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) {
                printk_ratelimited(KERN_WARNING
                                   "sg_write: data in/out %d/%d bytes "
                                   "for SCSI command 0x%x-- guessing "
                                   "data in;\n   program %s not setting "
                                   "count and/or reply_len properly\n",
                                   old_hdr.reply_len - (int)SZ_SG_HEADER,
                                   input_size, (unsigned int) cmnd[0],
                                   current->comm);
        }
        k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
        return (k < 0) ? k : count;
}
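
/*
 * Usage sketch of the v3 asynchronous interface this function tunnels to
 * when a struct sg_io_hdr is written (its dxfer_direction constants are
 * negative, so reply_len < 0 when viewed as the old header). cdb, buf,
 * buf_len and fd are assumed set up by the caller:
 *
 *   sg_io_hdr_t h = { .interface_id = 'S', .cmdp = cdb, .cmd_len = 6,
 *                     .dxfer_direction = SG_DXFER_FROM_DEV,
 *                     .dxferp = buf, .dxfer_len = buf_len,
 *                     .timeout = 20000, .pack_id = 7 };
 *   write(fd, &h, sizeof(h));    // queue the command
 *   read(fd, &h, sizeof(h));     // later: collect status and data
 */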

static ssize_t
sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
                 size_t count, int blocking, int read_only, int sg_io_owned,
                 Sg_request **o_srp)
{
        int k;
        Sg_request *srp;
        sg_io_hdr_t *hp;
        unsigned char cmnd[SG_MAX_CDB_SIZE];
        int timeout;
        unsigned long ul_timeout;

        if (count < SZ_SG_IO_HDR)
                return -EINVAL;
        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT; /* protects following copy_from_user()s + get_user()s */

        sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */
        if (!(srp = sg_add_request(sfp))) {
                SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
                                              "sg_new_write: queue full\n"));
                return -EDOM;
        }
        srp->sg_io_owned = sg_io_owned;
        hp = &srp->header;
        if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) {
                sg_remove_request(sfp, srp);
                return -EFAULT;
        }
        if (hp->interface_id != 'S') {
                sg_remove_request(sfp, srp);
                return -ENOSYS;
        }
        if (hp->flags & SG_FLAG_MMAP_IO) {
                if (hp->dxfer_len > sfp->reserve.bufflen) {
                        sg_remove_request(sfp, srp);
                        return -ENOMEM; /* MMAP_IO size must fit in reserve buffer */
                }
                if (hp->flags & SG_FLAG_DIRECT_IO) {
                        sg_remove_request(sfp, srp);
                        return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */
                }
                if (sfp->res_in_use) {
                        sg_remove_request(sfp, srp);
                        return -EBUSY;  /* reserve buffer already being used */
                }
        }
        ul_timeout = msecs_to_jiffies(srp->header.timeout);
        timeout = (ul_timeout < INT_MAX) ? ul_timeout : INT_MAX;
        if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) {
                sg_remove_request(sfp, srp);
                return -EMSGSIZE;
        }
        if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) {
                sg_remove_request(sfp, srp);
                return -EFAULT; /* protects following copy_from_user()s + get_user()s */
        }
        if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
                sg_remove_request(sfp, srp);
                return -EFAULT;
        }
        if (read_only && sg_allow_access(file, cmnd)) {
                sg_remove_request(sfp, srp);
                return -EPERM;
        }
        k = sg_common_write(sfp, srp, cmnd, timeout, blocking);
        if (k < 0)
                return k;
        if (o_srp)
                *o_srp = srp;
        return count;
}

static int
sg_common_write(Sg_fd * sfp, Sg_request * srp,
                unsigned char *cmnd, int timeout, int blocking)
{
        int k, at_head;
        Sg_device *sdp = sfp->parentdp;
        sg_io_hdr_t *hp = &srp->header;

        srp->data.cmd_opcode = cmnd[0]; /* hold opcode of command */
        hp->status = 0;
        hp->masked_status = 0;
        hp->msg_status = 0;
        hp->info = 0;
        hp->host_status = 0;
        hp->driver_status = 0;
        hp->resid = 0;
        SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
                        "sg_common_write:  scsi opcode=0x%02x, cmd_size=%d\n",
                        (int) cmnd[0], (int) hp->cmd_len));

        if (hp->dxfer_len >= SZ_256M) {
                sg_remove_request(sfp, srp);
                return -EINVAL;
        }

        k = sg_start_req(srp, cmnd);
        if (k) {
                SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
                        "sg_common_write: start_req err=%d\n", k));
                sg_finish_rem_req(srp);
                sg_remove_request(sfp, srp);
                return k;       /* probably out of space --> ENOMEM */
        }
        if (atomic_read(&sdp->detaching)) {
                if (srp->bio) {
                        scsi_req_free_cmd(scsi_req(srp->rq));
                        blk_end_request_all(srp->rq, BLK_STS_IOERR);
                        srp->rq = NULL;
                }

                sg_finish_rem_req(srp);
                sg_remove_request(sfp, srp);
                return -ENODEV;
        }

        hp->duration = jiffies_to_msecs(jiffies);
        if (hp->interface_id != '\0' && /* v3 (or later) interface */
            (SG_FLAG_Q_AT_TAIL & hp->flags))
                at_head = 0;
        else
                at_head = 1;

        srp->rq->timeout = timeout;
        kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
        blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
                              srp->rq, at_head, sg_rq_end_io);
        return 0;
}

static int srp_done(Sg_fd *sfp, Sg_request *srp)
{
        unsigned long flags;
        int ret;

        read_lock_irqsave(&sfp->rq_list_lock, flags);
        ret = srp->done;
        read_unlock_irqrestore(&sfp->rq_list_lock, flags);
        return ret;
}

static int max_sectors_bytes(struct request_queue *q)
{
        unsigned int max_sectors = queue_max_sectors(q);

        max_sectors = min_t(unsigned int, max_sectors, INT_MAX >> 9);

        return max_sectors << 9;
}

static void
sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo)
{
        Sg_request *srp;
        int val;
        unsigned int ms;

        val = 0;
        list_for_each_entry(srp, &sfp->rq_list, entry) {
                if (val >= SG_MAX_QUEUE)
                        break;
                rinfo[val].req_state = srp->done + 1;
                rinfo[val].problem =
                        srp->header.masked_status &
                        srp->header.host_status &
                        srp->header.driver_status;
                if (srp->done)
                        rinfo[val].duration =
                                srp->header.duration;
                else {
                        ms = jiffies_to_msecs(jiffies);
                        rinfo[val].duration =
                                (ms > srp->header.duration) ?
                                (ms - srp->header.duration) : 0;
                }
                rinfo[val].orphan = srp->orphan;
                rinfo[val].sg_io_owned = srp->sg_io_owned;
                rinfo[val].pack_id = srp->header.pack_id;
                rinfo[val].usr_ptr = srp->header.usr_ptr;
                val++;
        }
}

static long
sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
        void __user *p = (void __user *)arg;
        int __user *ip = p;
        int result, val, read_only;
        Sg_device *sdp;
        Sg_fd *sfp;
        Sg_request *srp;
        unsigned long iflags;

        if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
                return -ENXIO;

        SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
                                   "sg_ioctl: cmd=0x%x\n", (int) cmd_in));
        read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));

        switch (cmd_in) {
        case SG_IO:
                if (atomic_read(&sdp->detaching))
                        return -ENODEV;
                if (!scsi_block_when_processing_errors(sdp->device))
                        return -ENXIO;
                if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
                        return -EFAULT;
                result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
                                 1, read_only, 1, &srp);
                if (result < 0)
                        return result;
                result = wait_event_interruptible(sfp->read_wait,
                        srp_done(sfp, srp));
                write_lock_irq(&sfp->rq_list_lock);
                if (srp->done) {
                        srp->done = 2;
                        write_unlock_irq(&sfp->rq_list_lock);
                        result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
                        return (result < 0) ? result : 0;
                }
                srp->orphan = 1;
                write_unlock_irq(&sfp->rq_list_lock);
                return result;  /* -ERESTARTSYS because signal hit process */
        case SG_SET_TIMEOUT:
                result = get_user(val, ip);
                if (result)
                        return result;
                if (val < 0)
                        return -EIO;
                if (val >= mult_frac((s64)INT_MAX, USER_HZ, HZ))
                        val = min_t(s64, mult_frac((s64)INT_MAX, USER_HZ, HZ),
                                    INT_MAX);
                sfp->timeout_user = val;
                sfp->timeout = mult_frac(val, HZ, USER_HZ);

                return 0;
        case SG_GET_TIMEOUT:    /* N.B. User receives timeout as return value */
                                /* strange ..., for backward compatibility */
                return sfp->timeout_user;
        case SG_SET_FORCE_LOW_DMA:
                /*
                 * N.B. This ioctl never worked properly, but failed to
                 * return an error value. So return '0' to keep compatibility
                 * with legacy applications.
                 */
                return 0;
        case SG_GET_LOW_DMA:
                return put_user((int) sdp->device->host->unchecked_isa_dma, ip);
        case SG_GET_SCSI_ID:
                if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t)))
                        return -EFAULT;
                else {
                        sg_scsi_id_t __user *sg_idp = p;

                        if (atomic_read(&sdp->detaching))
                                return -ENODEV;
                        __put_user((int) sdp->device->host->host_no,
                                   &sg_idp->host_no);
                        __put_user((int) sdp->device->channel,
                                   &sg_idp->channel);
                        __put_user((int) sdp->device->id, &sg_idp->scsi_id);
                        __put_user((int) sdp->device->lun, &sg_idp->lun);
                        __put_user((int) sdp->device->type, &sg_idp->scsi_type);
                        __put_user((short) sdp->device->host->cmd_per_lun,
                                   &sg_idp->h_cmd_per_lun);
                        __put_user((short) sdp->device->queue_depth,
                                   &sg_idp->d_queue_depth);
                        __put_user(0, &sg_idp->unused[0]);
                        __put_user(0, &sg_idp->unused[1]);
                        return 0;
                }
        case SG_SET_FORCE_PACK_ID:
                result = get_user(val, ip);
                if (result)
                        return result;
                sfp->force_packid = val ? 1 : 0;
                return 0;
        case SG_GET_PACK_ID:
                if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
                        return -EFAULT;
                read_lock_irqsave(&sfp->rq_list_lock, iflags);
                list_for_each_entry(srp, &sfp->rq_list, entry) {
                        if ((1 == srp->done) && (!srp->sg_io_owned)) {
                                read_unlock_irqrestore(&sfp->rq_list_lock,
                                                       iflags);
                                __put_user(srp->header.pack_id, ip);
                                return 0;
                        }
                }
                read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
                __put_user(-1, ip);
                return 0;
        case SG_GET_NUM_WAITING:
                read_lock_irqsave(&sfp->rq_list_lock, iflags);
                val = 0;
                list_for_each_entry(srp, &sfp->rq_list, entry) {
                        if ((1 == srp->done) && (!srp->sg_io_owned))
                                ++val;
                }
                read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
                return put_user(val, ip);
        case SG_GET_SG_TABLESIZE:
                return put_user(sdp->sg_tablesize, ip);
        case SG_SET_RESERVED_SIZE:
                result = get_user(val, ip);
                if (result)
                        return result;
                if (val < 0)
                        return -EINVAL;
                val = min_t(int, val,
                            max_sectors_bytes(sdp->device->request_queue));
                mutex_lock(&sfp->f_mutex);
                if (val != sfp->reserve.bufflen) {
                        if (sfp->mmap_called ||
                            sfp->res_in_use) {
                                mutex_unlock(&sfp->f_mutex);
                                return -EBUSY;
                        }

                        sg_remove_scat(sfp, &sfp->reserve);
                        sg_build_reserve(sfp, val);
                }
                mutex_unlock(&sfp->f_mutex);
                return 0;
        case SG_GET_RESERVED_SIZE:
                val = min_t(int, sfp->reserve.bufflen,
                            max_sectors_bytes(sdp->device->request_queue));
                return put_user(val, ip);
        case SG_SET_COMMAND_Q:
                result = get_user(val, ip);
                if (result)
                        return result;
                sfp->cmd_q = val ? 1 : 0;
                return 0;
        case SG_GET_COMMAND_Q:
                return put_user((int) sfp->cmd_q, ip);
        case SG_SET_KEEP_ORPHAN:
                result = get_user(val, ip);
                if (result)
                        return result;
                sfp->keep_orphan = val;
                return 0;
        case SG_GET_KEEP_ORPHAN:
                return put_user((int) sfp->keep_orphan, ip);
        case SG_NEXT_CMD_LEN:
                result = get_user(val, ip);
                if (result)
                        return result;
                if (val > SG_MAX_CDB_SIZE)
                        return -ENOMEM;
                sfp->next_cmd_len = (val > 0) ? val : 0;
                return 0;
        case SG_GET_VERSION_NUM:
                return put_user(sg_version_num, ip);
        case SG_GET_ACCESS_COUNT:
                /* faked - we don't have a real access count anymore */
                val = (sdp->device ? 1 : 0);
                return put_user(val, ip);
        case SG_GET_REQUEST_TABLE:
                if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE))
                        return -EFAULT;
                else {
                        sg_req_info_t *rinfo;

                        rinfo = kzalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
                                        GFP_KERNEL);
                        if (!rinfo)
                                return -ENOMEM;
                        read_lock_irqsave(&sfp->rq_list_lock, iflags);
                        sg_fill_request_table(sfp, rinfo);
                        read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
                        result = __copy_to_user(p, rinfo,
                                                SZ_SG_REQ_INFO * SG_MAX_QUEUE);
                        result = result ? -EFAULT : 0;
                        kfree(rinfo);
                        return result;
                }
        case SG_EMULATED_HOST:
                if (atomic_read(&sdp->detaching))
                        return -ENODEV;
                return put_user(sdp->device->host->hostt->emulated, ip);
        case SCSI_IOCTL_SEND_COMMAND:
                if (atomic_read(&sdp->detaching))
                        return -ENODEV;
                if (read_only) {
                        unsigned char opcode = WRITE_6;
                        Scsi_Ioctl_Command __user *siocp = p;

                        if (copy_from_user(&opcode, siocp->data, 1))
                                return -EFAULT;
                        if (sg_allow_access(filp, &opcode))
                                return -EPERM;
                }
                return sg_scsi_ioctl(sdp->device->request_queue, NULL, filp->f_mode, p);
        case SG_SET_DEBUG:
                result = get_user(val, ip);
                if (result)
                        return result;
                sdp->sgdebug = (char) val;
                return 0;
        case BLKSECTGET:
                return put_user(max_sectors_bytes(sdp->device->request_queue),
                                ip);
        case BLKTRACESETUP:
                return blk_trace_setup(sdp->device->request_queue,
                                       sdp->disk->disk_name,
                                       MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
                                       NULL, p);
        case BLKTRACESTART:
                return blk_trace_startstop(sdp->device->request_queue, 1);
        case BLKTRACESTOP:
                return blk_trace_startstop(sdp->device->request_queue, 0);
        case BLKTRACETEARDOWN:
                return blk_trace_remove(sdp->device->request_queue);
        case SCSI_IOCTL_GET_IDLUN:
        case SCSI_IOCTL_GET_BUS_NUMBER:
        case SCSI_IOCTL_PROBE_HOST:
        case SG_GET_TRANSFORM:
        case SG_SCSI_RESET:
                if (atomic_read(&sdp->detaching))
                        return -ENODEV;
                break;
        default:
                if (read_only)
                        return -EPERM;  /* don't know so take safe approach */
                break;
        }

        result = scsi_ioctl_block_when_processing_errors(sdp->device,
                        cmd_in, filp->f_flags & O_NDELAY);
        if (result)
                return result;
        return scsi_ioctl(sdp->device, cmd_in, p);
}
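
/*
 * Usage sketch for the synchronous SG_IO case handled first above
 * (fd assumed open on an sg node; error handling elided):
 *
 *   unsigned char cdb[6] = { 0 };           // TEST UNIT READY
 *   unsigned char sense[32];
 *   sg_io_hdr_t h = { .interface_id = 'S', .cmdp = cdb, .cmd_len = 6,
 *                     .dxfer_direction = SG_DXFER_NONE,
 *                     .sbp = sense, .mx_sb_len = sizeof(sense),
 *                     .timeout = 10000 };   // milliseconds
 *   ioctl(fd, SG_IO, &h);
 *   if (h.info & SG_INFO_CHECK)
 *           ;   // inspect h.masked_status / h.host_status / h.driver_status
 */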

#ifdef CONFIG_COMPAT
static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
        Sg_device *sdp;
        Sg_fd *sfp;
        struct scsi_device *sdev;

        if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
                return -ENXIO;

        sdev = sdp->device;
        if (sdev->host->hostt->compat_ioctl) {
                int ret;

                ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);

                return ret;
        }

        return -ENOIOCTLCMD;
}
#endif

static unsigned int
sg_poll(struct file *filp, poll_table * wait)
{
        unsigned int res = 0;
        Sg_device *sdp;
        Sg_fd *sfp;
        Sg_request *srp;
        int count = 0;
        unsigned long iflags;

        sfp = filp->private_data;
        if (!sfp)
                return POLLERR;
        sdp = sfp->parentdp;
        if (!sdp)
                return POLLERR;
        poll_wait(filp, &sfp->read_wait, wait);
        read_lock_irqsave(&sfp->rq_list_lock, iflags);
        list_for_each_entry(srp, &sfp->rq_list, entry) {
                /* if any read waiting, flag it */
                if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
                        res = POLLIN | POLLRDNORM;
                ++count;
        }
        read_unlock_irqrestore(&sfp->rq_list_lock, iflags);

        if (atomic_read(&sdp->detaching))
                res |= POLLHUP;
        else if (!sfp->cmd_q) {
                if (0 == count)
                        res |= POLLOUT | POLLWRNORM;
        } else if (count < SG_MAX_QUEUE)
                res |= POLLOUT | POLLWRNORM;
        SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
                                      "sg_poll: res=0x%x\n", (int) res));
        return res;
}
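
/*
 * Usage sketch: waiting for an asynchronously queued command using the
 * semantics implemented above (h and fd as in the write() example):
 *
 *   struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *   if (poll(&pfd, 1, 5000) > 0 && (pfd.revents & POLLIN))
 *           read(fd, &h, sizeof(h));   // a response is ready
 *
 * POLLHUP reports a detaching device; POLLOUT means another command can
 * still be queued on this fd.
 */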

static int
sg_fasync(int fd, struct file *filp, int mode)
{
        Sg_device *sdp;
        Sg_fd *sfp;

        if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
                return -ENXIO;
        SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
                                      "sg_fasync: mode=%d\n", mode));

        return fasync_helper(fd, filp, mode, &sfp->async_qp);
}
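
/*
 * Usage sketch: signal-driven completion via the fasync hook above,
 * instead of poll() (the handler name is caller-defined):
 *
 *   signal(SIGIO, handle_sg_response);
 *   fcntl(fd, F_SETOWN, getpid());
 *   fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
 *
 * Command completion then signals this fd's async queue (SIGIO by
 * default), and the handler can read() the finished response.
 */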

static int
sg_vma_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        Sg_fd *sfp;
        unsigned long offset, len, sa;
        Sg_scatter_hold *rsv_schp;
        int k, length;

        if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
                return VM_FAULT_SIGBUS;
        rsv_schp = &sfp->reserve;
        offset = vmf->pgoff << PAGE_SHIFT;
        if (offset >= rsv_schp->bufflen)
                return VM_FAULT_SIGBUS;
        SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp,
                                      "sg_vma_fault: offset=%lu, scatg=%d\n",
                                      offset, rsv_schp->k_use_sg));
        sa = vma->vm_start;
        length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
        for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
                len = vma->vm_end - sa;
                len = (len < length) ? len : length;
                if (offset < len) {
                        struct page *page = nth_page(rsv_schp->pages[k],
                                                     offset >> PAGE_SHIFT);
                        get_page(page); /* increment page count */
                        vmf->page = page;
                        return 0; /* success */
                }
                sa += len;
                offset -= len;
        }

        return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct sg_mmap_vm_ops = {
        .fault = sg_vma_fault,
};

static int
sg_mmap(struct file *filp, struct vm_area_struct *vma)
{
        Sg_fd *sfp;
        unsigned long req_sz, len, sa;
        Sg_scatter_hold *rsv_schp;
        int k, length;
        int ret = 0;

        if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
                return -ENXIO;
        req_sz = vma->vm_end - vma->vm_start;
        SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp,
                                      "sg_mmap starting, vm_start=%p, len=%d\n",
                                      (void *) vma->vm_start, (int) req_sz));
        if (vma->vm_pgoff)
                return -EINVAL; /* want no offset */
        rsv_schp = &sfp->reserve;
        mutex_lock(&sfp->f_mutex);
        if (req_sz > rsv_schp->bufflen) {
                ret = -ENOMEM;  /* cannot map more than reserved buffer */
                goto out;
        }

        sa = vma->vm_start;
        length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
        for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
                len = vma->vm_end - sa;
                len = (len < length) ? len : length;
                sa += len;
        }

        sfp->mmap_called = 1;
        vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data = sfp;
        vma->vm_ops = &sg_mmap_vm_ops;
out:
        mutex_unlock(&sfp->f_mutex);
        return ret;
}
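
/*
 * Usage sketch: pairing this mapping with SG_FLAG_MMAP_IO so the data
 * phase lands directly in the mapped reserve buffer (sz must not exceed
 * the reserve buffer, see the ENOMEM check above; h and fd as before):
 *
 *   void *p = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *   h.flags = SG_FLAG_MMAP_IO;
 *   h.dxfer_len = sz;
 *   ioctl(fd, SG_IO, &h);      // data is now visible at p
 *
 * ioctl(SG_SET_RESERVED_SIZE) beforehand can grow the reserve buffer.
 */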
1311
1312 static void
1313 sg_rq_end_io_usercontext(struct work_struct *work)
1314 {
1315         struct sg_request *srp = container_of(work, struct sg_request, ew.work);
1316         struct sg_fd *sfp = srp->parentfp;
1317
1318         sg_finish_rem_req(srp);
1319         sg_remove_request(sfp, srp);
1320         kref_put(&sfp->f_ref, sg_remove_sfp);
1321 }
1322
1323 /*
1324  * This function is a "bottom half" handler that is called by the
1325  * SCSI mid-level when a command is completed (or has failed).
1326  */
1327 static void
1328 sg_rq_end_io(struct request *rq, blk_status_t status)
1329 {
1330         struct sg_request *srp = rq->end_io_data;
1331         struct scsi_request *req = scsi_req(rq);
1332         Sg_device *sdp;
1333         Sg_fd *sfp;
1334         unsigned long iflags;
1335         unsigned int ms;
1336         char *sense;
1337         int result, resid, done = 1;
1338
1339         if (WARN_ON(srp->done != 0))
1340                 return;
1341
1342         sfp = srp->parentfp;
1343         if (WARN_ON(sfp == NULL))
1344                 return;
1345
1346         sdp = sfp->parentdp;
1347         if (unlikely(atomic_read(&sdp->detaching)))
1348                 pr_info("%s: device detaching\n", __func__);
1349
1350         sense = req->sense;
1351         result = req->result;
1352         resid = req->resid_len;
1353
1354         SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
1355                                       "sg_cmd_done: pack_id=%d, res=0x%x\n",
1356                                       srp->header.pack_id, result));
1357         srp->header.resid = resid;
1358         ms = jiffies_to_msecs(jiffies);
1359         srp->header.duration = (ms > srp->header.duration) ?
1360                                 (ms - srp->header.duration) : 0;
1361         if (0 != result) {
1362                 struct scsi_sense_hdr sshdr;
1363
1364                 srp->header.status = 0xff & result;
1365                 srp->header.masked_status = status_byte(result);
1366                 srp->header.msg_status = msg_byte(result);
1367                 srp->header.host_status = host_byte(result);
1368                 srp->header.driver_status = driver_byte(result);
1369                 if ((sdp->sgdebug > 0) &&
1370                     ((CHECK_CONDITION == srp->header.masked_status) ||
1371                      (COMMAND_TERMINATED == srp->header.masked_status)))
1372                         __scsi_print_sense(sdp->device, __func__, sense,
1373                                            SCSI_SENSE_BUFFERSIZE);
1374
1375                 /* The following "if" statement is a patch supplied by Eric Youngdale */
1376                 if (driver_byte(result) != 0
1377                     && scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)
1378                     && !scsi_sense_is_deferred(&sshdr)
1379                     && sshdr.sense_key == UNIT_ATTENTION
1380                     && sdp->device->removable) {
1381                         /* Detected possible disc change.  Set the bit: this may
1382                          * be used if there are filesystems using this device. */
1383                         sdp->device->changed = 1;
1384                 }
1385         }
1386
1387         if (req->sense_len)
1388                 memcpy(srp->sense_b, req->sense, SCSI_SENSE_BUFFERSIZE);
1389
1390         /* Rely on write phase to clean out srp status values, so no "else" */
1391
1392         /*
1393          * Free the request as soon as it is complete so that its resources
1394          * can be reused without waiting for userspace to read() the
1395          * result.  But keep the associated bio (if any) around until
1396          * blk_rq_unmap_user() can be called from user context.
1397          */
1398         srp->rq = NULL;
1399         scsi_req_free_cmd(scsi_req(rq));
1400         __blk_put_request(rq->q, rq);
1401
1402         write_lock_irqsave(&sfp->rq_list_lock, iflags);
1403         if (unlikely(srp->orphan)) {
1404                 if (sfp->keep_orphan)
1405                         srp->sg_io_owned = 0;
1406                 else
1407                         done = 0;
1408         }
1409         srp->done = done;
1410         write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
1411
1412         if (likely(done)) {
1413                 /* Now wake up any sg_read() that is waiting for this
1414                  * packet.
1415                  */
1416                 wake_up_interruptible(&sfp->read_wait);
1417                 kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
1418                 kref_put(&sfp->f_ref, sg_remove_sfp);
1419         } else {
1420                 INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext);
1421                 schedule_work(&srp->ew.work);
1422         }
1423 }
1424
1425 static const struct file_operations sg_fops = {
1426         .owner = THIS_MODULE,
1427         .read = sg_read,
1428         .write = sg_write,
1429         .poll = sg_poll,
1430         .unlocked_ioctl = sg_ioctl,
1431 #ifdef CONFIG_COMPAT
1432         .compat_ioctl = sg_compat_ioctl,
1433 #endif
1434         .open = sg_open,
1435         .mmap = sg_mmap,
1436         .release = sg_release,
1437         .fasync = sg_fasync,
1438         .llseek = no_llseek,
1439 };
1440
1441 static struct class *sg_sysfs_class;
1442
1443 static int sg_sysfs_valid = 0;
1444
1445 static Sg_device *
1446 sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
1447 {
1448         struct request_queue *q = scsidp->request_queue;
1449         Sg_device *sdp;
1450         unsigned long iflags;
1451         int error;
1452         u32 k;
1453
1454         sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL);
1455         if (!sdp) {
1456                 sdev_printk(KERN_WARNING, scsidp, "%s: kzalloc Sg_device "
1457                             "failure\n", __func__);
1458                 return ERR_PTR(-ENOMEM);
1459         }
1460
1461         idr_preload(GFP_KERNEL);
1462         write_lock_irqsave(&sg_index_lock, iflags);
1463
1464         error = idr_alloc(&sg_index_idr, sdp, 0, SG_MAX_DEVS, GFP_NOWAIT);
1465         if (error < 0) {
1466                 if (error == -ENOSPC) {
1467                         sdev_printk(KERN_WARNING, scsidp,
1468                                     "Unable to attach sg device type=%d, minor number exceeds %d\n",
1469                                     scsidp->type, SG_MAX_DEVS - 1);
1470                         error = -ENODEV;
1471                 } else {
1472                         sdev_printk(KERN_WARNING, scsidp, "%s: idr "
1473                                     "allocation Sg_device failure: %d\n",
1474                                     __func__, error);
1475                 }
1476                 goto out_unlock;
1477         }
1478         k = error;
1479
1480         SCSI_LOG_TIMEOUT(3, sdev_printk(KERN_INFO, scsidp,
1481                                         "sg_alloc: dev=%d\n", k));
1482         sprintf(disk->disk_name, "sg%d", k);
1483         disk->first_minor = k;
1484         sdp->disk = disk;
1485         sdp->device = scsidp;
1486         mutex_init(&sdp->open_rel_lock);
1487         INIT_LIST_HEAD(&sdp->sfds);
1488         init_waitqueue_head(&sdp->open_wait);
1489         atomic_set(&sdp->detaching, 0);
1490         rwlock_init(&sdp->sfd_lock);
1491         sdp->sg_tablesize = queue_max_segments(q);
1492         sdp->index = k;
1493         kref_init(&sdp->d_ref);
1494         error = 0;
1495
1496 out_unlock:
1497         write_unlock_irqrestore(&sg_index_lock, iflags);
1498         idr_preload_end();
1499
1500         if (error) {
1501                 kfree(sdp);
1502                 return ERR_PTR(error);
1503         }
1504         return sdp;
1505 }
1506
1507 static int
1508 sg_add_device(struct device *cl_dev, struct class_interface *cl_intf)
1509 {
1510         struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
1511         struct gendisk *disk;
1512         Sg_device *sdp = NULL;
1513         struct cdev * cdev = NULL;
1514         int error;
1515         unsigned long iflags;
1516
1517         disk = alloc_disk(1);
1518         if (!disk) {
1519                 pr_warn("%s: alloc_disk failed\n", __func__);
1520                 return -ENOMEM;
1521         }
1522         disk->major = SCSI_GENERIC_MAJOR;
1523
1524         error = -ENOMEM;
1525         cdev = cdev_alloc();
1526         if (!cdev) {
1527                 pr_warn("%s: cdev_alloc failed\n", __func__);
1528                 goto out;
1529         }
1530         cdev->owner = THIS_MODULE;
1531         cdev->ops = &sg_fops;
1532
1533         sdp = sg_alloc(disk, scsidp);
1534         if (IS_ERR(sdp)) {
1535                 pr_warn("%s: sg_alloc failed\n", __func__);
1536                 error = PTR_ERR(sdp);
1537                 goto out;
1538         }
1539
1540         error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), 1);
1541         if (error)
1542                 goto cdev_add_err;
1543
1544         sdp->cdev = cdev;
1545         if (sg_sysfs_valid) {
1546                 struct device *sg_class_member;
1547
1548                 sg_class_member = device_create(sg_sysfs_class, cl_dev->parent,
1549                                                 MKDEV(SCSI_GENERIC_MAJOR,
1550                                                       sdp->index),
1551                                                 sdp, "%s", disk->disk_name);
1552                 if (IS_ERR(sg_class_member)) {
1553                         pr_err("%s: device_create failed\n", __func__);
1554                         error = PTR_ERR(sg_class_member);
1555                         goto cdev_add_err;
1556                 }
1557                 error = sysfs_create_link(&scsidp->sdev_gendev.kobj,
1558                                           &sg_class_member->kobj, "generic");
1559                 if (error)
1560                         pr_err("%s: unable to make symlink 'generic' back "
1561                                "to sg%d\n", __func__, sdp->index);
1562         } else
1563                 pr_warn("%s: sg_sysfs_class not valid\n", __func__);
1564
1565         sdev_printk(KERN_NOTICE, scsidp, "Attached scsi generic sg%d "
1566                     "type %d\n", sdp->index, scsidp->type);
1567
1568         dev_set_drvdata(cl_dev, sdp);
1569
1570         return 0;
1571
1572 cdev_add_err:
1573         write_lock_irqsave(&sg_index_lock, iflags);
1574         idr_remove(&sg_index_idr, sdp->index);
1575         write_unlock_irqrestore(&sg_index_lock, iflags);
1576         kfree(sdp);
1577
1578 out:
1579         put_disk(disk);
1580         if (cdev)
1581                 cdev_del(cdev);
1582         return error;
1583 }
1584
1585 static void
1586 sg_device_destroy(struct kref *kref)
1587 {
1588         struct sg_device *sdp = container_of(kref, struct sg_device, d_ref);
1589         unsigned long flags;
1590
1591         /* CAUTION!  Note that the device can still be found via idr_find()
1592          * even though the refcount is 0.  Therefore, do idr_remove() BEFORE
1593          * any other cleanup.
1594          */
1595
1596         write_lock_irqsave(&sg_index_lock, flags);
1597         idr_remove(&sg_index_idr, sdp->index);
1598         write_unlock_irqrestore(&sg_index_lock, flags);
1599
1600         SCSI_LOG_TIMEOUT(3,
1601                 sg_printk(KERN_INFO, sdp, "sg_device_destroy\n"));
1602
1603         put_disk(sdp->disk);
1604         kfree(sdp);
1605 }
1606
1607 static void
1608 sg_remove_device(struct device *cl_dev, struct class_interface *cl_intf)
1609 {
1610         struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
1611         Sg_device *sdp = dev_get_drvdata(cl_dev);
1612         unsigned long iflags;
1613         Sg_fd *sfp;
1614         int val;
1615
1616         if (!sdp)
1617                 return;
1618         /* want sdp->detaching non-zero as soon as possible */
1619         val = atomic_inc_return(&sdp->detaching);
1620         if (val > 1)
1621                 return; /* only want to do following once per device */
1622
1623         SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
1624                                       "%s\n", __func__));
1625
1626         read_lock_irqsave(&sdp->sfd_lock, iflags);
1627         list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) {
1628                 wake_up_interruptible_all(&sfp->read_wait);
1629                 kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
1630         }
1631         wake_up_interruptible_all(&sdp->open_wait);
1632         read_unlock_irqrestore(&sdp->sfd_lock, iflags);
1633
1634         sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
1635         device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
1636         cdev_del(sdp->cdev);
1637         sdp->cdev = NULL;
1638
1639         kref_put(&sdp->d_ref, sg_device_destroy);
1640 }
1641
1642 module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR);
1643 module_param_named(def_reserved_size, def_reserved_size, int,
1644                    S_IRUGO | S_IWUSR);
1645 module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR);
1646
1647 MODULE_AUTHOR("Douglas Gilbert");
1648 MODULE_DESCRIPTION("SCSI generic (sg) driver");
1649 MODULE_LICENSE("GPL");
1650 MODULE_VERSION(SG_VERSION_STR);
1651 MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR);
1652
1653 MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element "
1654                 "size (default: max(SG_SCATTER_SZ, PAGE_SIZE))");
1655 MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
1656 MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
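
/*
 * Illustrative usage: all three parameters can be set at load time,
 * e.g. "modprobe sg def_reserved_size=65536 allow_dio=1", and, being
 * mode S_IRUGO | S_IWUSR, read or changed afterwards through
 * /sys/module/sg/parameters/.
 */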
1657
1658 static int __init
1659 init_sg(void)
1660 {
1661         int rc;
1662
1663         if (scatter_elem_sz < PAGE_SIZE) {
1664                 scatter_elem_sz = PAGE_SIZE;
1665                 scatter_elem_sz_prev = scatter_elem_sz;
1666         }
1667         if (def_reserved_size >= 0)
1668                 sg_big_buff = def_reserved_size;
1669         else
1670                 def_reserved_size = sg_big_buff;
1671
1672         rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), 
1673                                     SG_MAX_DEVS, "sg");
1674         if (rc)
1675                 return rc;
1676         sg_sysfs_class = class_create(THIS_MODULE, "scsi_generic");
1677         if (IS_ERR(sg_sysfs_class)) {
1678                 rc = PTR_ERR(sg_sysfs_class);
1679                 goto err_out;
1680         }
1681         sg_sysfs_valid = 1;
1682         rc = scsi_register_interface(&sg_interface);
1683         if (0 == rc) {
1684 #ifdef CONFIG_SCSI_PROC_FS
1685                 sg_proc_init();
1686 #endif                          /* CONFIG_SCSI_PROC_FS */
1687                 return 0;
1688         }
1689         class_destroy(sg_sysfs_class);
1690 err_out:
1691         unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);
1692         return rc;
1693 }
1694
1695 static void __exit
1696 exit_sg(void)
1697 {
1698 #ifdef CONFIG_SCSI_PROC_FS
1699         sg_proc_cleanup();
1700 #endif                          /* CONFIG_SCSI_PROC_FS */
1701         scsi_unregister_interface(&sg_interface);
1702         class_destroy(sg_sysfs_class);
1703         sg_sysfs_valid = 0;
1704         unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
1705                                  SG_MAX_DEVS);
1706         idr_destroy(&sg_index_idr);
1707 }
1708
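/*
 * Build and map the block-layer request for one SCSI command.  The data
 * transfer is set up in one of three ways: direct I/O against the user
 * pages (md stays NULL), through the per-fd reserve buffer (mmap I/O, or
 * any transfer that fits while the buffer is free), or through a freshly
 * built indirect scatter list.
 */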
1709 static int
1710 sg_start_req(Sg_request *srp, unsigned char *cmd)
1711 {
1712         int res;
1713         struct request *rq;
1714         struct scsi_request *req;
1715         Sg_fd *sfp = srp->parentfp;
1716         sg_io_hdr_t *hp = &srp->header;
1717         int dxfer_len = (int) hp->dxfer_len;
1718         int dxfer_dir = hp->dxfer_direction;
1719         unsigned int iov_count = hp->iovec_count;
1720         Sg_scatter_hold *req_schp = &srp->data;
1721         Sg_scatter_hold *rsv_schp = &sfp->reserve;
1722         struct request_queue *q = sfp->parentdp->device->request_queue;
1723         struct rq_map_data *md, map_data;
1724         int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ;
1725         unsigned char *long_cmdp = NULL;
1726
1727         SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
1728                                       "sg_start_req: dxfer_len=%d\n",
1729                                       dxfer_len));
1730
1731         if (hp->cmd_len > BLK_MAX_CDB) {
1732                 long_cmdp = kzalloc(hp->cmd_len, GFP_KERNEL);
1733                 if (!long_cmdp)
1734                         return -ENOMEM;
1735         }
1736
1737         /*
1738          * NOTE
1739          *
1740          * With scsi-mq enabled, there are a fixed number of preallocated
1741          * requests equal in number to shost->can_queue.  If all of the
1742          * preallocated requests are already in use, then using GFP_ATOMIC with
1743          * blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL
1744          * will cause blk_get_request() to sleep until an active command
1745          * completes, freeing up a request.  Neither option is ideal, but
1746          * GFP_KERNEL is the better choice to prevent userspace from getting an
1747          * unexpected EWOULDBLOCK.
1748          *
1749          * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually
1750          * does not sleep except under memory pressure.
1751          */
1752         rq = blk_get_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ?
1753                         REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, GFP_KERNEL);
1754         if (IS_ERR(rq)) {
1755                 kfree(long_cmdp);
1756                 return PTR_ERR(rq);
1757         }
1758         req = scsi_req(rq);
1759
1760         if (hp->cmd_len > BLK_MAX_CDB)
1761                 req->cmd = long_cmdp;
1762         memcpy(req->cmd, cmd, hp->cmd_len);
1763         req->cmd_len = hp->cmd_len;
1764
1765         srp->rq = rq;
1766         rq->end_io_data = srp;
1767         req->retries = SG_DEFAULT_RETRIES;
1768
1769         if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
1770                 return 0;
1771
1772         if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO &&
1773             dxfer_dir != SG_DXFER_UNKNOWN && !iov_count &&
1774             !sfp->parentdp->device->host->unchecked_isa_dma &&
1775             blk_rq_aligned(q, (unsigned long)hp->dxferp, dxfer_len))
1776                 md = NULL;
1777         else
1778                 md = &map_data;
1779
1780         if (md) {
1781                 mutex_lock(&sfp->f_mutex);
1782                 if (dxfer_len <= rsv_schp->bufflen &&
1783                     !sfp->res_in_use) {
1784                         sfp->res_in_use = 1;
1785                         sg_link_reserve(sfp, srp, dxfer_len);
1786                 } else if (hp->flags & SG_FLAG_MMAP_IO) {
1787                         res = -EBUSY; /* sfp->res_in_use == 1 */
1788                         if (dxfer_len > rsv_schp->bufflen)
1789                                 res = -ENOMEM;
1790                         mutex_unlock(&sfp->f_mutex);
1791                         return res;
1792                 } else {
1793                         res = sg_build_indirect(req_schp, sfp, dxfer_len);
1794                         if (res) {
1795                                 mutex_unlock(&sfp->f_mutex);
1796                                 return res;
1797                         }
1798                 }
1799                 mutex_unlock(&sfp->f_mutex);
1800
1801                 md->pages = req_schp->pages;
1802                 md->page_order = req_schp->page_order;
1803                 md->nr_entries = req_schp->k_use_sg;
1804                 md->offset = 0;
1805                 md->null_mapped = hp->dxferp ? 0 : 1;
1806                 if (dxfer_dir == SG_DXFER_TO_FROM_DEV)
1807                         md->from_user = 1;
1808                 else
1809                         md->from_user = 0;
1810         }
1811
1812         if (iov_count) {
1813                 struct iovec *iov = NULL;
1814                 struct iov_iter i;
1815
1816                 res = import_iovec(rw, hp->dxferp, iov_count, 0, &iov, &i);
1817                 if (res < 0)
1818                         return res;
1819
1820                 iov_iter_truncate(&i, hp->dxfer_len);
1821                 if (!iov_iter_count(&i)) {
1822                         kfree(iov);
1823                         return -EINVAL;
1824                 }
1825
1826                 res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC);
1827                 kfree(iov);
1828         } else
1829                 res = blk_rq_map_user(q, rq, md, hp->dxferp,
1830                                       hp->dxfer_len, GFP_ATOMIC);
1831
1832         if (!res) {
1833                 srp->bio = rq->bio;
1834
1835                 if (!md) {
1836                         req_schp->dio_in_use = 1;
1837                         hp->info |= SG_INFO_DIRECT_IO;
1838                 }
1839         }
1840         return res;
1841 }
1842
1843 static int
1844 sg_finish_rem_req(Sg_request *srp)
1845 {
1846         int ret = 0;
1847
1848         Sg_fd *sfp = srp->parentfp;
1849         Sg_scatter_hold *req_schp = &srp->data;
1850
1851         SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
1852                                       "sg_finish_rem_req: res_used=%d\n",
1853                                       (int) srp->res_used));
1854         if (srp->bio)
1855                 ret = blk_rq_unmap_user(srp->bio);
1856
1857         if (srp->rq) {
1858                 scsi_req_free_cmd(scsi_req(srp->rq));
1859                 blk_put_request(srp->rq);
1860         }
1861
1862         if (srp->res_used)
1863                 sg_unlink_reserve(sfp, srp);
1864         else
1865                 sg_remove_scat(sfp, req_schp);
1866
1867         return ret;
1868 }
1869
1870 static int
1871 sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
1872 {
1873         int sg_bufflen = tablesize * sizeof(struct page *);
1874         gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
1875
1876         schp->pages = kzalloc(sg_bufflen, gfp_flags);
1877         if (!schp->pages)
1878                 return -ENOMEM;
1879         schp->sglist_len = sg_bufflen;
1880         return tablesize;       /* number of scat_gath elements allocated */
1881 }
1882
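/*
 * Allocate an indirect scatter list of up to sg_tablesize elements, each
 * 2^order pages long.  If a higher-order allocation fails partway, every
 * page obtained so far is freed and the whole list is retried with the
 * next smaller order (see the "retry:" label).
 */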
1883 static int
1884 sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1885 {
1886         int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems;
1887         int sg_tablesize = sfp->parentdp->sg_tablesize;
1888         int blk_size = buff_size, order;
1889         gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
1890         struct sg_device *sdp = sfp->parentdp;
1891
1892         if (blk_size < 0)
1893                 return -EFAULT;
1894         if (0 == blk_size)
1895                 ++blk_size;     /* a zero-length request still gets one sector */
1896         /* round request up to next highest SG_SECTOR_SZ byte boundary */
1897         blk_size = ALIGN(blk_size, SG_SECTOR_SZ);
1898         SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
1899                 "sg_build_indirect: buff_size=%d, blk_size=%d\n",
1900                 buff_size, blk_size));
1901
1902         /* N.B. ret_sz carried into this block ... */
1903         mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
1904         if (mx_sc_elems < 0)
1905                 return mx_sc_elems;     /* most likely -ENOMEM */
1906
1907         num = scatter_elem_sz;
1908         if (unlikely(num != scatter_elem_sz_prev)) {
1909                 if (num < PAGE_SIZE) {
1910                         scatter_elem_sz = PAGE_SIZE;
1911                         scatter_elem_sz_prev = PAGE_SIZE;
1912                 } else
1913                         scatter_elem_sz_prev = num;
1914         }
1915
1916         if (sdp->device->host->unchecked_isa_dma)
1917                 gfp_mask |= GFP_DMA;
1918
1919         if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
1920                 gfp_mask |= __GFP_ZERO;
1921
1922         order = get_order(num);
1923 retry:
1924         ret_sz = 1 << (PAGE_SHIFT + order);
1925
1926         for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems;
1927              k++, rem_sz -= ret_sz) {
1928
1929                 num = (rem_sz > scatter_elem_sz_prev) ?
1930                         scatter_elem_sz_prev : rem_sz;
1931
1932                 schp->pages[k] = alloc_pages(gfp_mask | __GFP_ZERO, order);
1933                 if (!schp->pages[k])
1934                         goto out;
1935
1936                 if (num == scatter_elem_sz_prev) {
1937                         if (unlikely(ret_sz > scatter_elem_sz_prev)) {
1938                                 scatter_elem_sz = ret_sz;
1939                                 scatter_elem_sz_prev = ret_sz;
1940                         }
1941                 }
1942
1943                 SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp,
1944                                  "sg_build_indirect: k=%d, num=%d, ret_sz=%d\n",
1945                                  k, num, ret_sz));
1946         }               /* end of for loop */
1947
1948         schp->page_order = order;
1949         schp->k_use_sg = k;
1950         SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp,
1951                          "sg_build_indirect: k_use_sg=%d, rem_sz=%d\n",
1952                          k, rem_sz));
1953
1954         schp->bufflen = blk_size;
1955         if (rem_sz > 0) /* must have failed */
1956                 return -ENOMEM;
1957         return 0;
1958 out:
1959         for (i = 0; i < k; i++)
1960                 __free_pages(schp->pages[i], order);
1961
1962         if (--order >= 0)
1963                 goto retry;
1964
1965         return -ENOMEM;
1966 }
1967
1968 static void
1969 sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp)
1970 {
1971         SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
1972                          "sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
1973         if (schp->pages && schp->sglist_len > 0) {
1974                 if (!schp->dio_in_use) {
1975                         int k;
1976
1977                         for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
1978                                 SCSI_LOG_TIMEOUT(5,
1979                                         sg_printk(KERN_INFO, sfp->parentdp,
1980                                         "sg_remove_scat: k=%d, pg=0x%p\n",
1981                                         k, schp->pages[k]));
1982                                 __free_pages(schp->pages[k], schp->page_order);
1983                         }
1984
1985                         kfree(schp->pages);
1986                 }
1987         }
1988         memset(schp, 0, sizeof (*schp));
1989 }
1990
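/*
 * Copy up to num_read_xfer bytes of response data out to the user
 * buffer, walking the scatter list one 2^page_order sized element at a
 * time.
 */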
1991 static int
1992 sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
1993 {
1994         Sg_scatter_hold *schp = &srp->data;
1995         int k, num;
1996
1997         SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp,
1998                          "sg_read_oxfer: num_read_xfer=%d\n",
1999                          num_read_xfer));
2000         if ((!outp) || (num_read_xfer <= 0))
2001                 return 0;
2002
2003         num = 1 << (PAGE_SHIFT + schp->page_order);
2004         for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
2005                 if (num > num_read_xfer) {
2006                         if (__copy_to_user(outp, page_address(schp->pages[k]),
2007                                            num_read_xfer))
2008                                 return -EFAULT;
2009                         break;
2010                 } else {
2011                         if (__copy_to_user(outp, page_address(schp->pages[k]),
2012                                            num))
2013                                 return -EFAULT;
2014                         num_read_xfer -= num;
2015                         if (num_read_xfer <= 0)
2016                                 break;
2017                         outp += num;
2018                 }
2019         }
2020
2021         return 0;
2022 }
2023
2024 static void
2025 sg_build_reserve(Sg_fd * sfp, int req_size)
2026 {
2027         Sg_scatter_hold *schp = &sfp->reserve;
2028
2029         SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
2030                          "sg_build_reserve: req_size=%d\n", req_size));
2031         do {
2032                 if (req_size < PAGE_SIZE)
2033                         req_size = PAGE_SIZE;
2034                 if (0 == sg_build_indirect(schp, sfp, req_size))
2035                         return;
2036                 else
2037                         sg_remove_scat(sfp, schp);
2038                 req_size >>= 1; /* halve the request and retry */
2039         } while (req_size > (PAGE_SIZE / 2));
2040 }
2041
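/*
 * Satisfy a request from the per-fd reserve buffer: the request's
 * scatter list borrows the reserve pages instead of allocating its own,
 * and sg_unlink_reserve() hands them back once the request finishes.
 */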
2042 static void
2043 sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
2044 {
2045         Sg_scatter_hold *req_schp = &srp->data;
2046         Sg_scatter_hold *rsv_schp = &sfp->reserve;
2047         int k, num, rem;
2048
2049         srp->res_used = 1;
2050         SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
2051                          "sg_link_reserve: size=%d\n", size));
2052         rem = size;
2053
2054         num = 1 << (PAGE_SHIFT + rsv_schp->page_order);
2055         for (k = 0; k < rsv_schp->k_use_sg; k++) {
2056                 if (rem <= num) {
2057                         req_schp->k_use_sg = k + 1;
2058                         req_schp->sglist_len = rsv_schp->sglist_len;
2059                         req_schp->pages = rsv_schp->pages;
2060
2061                         req_schp->bufflen = size;
2062                         req_schp->page_order = rsv_schp->page_order;
2063                         break;
2064                 } else
2065                         rem -= num;
2066         }
2067
2068         if (k >= rsv_schp->k_use_sg)
2069                 SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
2070                                  "sg_link_reserve: BAD size\n"));
2071 }
2072
2073 static void
2074 sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
2075 {
2076         Sg_scatter_hold *req_schp = &srp->data;
2077
2078         SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp,
2079                                       "sg_unlink_reserve: req->k_use_sg=%d\n",
2080                                       (int) req_schp->k_use_sg));
2081         req_schp->k_use_sg = 0;
2082         req_schp->bufflen = 0;
2083         req_schp->pages = NULL;
2084         req_schp->page_order = 0;
2085         req_schp->sglist_len = 0;
2086         srp->res_used = 0;
2087         /* Called without mutex lock to avoid deadlock */
2088         sfp->res_in_use = 0;
2089 }
2090
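/*
 * Find a completed request (optionally matching pack_id) that a reader
 * may claim.  srp->done is a small state machine: 0 = command still
 * executing, 1 = response ready, 2 = response already being returned
 * by another reader.
 */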
2091 static Sg_request *
2092 sg_get_rq_mark(Sg_fd * sfp, int pack_id, bool *busy)
2093 {
2094         Sg_request *resp;
2095         unsigned long iflags;
2096
2097         *busy = false;
2098         write_lock_irqsave(&sfp->rq_list_lock, iflags);
2099         list_for_each_entry(resp, &sfp->rq_list, entry) {
2100                 /* look for requests that are not SG_IO owned */
2101                 if ((!resp->sg_io_owned) &&
2102                     ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
2103                         switch (resp->done) {
2104                         case 0: /* request active */
2105                                 *busy = true;
2106                                 break;
2107                         case 1: /* request done; response ready to return */
2108                                 resp->done = 2; /* guard against other readers */
2109                                 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2110                                 return resp;
2111                         case 2: /* response already being returned */
2112                                 break;
2113                         }
2114                 }
2115         }
2116         write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2117         return NULL;
2118 }
2119
2120 /* always adds to end of rq_list; entries come from the fixed req_arr pool */
2121 static Sg_request *
2122 sg_add_request(Sg_fd * sfp)
2123 {
2124         int k;
2125         unsigned long iflags;
2126         Sg_request *rp = sfp->req_arr;
2127
2128         write_lock_irqsave(&sfp->rq_list_lock, iflags);
2129         if (!list_empty(&sfp->rq_list)) {
2130                 if (!sfp->cmd_q)
2131                         goto out_unlock;
2132
2133                 for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
2134                         if (!rp->parentfp)
2135                                 break;
2136                 }
2137                 if (k >= SG_MAX_QUEUE)
2138                         goto out_unlock;
2139         }
2140         memset(rp, 0, sizeof (Sg_request));
2141         rp->parentfp = sfp;
2142         rp->header.duration = jiffies_to_msecs(jiffies);
2143         list_add_tail(&rp->entry, &sfp->rq_list);
2144         write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2145         return rp;
2146 out_unlock:
2147         write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2148         return NULL;
2149 }
2150
2151 /* Returns 1 if the request was found and removed; 0 otherwise */
2152 static int
2153 sg_remove_request(Sg_fd * sfp, Sg_request * srp)
2154 {
2155         unsigned long iflags;
2156         int res = 0;
2157
2158         if (!sfp || !srp || list_empty(&sfp->rq_list))
2159                 return res;
2160         write_lock_irqsave(&sfp->rq_list_lock, iflags);
2161         if (!list_empty(&srp->entry)) {
2162                 list_del(&srp->entry);
2163                 srp->parentfp = NULL;
2164                 res = 1;
2165         }
2166         write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2167
2168         /*
2169          * If the device is detaching, wakeup any readers in case we just
2170          * removed the last response, which would leave nothing for them to
2171          * return other than -ENODEV.
2172          */
2173         if (unlikely(atomic_read(&sfp->parentdp->detaching)))
2174                 wake_up_interruptible_all(&sfp->read_wait);
2175
2176         return res;
2177 }
2178
2179 static Sg_fd *
2180 sg_add_sfp(Sg_device * sdp)
2181 {
2182         Sg_fd *sfp;
2183         unsigned long iflags;
2184         int bufflen;
2185
2186         sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
2187         if (!sfp)
2188                 return ERR_PTR(-ENOMEM);
2189
2190         init_waitqueue_head(&sfp->read_wait);
2191         rwlock_init(&sfp->rq_list_lock);
2192         INIT_LIST_HEAD(&sfp->rq_list);
2193         kref_init(&sfp->f_ref);
2194         mutex_init(&sfp->f_mutex);
2195         sfp->timeout = SG_DEFAULT_TIMEOUT;
2196         sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
2197         sfp->force_packid = SG_DEF_FORCE_PACK_ID;
2198         sfp->cmd_q = SG_DEF_COMMAND_Q;
2199         sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
2200         sfp->parentdp = sdp;
2201         write_lock_irqsave(&sdp->sfd_lock, iflags);
2202         if (atomic_read(&sdp->detaching)) {
2203                 write_unlock_irqrestore(&sdp->sfd_lock, iflags);
2204                 kfree(sfp);
2205                 return ERR_PTR(-ENODEV);
2206         }
2207         list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
2208         write_unlock_irqrestore(&sdp->sfd_lock, iflags);
2209         SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
2210                                       "sg_add_sfp: sfp=0x%p\n", sfp));
2211         if (unlikely(sg_big_buff != def_reserved_size))
2212                 sg_big_buff = def_reserved_size;
2213
2214         bufflen = min_t(int, sg_big_buff,
2215                         max_sectors_bytes(sdp->device->request_queue));
2216         sg_build_reserve(sfp, bufflen);
2217         SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
2218                                       "sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
2219                                       sfp->reserve.bufflen,
2220                                       sfp->reserve.k_use_sg));
2221
2222         kref_get(&sdp->d_ref);
2223         __module_get(THIS_MODULE);
2224         return sfp;
2225 }
2226
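/*
 * Final teardown of a file descriptor, run from a workqueue so that the
 * potentially blocking cleanup (blk_rq_unmap_user() via
 * sg_finish_rem_req()) happens in process context rather than wherever
 * the last reference happened to be dropped.
 */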
2227 static void
2228 sg_remove_sfp_usercontext(struct work_struct *work)
2229 {
2230         struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
2231         struct sg_device *sdp = sfp->parentdp;
2232         Sg_request *srp;
2233         unsigned long iflags;
2234
2235         /* Clean up any responses which were never read(). */
2236         write_lock_irqsave(&sfp->rq_list_lock, iflags);
2237         while (!list_empty(&sfp->rq_list)) {
2238                 srp = list_first_entry(&sfp->rq_list, Sg_request, entry);
2239                 sg_finish_rem_req(srp);
2240                 list_del(&srp->entry);
2241                 srp->parentfp = NULL;
2242         }
2243         write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2244
2245         if (sfp->reserve.bufflen > 0) {
2246                 SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
2247                                 "sg_remove_sfp:    bufflen=%d, k_use_sg=%d\n",
2248                                 (int) sfp->reserve.bufflen,
2249                                 (int) sfp->reserve.k_use_sg));
2250                 sg_remove_scat(sfp, &sfp->reserve);
2251         }
2252
2253         SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
2254                         "sg_remove_sfp: sfp=0x%p\n", sfp));
2255         kfree(sfp);
2256
2257         scsi_device_put(sdp->device);
2258         kref_put(&sdp->d_ref, sg_device_destroy);
2259         module_put(THIS_MODULE);
2260 }
2261
2262 static void
2263 sg_remove_sfp(struct kref *kref)
2264 {
2265         struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref);
2266         struct sg_device *sdp = sfp->parentdp;
2267         unsigned long iflags;
2268
2269         write_lock_irqsave(&sdp->sfd_lock, iflags);
2270         list_del(&sfp->sfd_siblings);
2271         write_unlock_irqrestore(&sdp->sfd_lock, iflags);
2272
2273         INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
2274         schedule_work(&sfp->ew.work);
2275 }
2276
2277 #ifdef CONFIG_SCSI_PROC_FS
2278 static int
2279 sg_idr_max_id(int id, void *p, void *data)
2280 {
2281         int *k = data;
2282
2283         if (*k < id)
2284                 *k = id;
2285
2286         return 0;
2287 }
2288
2289 static int
2290 sg_last_dev(void)
2291 {
2292         int k = -1;
2293         unsigned long iflags;
2294
2295         read_lock_irqsave(&sg_index_lock, iflags);
2296         idr_for_each(&sg_index_idr, sg_idr_max_id, &k);
2297         read_unlock_irqrestore(&sg_index_lock, iflags);
2298         return k + 1;           /* origin 1 */
2299 }
2300 #endif
2301
2302 /* must be called with sg_index_lock held */
2303 static Sg_device *sg_lookup_dev(int dev)
2304 {
2305         return idr_find(&sg_index_idr, dev);
2306 }
2307
2308 static Sg_device *
2309 sg_get_dev(int dev)
2310 {
2311         struct sg_device *sdp;
2312         unsigned long flags;
2313
2314         read_lock_irqsave(&sg_index_lock, flags);
2315         sdp = sg_lookup_dev(dev);
2316         if (!sdp)
2317                 sdp = ERR_PTR(-ENXIO);
2318         else if (atomic_read(&sdp->detaching)) {
2319                 /* If sdp->detaching, then the refcount may already be 0, in
2320                  * which case it would be a bug to do kref_get().
2321                  */
2322                 sdp = ERR_PTR(-ENODEV);
2323         } else
2324                 kref_get(&sdp->d_ref);
2325         read_unlock_irqrestore(&sg_index_lock, flags);
2326
2327         return sdp;
2328 }
2329
2330 #ifdef CONFIG_SCSI_PROC_FS
2331
2332 static struct proc_dir_entry *sg_proc_sgp = NULL;
2333
2334 static char sg_proc_sg_dirname[] = "scsi/sg";
2335
2336 static int sg_proc_seq_show_int(struct seq_file *s, void *v);
2337
2338 static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
2339 static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
2340                                   size_t count, loff_t *off);
2341 static const struct file_operations adio_fops = {
2342         .owner = THIS_MODULE,
2343         .open = sg_proc_single_open_adio,
2344         .read = seq_read,
2345         .llseek = seq_lseek,
2346         .write = sg_proc_write_adio,
2347         .release = single_release,
2348 };
2349
2350 static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
2351 static ssize_t sg_proc_write_dressz(struct file *filp, 
2352                 const char __user *buffer, size_t count, loff_t *off);
2353 static const struct file_operations dressz_fops = {
2354         .owner = THIS_MODULE,
2355         .open = sg_proc_single_open_dressz,
2356         .read = seq_read,
2357         .llseek = seq_lseek,
2358         .write = sg_proc_write_dressz,
2359         .release = single_release,
2360 };
2361
2362 static int sg_proc_seq_show_version(struct seq_file *s, void *v);
2363 static int sg_proc_single_open_version(struct inode *inode, struct file *file);
2364 static const struct file_operations version_fops = {
2365         .owner = THIS_MODULE,
2366         .open = sg_proc_single_open_version,
2367         .read = seq_read,
2368         .llseek = seq_lseek,
2369         .release = single_release,
2370 };
2371
2372 static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v);
2373 static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file);
2374 static const struct file_operations devhdr_fops = {
2375         .owner = THIS_MODULE,
2376         .open = sg_proc_single_open_devhdr,
2377         .read = seq_read,
2378         .llseek = seq_lseek,
2379         .release = single_release,
2380 };
2381
2382 static int sg_proc_seq_show_dev(struct seq_file *s, void *v);
2383 static int sg_proc_open_dev(struct inode *inode, struct file *file);
2384 static void * dev_seq_start(struct seq_file *s, loff_t *pos);
2385 static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos);
2386 static void dev_seq_stop(struct seq_file *s, void *v);
2387 static const struct file_operations dev_fops = {
2388         .owner = THIS_MODULE,
2389         .open = sg_proc_open_dev,
2390         .read = seq_read,
2391         .llseek = seq_lseek,
2392         .release = seq_release,
2393 };
2394 static const struct seq_operations dev_seq_ops = {
2395         .start = dev_seq_start,
2396         .next  = dev_seq_next,
2397         .stop  = dev_seq_stop,
2398         .show  = sg_proc_seq_show_dev,
2399 };
2400
2401 static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v);
2402 static int sg_proc_open_devstrs(struct inode *inode, struct file *file);
2403 static const struct file_operations devstrs_fops = {
2404         .owner = THIS_MODULE,
2405         .open = sg_proc_open_devstrs,
2406         .read = seq_read,
2407         .llseek = seq_lseek,
2408         .release = seq_release,
2409 };
2410 static const struct seq_operations devstrs_seq_ops = {
2411         .start = dev_seq_start,
2412         .next  = dev_seq_next,
2413         .stop  = dev_seq_stop,
2414         .show  = sg_proc_seq_show_devstrs,
2415 };
2416
2417 static int sg_proc_seq_show_debug(struct seq_file *s, void *v);
2418 static int sg_proc_open_debug(struct inode *inode, struct file *file);
2419 static const struct file_operations debug_fops = {
2420         .owner = THIS_MODULE,
2421         .open = sg_proc_open_debug,
2422         .read = seq_read,
2423         .llseek = seq_lseek,
2424         .release = seq_release,
2425 };
2426 static const struct seq_operations debug_seq_ops = {
2427         .start = dev_seq_start,
2428         .next  = dev_seq_next,
2429         .stop  = dev_seq_stop,
2430         .show  = sg_proc_seq_show_debug,
2431 };
2432
2433
2434 struct sg_proc_leaf {
2435         const char * name;
2436         const struct file_operations * fops;
2437 };
2438
2439 static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
2440         {"allow_dio", &adio_fops},
2441         {"debug", &debug_fops},
2442         {"def_reserved_size", &dressz_fops},
2443         {"device_hdr", &devhdr_fops},
2444         {"devices", &dev_fops},
2445         {"device_strs", &devstrs_fops},
2446         {"version", &version_fops}
2447 };
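
/*
 * Each entry above appears as /proc/scsi/sg/<name>.  The two whose fops
 * provide a write method (allow_dio and def_reserved_size) are made
 * writable by the mode selection in sg_proc_init() below.
 */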
2448
2449 static int
2450 sg_proc_init(void)
2451 {
2452         int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
2453         int k;
2454
2455         sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
2456         if (!sg_proc_sgp)
2457                 return 1;
2458         for (k = 0; k < num_leaves; ++k) {
2459                 const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
2460                 umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
2461                 proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
2462         }
2463         return 0;
2464 }
2465
2466 static void
2467 sg_proc_cleanup(void)
2468 {
2469         int k;
2470         int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
2471
2472         if (!sg_proc_sgp)
2473                 return;
2474         for (k = 0; k < num_leaves; ++k)
2475                 remove_proc_entry(sg_proc_leaf_arr[k].name, sg_proc_sgp);
2476         remove_proc_entry(sg_proc_sg_dirname, NULL);
2477 }
2478
2479
2480 static int sg_proc_seq_show_int(struct seq_file *s, void *v)
2481 {
2482         seq_printf(s, "%d\n", *((int *)s->private));
2483         return 0;
2484 }
2485
2486 static int sg_proc_single_open_adio(struct inode *inode, struct file *file)
2487 {
2488         return single_open(file, sg_proc_seq_show_int, &sg_allow_dio);
2489 }
2490
2491 static ssize_t 
2492 sg_proc_write_adio(struct file *filp, const char __user *buffer,
2493                    size_t count, loff_t *off)
2494 {
2495         int err;
2496         unsigned long num;
2497
2498         if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2499                 return -EACCES;
2500         err = kstrtoul_from_user(buffer, count, 0, &num);
2501         if (err)
2502                 return err;
2503         sg_allow_dio = num ? 1 : 0;
2504         return count;
2505 }
2506
2507 static int sg_proc_single_open_dressz(struct inode *inode, struct file *file)
2508 {
2509         return single_open(file, sg_proc_seq_show_int, &sg_big_buff);
2510 }
2511
2512 static ssize_t 
2513 sg_proc_write_dressz(struct file *filp, const char __user *buffer,
2514                      size_t count, loff_t *off)
2515 {
2516         int err;
2517         unsigned long k = ULONG_MAX;
2518
2519         if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2520                 return -EACCES;
2521
2522         err = kstrtoul_from_user(buffer, count, 0, &k);
2523         if (err)
2524                 return err;
2525         if (k <= 1048576) {     /* limit "big buff" to 1 MB */
2526                 sg_big_buff = k;
2527                 return count;
2528         }
2529         return -ERANGE;
2530 }
2531
2532 static int sg_proc_seq_show_version(struct seq_file *s, void *v)
2533 {
2534         seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR,
2535                    sg_version_date);
2536         return 0;
2537 }
2538
2539 static int sg_proc_single_open_version(struct inode *inode, struct file *file)
2540 {
2541         return single_open(file, sg_proc_seq_show_version, NULL);
2542 }
2543
2544 static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v)
2545 {
2546         seq_puts(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\tonline\n");
2547         return 0;
2548 }
2549
2550 static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file)
2551 {
2552         return single_open(file, sg_proc_seq_show_devhdr, NULL);
2553 }
2554
2555 struct sg_proc_deviter {
2556         loff_t  index;
2557         size_t  max;
2558 };
2559
2560 static void * dev_seq_start(struct seq_file *s, loff_t *pos)
2561 {
2562         struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);
2563
2564         s->private = it;
2565         if (!it)
2566                 return NULL;
2567
2568         it->index = *pos;
2569         it->max = sg_last_dev();
2570         if (it->index >= it->max)
2571                 return NULL;
2572         return it;
2573 }
2574
2575 static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
2576 {
2577         struct sg_proc_deviter * it = s->private;
2578
2579         *pos = ++it->index;
2580         return (it->index < it->max) ? it : NULL;
2581 }
2582
2583 static void dev_seq_stop(struct seq_file *s, void *v)
2584 {
2585         kfree(s->private);
2586 }
2587
2588 static int sg_proc_open_dev(struct inode *inode, struct file *file)
2589 {
2590         return seq_open(file, &dev_seq_ops);
2591 }
2592
2593 static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
2594 {
2595         struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
2596         Sg_device *sdp;
2597         struct scsi_device *scsidp;
2598         unsigned long iflags;
2599
2600         read_lock_irqsave(&sg_index_lock, iflags);
2601         sdp = it ? sg_lookup_dev(it->index) : NULL;
2602         if ((NULL == sdp) || (NULL == sdp->device) ||
2603             (atomic_read(&sdp->detaching)))
2604                 seq_puts(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
2605         else {
2606                 scsidp = sdp->device;
2607                 seq_printf(s, "%d\t%d\t%d\t%llu\t%d\t%d\t%d\t%d\t%d\n",
2608                               scsidp->host->host_no, scsidp->channel,
2609                               scsidp->id, scsidp->lun, (int) scsidp->type,
2610                               1,
2611                               (int) scsidp->queue_depth,
2612                               (int) atomic_read(&scsidp->device_busy),
2613                               (int) scsi_device_online(scsidp));
2614         }
2615         read_unlock_irqrestore(&sg_index_lock, iflags);
2616         return 0;
2617 }
2618
2619 static int sg_proc_open_devstrs(struct inode *inode, struct file *file)
2620 {
2621         return seq_open(file, &devstrs_seq_ops);
2622 }
2623
2624 static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
2625 {
2626         struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
2627         Sg_device *sdp;
2628         struct scsi_device *scsidp;
2629         unsigned long iflags;
2630
2631         read_lock_irqsave(&sg_index_lock, iflags);
2632         sdp = it ? sg_lookup_dev(it->index) : NULL;
2633         scsidp = sdp ? sdp->device : NULL;
2634         if (sdp && scsidp && (!atomic_read(&sdp->detaching)))
2635                 seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
2636                            scsidp->vendor, scsidp->model, scsidp->rev);
2637         else
2638                 seq_puts(s, "<no active device>\n");
2639         read_unlock_irqrestore(&sg_index_lock, iflags);
2640         return 0;
2641 }
2642
2643 /* must be called while holding sg_index_lock */
2644 static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
2645 {
2646         int k, new_interface, blen, usg;
2647         Sg_request *srp;
2648         Sg_fd *fp;
2649         const sg_io_hdr_t *hp;
2650         const char * cp;
2651         unsigned int ms;
2652
2653         k = 0;
2654         list_for_each_entry(fp, &sdp->sfds, sfd_siblings) {
2655                 k++;
2656                 read_lock(&fp->rq_list_lock); /* irqs already disabled */
2657                 seq_printf(s, "   FD(%d): timeout=%dms bufflen=%d "
2658                            "(res)sgat=%d low_dma=%d\n", k,
2659                            jiffies_to_msecs(fp->timeout),
2660                            fp->reserve.bufflen,
2661                            (int) fp->reserve.k_use_sg,
2662                            (int) sdp->device->host->unchecked_isa_dma);
2663                 seq_printf(s, "   cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
2664                            (int) fp->cmd_q, (int) fp->force_packid,
2665                            (int) fp->keep_orphan);
2666                 list_for_each_entry(srp, &fp->rq_list, entry) {
2667                         hp = &srp->header;
2668                         new_interface = (hp->interface_id == '\0') ? 0 : 1;
2669                         if (srp->res_used) {
2670                                 if (new_interface &&
2671                                     (SG_FLAG_MMAP_IO & hp->flags))
2672                                         cp = "     mmap>> ";
2673                                 else
2674                                         cp = "     rb>> ";
2675                         } else {
2676                                 if (SG_INFO_DIRECT_IO_MASK & hp->info)
2677                                         cp = "     dio>> ";
2678                                 else
2679                                         cp = "     ";
2680                         }
2681                         seq_puts(s, cp);
2682                         blen = srp->data.bufflen;
2683                         usg = srp->data.k_use_sg;
2684                         seq_puts(s, srp->done ?
2685                                  ((1 == srp->done) ?  "rcv:" : "fin:")
2686                                   : "act:");
2687                         seq_printf(s, " id=%d blen=%d",
2688                                    srp->header.pack_id, blen);
2689                         if (srp->done)
2690                                 seq_printf(s, " dur=%d", hp->duration);
2691                         else {
2692                                 ms = jiffies_to_msecs(jiffies);
2693                                 seq_printf(s, " t_o/elap=%d/%d",
2694                                         (new_interface ? hp->timeout :
2695                                                   jiffies_to_msecs(fp->timeout)),
2696                                         (ms > hp->duration ? ms - hp->duration : 0));
2697                         }
2698                         seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
2699                                    (int) srp->data.cmd_opcode);
2700                 }
2701                 if (list_empty(&fp->rq_list))
2702                         seq_puts(s, "     No requests active\n");
2703                 read_unlock(&fp->rq_list_lock);
2704         }
2705 }
2706
2707 static int sg_proc_open_debug(struct inode *inode, struct file *file)
2708 {
2709         return seq_open(file, &debug_seq_ops);
2710 }
2711
2712 static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
2713 {
2714         struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
2715         Sg_device *sdp;
2716         unsigned long iflags;
2717
2718         if (it && (0 == it->index))
2719                 seq_printf(s, "max_active_device=%d  def_reserved_size=%d\n",
2720                            (int)it->max, sg_big_buff);
2721
2722         read_lock_irqsave(&sg_index_lock, iflags);
2723         sdp = it ? sg_lookup_dev(it->index) : NULL;
2724         if (NULL == sdp)
2725                 goto skip;
2726         read_lock(&sdp->sfd_lock);
2727         if (!list_empty(&sdp->sfds)) {
2728                 seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
2729                 if (atomic_read(&sdp->detaching))
2730                         seq_puts(s, "detaching pending close ");
2731                 else if (sdp->device) {
2732                         struct scsi_device *scsidp = sdp->device;
2733
2734                         seq_printf(s, "%d:%d:%d:%llu   em=%d",
2735                                    scsidp->host->host_no,
2736                                    scsidp->channel, scsidp->id,
2737                                    scsidp->lun,
2738                                    scsidp->host->hostt->emulated);
2739                 }
2740                 seq_printf(s, " sg_tablesize=%d excl=%d open_cnt=%d\n",
2741                            sdp->sg_tablesize, sdp->exclude, sdp->open_cnt);
2742                 sg_proc_debug_helper(s, sdp);
2743         }
2744         read_unlock(&sdp->sfd_lock);
2745 skip:
2746         read_unlock_irqrestore(&sg_index_lock, iflags);
2747         return 0;
2748 }
2749
2750 #endif                          /* CONFIG_SCSI_PROC_FS */
2751
2752 module_init(init_sg);
2753 module_exit(exit_sg);