3 * Started: Aug 9 by Lawrence Foard (entropy@world.std.com),
4 * to allow user process control of SCSI devices.
5 * Development Sponsored by Killy Corp. NY NY
7 * Original driver (sg.c):
8 * Copyright (C) 1992 Lawrence Foard
9 * Version 2 and 3 extensions to driver:
10 * Copyright (C) 1998 - 2014 Douglas Gilbert
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
19 static int sg_version_num = 30536; /* 2 digits for each component */
20 #define SG_VERSION_STR "3.5.36"
23 * D. P. Gilbert (dgilbert@interlog.com), notes:
24 * - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
25 * the kernel/module needs to be built with CONFIG_SCSI_LOGGING
26 * (otherwise the macros compile to empty statements).
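/*
 * For example (usage sketch, not a prescription), this driver emits its
 * trace messages as
 *
 *     SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
 *                                   "sg_open: flags=0x%x\n", flags));
 *
 * which only prints when the SCSI "timeout" logging level is >= 3; with
 * CONFIG_SCSI_LOGGING enabled that level can typically be raised at run
 * time via the dev.scsi.logging_level sysctl (the bit layout is defined
 * in scsi_logging.h).
 */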
29 #include <linux/module.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/string.h>
36 #include <linux/errno.h>
37 #include <linux/mtio.h>
38 #include <linux/ioctl.h>
39 #include <linux/slab.h>
40 #include <linux/fcntl.h>
41 #include <linux/init.h>
42 #include <linux/poll.h>
43 #include <linux/moduleparam.h>
44 #include <linux/cdev.h>
45 #include <linux/idr.h>
46 #include <linux/seq_file.h>
47 #include <linux/blkdev.h>
48 #include <linux/delay.h>
49 #include <linux/blktrace_api.h>
50 #include <linux/mutex.h>
51 #include <linux/atomic.h>
52 #include <linux/ratelimit.h>
53 #include <linux/uio.h>
56 #include <scsi/scsi_dbg.h>
57 #include <scsi/scsi_host.h>
58 #include <scsi/scsi_driver.h>
59 #include <scsi/scsi_ioctl.h>
62 #include "scsi_logging.h"
64 #ifdef CONFIG_SCSI_PROC_FS
65 #include <linux/proc_fs.h>
66 static char *sg_version_date = "20140603";
68 static int sg_proc_init(void);
71 #define SG_ALLOW_DIO_DEF 0
73 #define SG_MAX_DEVS 32768
75 /* SG_MAX_CDB_SIZE should be 260 (spc4r37 section 3.1.30) however the type
76 * of sg_io_hdr::cmd_len can only represent 255. All SCSI commands greater
77 * than 16 bytes are "variable length" whose length is a multiple of 4, so 252 is the largest length that fits.
79 #define SG_MAX_CDB_SIZE 252
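/*
 * For reference, a minimal userspace sketch of the v3 interface that this
 * cmd_len limit applies to (hypothetical example code, not part of the
 * driver): it issues a 6-byte INQUIRY through SG_IO on an sg node.
 *
 *     #include <fcntl.h>
 *     #include <string.h>
 *     #include <unistd.h>
 *     #include <sys/ioctl.h>
 *     #include <scsi/sg.h>
 *
 *     int inquiry_example(const char *dev)
 *     {
 *             unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };  // INQUIRY
 *             unsigned char resp[96], sense[32];
 *             sg_io_hdr_t hdr;
 *             int fd, ret;
 *
 *             fd = open(dev, O_RDWR);
 *             if (fd < 0)
 *                     return -1;
 *             memset(&hdr, 0, sizeof(hdr));
 *             hdr.interface_id = 'S';          // v3 interface marker
 *             hdr.cmd_len = sizeof(cdb);       // must be <= SG_MAX_CDB_SIZE
 *             hdr.cmdp = cdb;
 *             hdr.dxfer_direction = SG_DXFER_FROM_DEV;
 *             hdr.dxfer_len = sizeof(resp);
 *             hdr.dxferp = resp;
 *             hdr.mx_sb_len = sizeof(sense);
 *             hdr.sbp = sense;
 *             hdr.timeout = 20000;             // milliseconds
 *             ret = ioctl(fd, SG_IO, &hdr);
 *             close(fd);
 *             return ret;
 *     }
 */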
81 #define SG_DEFAULT_TIMEOUT mult_frac(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
83 int sg_big_buff = SG_DEF_RESERVED_SIZE;
84 /* N.B. This variable is readable and writeable via
85 /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
86 of this size (or less if there is not enough memory) will be reserved
87 for use by this file descriptor. [Deprecated usage: this variable is also
88 readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into
89 the kernel (i.e. it is not a module).] */
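/*
 * A per-fd sketch of the same tunable (illustrative userspace code; the
 * names "fd" and "wanted" are assumptions): the reserve actually granted
 * can be queried and, before any mmap() or queued command, resized with:
 *
 *     int sz;
 *
 *     if (ioctl(fd, SG_GET_RESERVED_SIZE, &sz) == 0 && sz < wanted) {
 *             sz = wanted;
 *             ioctl(fd, SG_SET_RESERVED_SIZE, &sz);  // may be clamped or fail
 *     }
 */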
90 static int def_reserved_size = -1; /* picks up init parameter */
91 static int sg_allow_dio = SG_ALLOW_DIO_DEF;
93 static int scatter_elem_sz = SG_SCATTER_SZ;
94 static int scatter_elem_sz_prev = SG_SCATTER_SZ;
96 #define SG_SECTOR_SZ 512
98 static int sg_add_device(struct device *, struct class_interface *);
99 static void sg_remove_device(struct device *, struct class_interface *);
101 static DEFINE_IDR(sg_index_idr);
102 static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock
103 file descriptor list for device */
105 static struct class_interface sg_interface = {
106 .add_dev = sg_add_device,
107 .remove_dev = sg_remove_device,
110 typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
111 unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
112 unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */
113 unsigned bufflen; /* Size of (aggregate) data buffer */
116 char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */
117 unsigned char cmd_opcode; /* first byte of command */
120 struct sg_device; /* forward declarations */
123 typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
124 struct list_head entry; /* list entry */
125 struct sg_fd *parentfp; /* NULL -> not in use */
126 Sg_scatter_hold data; /* hold buffer, perhaps scatter list */
127 sg_io_hdr_t header; /* scsi command+info, see <scsi/sg.h> */
128 unsigned char sense_b[SCSI_SENSE_BUFFERSIZE];
129 char res_used; /* 1 -> using reserve buffer, 0 -> not ... */
130 char orphan; /* 1 -> drop on sight, 0 -> normal */
131 char sg_io_owned; /* 1 -> packet belongs to SG_IO */
132 /* done protected by rq_list_lock */
133 char done; /* 0->before bh, 1->before read, 2->read */
136 struct execute_work ew;
139 typedef struct sg_fd { /* holds the state of a file descriptor */
140 struct list_head sfd_siblings; /* protected by device's sfd_lock */
141 struct sg_device *parentdp; /* owning device */
142 wait_queue_head_t read_wait; /* queue read until command done */
143 rwlock_t rq_list_lock; /* protect access to list in req_arr */
144 struct mutex f_mutex; /* protect against changes in this fd */
145 int timeout; /* defaults to SG_DEFAULT_TIMEOUT */
146 int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */
147 Sg_scatter_hold reserve; /* buffer held for this file descriptor */
148 struct list_head rq_list; /* head of request list */
149 struct fasync_struct *async_qp; /* used by asynchronous notification */
150 Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */
151 char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */
152 char cmd_q; /* 1 -> allow command queuing, 0 -> don't */
153 unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */
154 char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */
155 char mmap_called; /* 0 -> mmap() never called on this fd */
156 char res_in_use; /* 1 -> 'reserve' array in use */
158 struct execute_work ew;
161 typedef struct sg_device { /* holds the state of each scsi generic device */
162 struct scsi_device *device;
163 wait_queue_head_t open_wait; /* queue open() when O_EXCL present */
164 struct mutex open_rel_lock; /* held when in open() or release() */
165 int sg_tablesize; /* adapter's max scatter-gather table size */
166 u32 index; /* device index number */
167 struct list_head sfds;
168 rwlock_t sfd_lock; /* protect access to sfd list */
169 atomic_t detaching; /* 0->device usable, 1->device detaching */
170 bool exclude; /* 1->open(O_EXCL) succeeded and is active */
171 int open_cnt; /* count of opens (perhaps < num(sfds) ) */
172 char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
173 struct gendisk *disk;
174 struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */
178 /* tasklet or soft irq callback */
179 static void sg_rq_end_io(struct request *rq, blk_status_t status);
180 static int sg_start_req(Sg_request *srp, unsigned char *cmd);
181 static int sg_finish_rem_req(Sg_request * srp);
182 static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
183 static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
185 static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
186 const char __user *buf, size_t count, int blocking,
187 int read_only, int sg_io_owned, Sg_request **o_srp);
188 static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
189 unsigned char *cmnd, int timeout, int blocking);
190 static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
191 static void sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp);
192 static void sg_build_reserve(Sg_fd * sfp, int req_size);
193 static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
194 static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
195 static Sg_fd *sg_add_sfp(Sg_device * sdp);
196 static void sg_remove_sfp(struct kref *);
197 static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
198 static Sg_request *sg_add_request(Sg_fd * sfp);
199 static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
200 static Sg_device *sg_get_dev(int dev);
201 static void sg_device_destroy(struct kref *kref);
203 #define SZ_SG_HEADER sizeof(struct sg_header)
204 #define SZ_SG_IO_HDR sizeof(sg_io_hdr_t)
205 #define SZ_SG_IOVEC sizeof(sg_iovec_t)
206 #define SZ_SG_REQ_INFO sizeof(sg_req_info_t)
208 #define sg_printk(prefix, sdp, fmt, a...) \
209 sdev_prefix_printk(prefix, (sdp)->device, \
210 (sdp)->disk->disk_name, fmt, ##a)
212 static int sg_allow_access(struct file *filp, unsigned char *cmd)
214 struct sg_fd *sfp = filp->private_data;
216 if (sfp->parentdp->device->type == TYPE_SCANNER)
219 return blk_verify_command(cmd, filp->f_mode);
223 open_wait(Sg_device *sdp, int flags)
227 if (flags & O_EXCL) {
228 while (sdp->open_cnt > 0) {
229 mutex_unlock(&sdp->open_rel_lock);
230 retval = wait_event_interruptible(sdp->open_wait,
231 (atomic_read(&sdp->detaching) ||
233 mutex_lock(&sdp->open_rel_lock);
235 if (retval) /* -ERESTARTSYS */
237 if (atomic_read(&sdp->detaching))
241 while (sdp->exclude) {
242 mutex_unlock(&sdp->open_rel_lock);
243 retval = wait_event_interruptible(sdp->open_wait,
244 (atomic_read(&sdp->detaching) ||
246 mutex_lock(&sdp->open_rel_lock);
248 if (retval) /* -ERESTARTSYS */
250 if (atomic_read(&sdp->detaching))
258 /* Returns 0 on success, else a negated errno value */
260 sg_open(struct inode *inode, struct file *filp)
262 int dev = iminor(inode);
263 int flags = filp->f_flags;
264 struct request_queue *q;
269 nonseekable_open(inode, filp);
270 if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE)))
271 return -EPERM; /* Can't lock it with read only access */
272 sdp = sg_get_dev(dev);
276 SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
277 "sg_open: flags=0x%x\n", flags));
279 /* This driver's module count bumped by fops_get in <linux/fs.h> */
280 /* Prevent the device driver from vanishing while we sleep */
281 retval = scsi_device_get(sdp->device);
285 retval = scsi_autopm_get_device(sdp->device);
289 /* scsi_block_when_processing_errors() may block so bypass
290 * check if O_NONBLOCK. Permits SCSI commands to be issued
291 * during error recovery. Tread carefully. */
292 if (!((flags & O_NONBLOCK) ||
293 scsi_block_when_processing_errors(sdp->device))) {
295 /* we are in error recovery for this device */
299 mutex_lock(&sdp->open_rel_lock);
300 if (flags & O_NONBLOCK) {
301 if (flags & O_EXCL) {
302 if (sdp->open_cnt > 0) {
304 goto error_mutex_locked;
309 goto error_mutex_locked;
313 retval = open_wait(sdp, flags);
314 if (retval) /* -ERESTARTSYS or -ENODEV */
315 goto error_mutex_locked;
318 /* N.B. at this point we are holding the open_rel_lock */
322 if (sdp->open_cnt < 1) { /* no existing opens */
324 q = sdp->device->request_queue;
325 sdp->sg_tablesize = queue_max_segments(q);
327 sfp = sg_add_sfp(sdp);
329 retval = PTR_ERR(sfp);
333 filp->private_data = sfp;
335 mutex_unlock(&sdp->open_rel_lock);
339 kref_put(&sdp->d_ref, sg_device_destroy);
343 if (flags & O_EXCL) {
344 sdp->exclude = false; /* undo if error */
345 wake_up_interruptible(&sdp->open_wait);
348 mutex_unlock(&sdp->open_rel_lock);
350 scsi_autopm_put_device(sdp->device);
352 scsi_device_put(sdp->device);
356 /* Release resources associated with a successful sg_open()
357 * Returns 0 on success, else a negated errno value */
359 sg_release(struct inode *inode, struct file *filp)
364 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
366 SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_release\n"));
368 mutex_lock(&sdp->open_rel_lock);
369 scsi_autopm_put_device(sdp->device);
370 kref_put(&sfp->f_ref, sg_remove_sfp);
373 /* possibly many open()s waiting on exclude clearing, start many;
374 * only open(O_EXCL)s wait on 0==open_cnt so only start one */
376 sdp->exclude = false;
377 wake_up_interruptible_all(&sdp->open_wait);
378 } else if (0 == sdp->open_cnt) {
379 wake_up_interruptible(&sdp->open_wait);
381 mutex_unlock(&sdp->open_rel_lock);
386 sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
391 int req_pack_id = -1;
393 struct sg_header *old_hdr = NULL;
396 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
398 SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
399 "sg_read: count=%d\n", (int) count));
401 if (!access_ok(VERIFY_WRITE, buf, count))
403 if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
404 old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
407 if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) {
411 if (old_hdr->reply_len < 0) {
412 if (count >= SZ_SG_IO_HDR) {
413 sg_io_hdr_t *new_hdr;
414 new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL);
419 retval =__copy_from_user
420 (new_hdr, buf, SZ_SG_IO_HDR);
421 req_pack_id = new_hdr->pack_id;
429 req_pack_id = old_hdr->pack_id;
431 srp = sg_get_rq_mark(sfp, req_pack_id);
432 if (!srp) { /* now wait on packet to arrive */
433 if (atomic_read(&sdp->detaching)) {
437 if (filp->f_flags & O_NONBLOCK) {
441 retval = wait_event_interruptible(sfp->read_wait,
442 (atomic_read(&sdp->detaching) ||
443 (srp = sg_get_rq_mark(sfp, req_pack_id))));
444 if (atomic_read(&sdp->detaching)) {
449 /* -ERESTARTSYS as signal hit process */
453 if (srp->header.interface_id != '\0') {
454 retval = sg_new_read(sfp, buf, count, srp);
459 if (old_hdr == NULL) {
460 old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
466 memset(old_hdr, 0, SZ_SG_HEADER);
467 old_hdr->reply_len = (int) hp->timeout;
468 old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */
469 old_hdr->pack_id = hp->pack_id;
470 old_hdr->twelve_byte =
471 ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0;
472 old_hdr->target_status = hp->masked_status;
473 old_hdr->host_status = hp->host_status;
474 old_hdr->driver_status = hp->driver_status;
475 if ((CHECK_CONDITION & hp->masked_status) ||
476 (DRIVER_SENSE & hp->driver_status))
477 memcpy(old_hdr->sense_buffer, srp->sense_b,
478 sizeof (old_hdr->sense_buffer));
479 switch (hp->host_status) {
480 /* This setup of 'result' is for backward compatibility and is best
481 ignored by the user, who should use the target, host and driver status */
483 case DID_PASSTHROUGH:
490 old_hdr->result = EBUSY;
497 old_hdr->result = EIO;
500 old_hdr->result = (srp->sense_b[0] == 0 &&
501 hp->masked_status == GOOD) ? 0 : EIO;
504 old_hdr->result = EIO;
508 /* Now copy the result back to the user buffer. */
509 if (count >= SZ_SG_HEADER) {
510 if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
515 if (count > old_hdr->reply_len)
516 count = old_hdr->reply_len;
517 if (count > SZ_SG_HEADER) {
518 if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) {
524 count = (old_hdr->result == 0) ? 0 : -EIO;
525 sg_finish_rem_req(srp);
526 sg_remove_request(sfp, srp);
534 sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
536 sg_io_hdr_t *hp = &srp->header;
540 if (count < SZ_SG_IO_HDR) {
545 if ((hp->mx_sb_len > 0) && hp->sbp) {
546 if ((CHECK_CONDITION & hp->masked_status) ||
547 (DRIVER_SENSE & hp->driver_status)) {
548 int sb_len = SCSI_SENSE_BUFFERSIZE;
549 sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len;
550 len = 8 + (int) srp->sense_b[7]; /* Additional sense length field */
551 len = (len > sb_len) ? sb_len : len;
552 if (copy_to_user(hp->sbp, srp->sense_b, len)) {
559 if (hp->masked_status || hp->host_status || hp->driver_status)
560 hp->info |= SG_INFO_CHECK;
561 if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) {
566 err2 = sg_finish_rem_req(srp);
567 sg_remove_request(sfp, srp);
568 return err ? : err2 ? : count;
572 sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
574 int mxsize, cmd_size, k;
575 int input_size, blocking;
576 unsigned char opcode;
580 struct sg_header old_hdr;
582 unsigned char cmnd[SG_MAX_CDB_SIZE];
584 if (unlikely(uaccess_kernel()))
587 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
589 SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
590 "sg_write: count=%d\n", (int) count));
591 if (atomic_read(&sdp->detaching))
593 if (!((filp->f_flags & O_NONBLOCK) ||
594 scsi_block_when_processing_errors(sdp->device)))
597 if (!access_ok(VERIFY_READ, buf, count))
598 return -EFAULT; /* protects following copy_from_user()s + get_user()s */
599 if (count < SZ_SG_HEADER)
601 if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
603 blocking = !(filp->f_flags & O_NONBLOCK);
604 if (old_hdr.reply_len < 0)
605 return sg_new_write(sfp, filp, buf, count,
606 blocking, 0, 0, NULL);
607 if (count < (SZ_SG_HEADER + 6))
608 return -EIO; /* The minimum scsi command length is 6 bytes. */
610 if (!(srp = sg_add_request(sfp))) {
611 SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sdp,
612 "sg_write: queue full\n"));
616 __get_user(opcode, buf);
617 mutex_lock(&sfp->f_mutex);
618 if (sfp->next_cmd_len > 0) {
619 cmd_size = sfp->next_cmd_len;
620 sfp->next_cmd_len = 0; /* reset so only this write() is affected */
622 cmd_size = COMMAND_SIZE(opcode); /* based on SCSI command group */
623 if ((opcode >= 0xc0) && old_hdr.twelve_byte)
626 mutex_unlock(&sfp->f_mutex);
627 SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
628 "sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
629 /* Determine buffer size. */
630 input_size = count - cmd_size;
631 mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len;
632 mxsize -= SZ_SG_HEADER;
633 input_size -= SZ_SG_HEADER;
634 if (input_size < 0) {
635 sg_remove_request(sfp, srp);
636 return -EIO; /* User did not pass enough bytes for this command. */
639 hp->interface_id = '\0'; /* indicator of old interface tunnelled */
640 hp->cmd_len = (unsigned char) cmd_size;
644 hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ?
645 SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV;
647 hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
648 hp->dxfer_len = mxsize;
649 if ((hp->dxfer_direction == SG_DXFER_TO_DEV) ||
650 (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV))
651 hp->dxferp = (char __user *)buf + cmd_size;
655 hp->timeout = old_hdr.reply_len; /* structure abuse ... */
656 hp->flags = input_size; /* structure abuse ... */
657 hp->pack_id = old_hdr.pack_id;
659 if (__copy_from_user(cmnd, buf, cmd_size))
662 * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
663 * but it is possible that the app intended SG_DXFER_TO_DEV, because there
664 * is a non-zero input_size, so emit a warning.
666 if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) {
667 printk_ratelimited(KERN_WARNING
668 "sg_write: data in/out %d/%d bytes "
669 "for SCSI command 0x%x-- guessing "
670 "data in;\n program %s not setting "
671 "count and/or reply_len properly\n",
672 old_hdr.reply_len - (int)SZ_SG_HEADER,
673 input_size, (unsigned int) cmnd[0],
676 k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
677 return (k < 0) ? k : count;
681 sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
682 size_t count, int blocking, int read_only, int sg_io_owned,
688 unsigned char cmnd[SG_MAX_CDB_SIZE];
690 unsigned long ul_timeout;
692 if (count < SZ_SG_IO_HDR)
694 if (!access_ok(VERIFY_READ, buf, count))
695 return -EFAULT; /* protects following copy_from_user()s + get_user()s */
697 sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */
698 if (!(srp = sg_add_request(sfp))) {
699 SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
700 "sg_new_write: queue full\n"));
703 srp->sg_io_owned = sg_io_owned;
705 if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) {
706 sg_remove_request(sfp, srp);
709 if (hp->interface_id != 'S') {
710 sg_remove_request(sfp, srp);
713 if (hp->flags & SG_FLAG_MMAP_IO) {
714 if (hp->dxfer_len > sfp->reserve.bufflen) {
715 sg_remove_request(sfp, srp);
716 return -ENOMEM; /* MMAP_IO size must fit in reserve buffer */
718 if (hp->flags & SG_FLAG_DIRECT_IO) {
719 sg_remove_request(sfp, srp);
720 return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */
722 if (sfp->res_in_use) {
723 sg_remove_request(sfp, srp);
724 return -EBUSY; /* reserve buffer already being used */
727 ul_timeout = msecs_to_jiffies(srp->header.timeout);
728 timeout = (ul_timeout < INT_MAX) ? ul_timeout : INT_MAX;
729 if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) {
730 sg_remove_request(sfp, srp);
733 if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) {
734 sg_remove_request(sfp, srp);
735 return -EFAULT; /* protects following copy_from_user()s + get_user()s */
737 if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
738 sg_remove_request(sfp, srp);
741 if (read_only && sg_allow_access(file, cmnd)) {
742 sg_remove_request(sfp, srp);
745 k = sg_common_write(sfp, srp, cmnd, timeout, blocking);
754 sg_common_write(Sg_fd * sfp, Sg_request * srp,
755 unsigned char *cmnd, int timeout, int blocking)
758 Sg_device *sdp = sfp->parentdp;
759 sg_io_hdr_t *hp = &srp->header;
761 srp->data.cmd_opcode = cmnd[0]; /* hold opcode of command */
763 hp->masked_status = 0;
767 hp->driver_status = 0;
769 SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
770 "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
771 (int) cmnd[0], (int) hp->cmd_len));
773 if (hp->dxfer_len >= SZ_256M)
776 k = sg_start_req(srp, cmnd);
778 SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
779 "sg_common_write: start_req err=%d\n", k));
780 sg_finish_rem_req(srp);
781 sg_remove_request(sfp, srp);
782 return k; /* probably out of space --> ENOMEM */
784 if (atomic_read(&sdp->detaching)) {
786 scsi_req_free_cmd(scsi_req(srp->rq));
787 blk_end_request_all(srp->rq, BLK_STS_IOERR);
791 sg_finish_rem_req(srp);
792 sg_remove_request(sfp, srp);
796 hp->duration = jiffies_to_msecs(jiffies);
797 if (hp->interface_id != '\0' && /* v3 (or later) interface */
798 (SG_FLAG_Q_AT_TAIL & hp->flags))
803 srp->rq->timeout = timeout;
804 kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
805 blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
806 srp->rq, at_head, sg_rq_end_io);
810 static int srp_done(Sg_fd *sfp, Sg_request *srp)
815 read_lock_irqsave(&sfp->rq_list_lock, flags);
817 read_unlock_irqrestore(&sfp->rq_list_lock, flags);
821 static int max_sectors_bytes(struct request_queue *q)
823 unsigned int max_sectors = queue_max_sectors(q);
825 max_sectors = min_t(unsigned int, max_sectors, INT_MAX >> 9);
827 return max_sectors << 9;
831 sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo)
838 list_for_each_entry(srp, &sfp->rq_list, entry) {
839 if (val >= SG_MAX_QUEUE)
841 rinfo[val].req_state = srp->done + 1;
843 srp->header.masked_status &
844 srp->header.host_status &
845 srp->header.driver_status;
847 rinfo[val].duration =
848 srp->header.duration;
850 ms = jiffies_to_msecs(jiffies);
851 rinfo[val].duration =
852 (ms > srp->header.duration) ?
853 (ms - srp->header.duration) : 0;
855 rinfo[val].orphan = srp->orphan;
856 rinfo[val].sg_io_owned = srp->sg_io_owned;
857 rinfo[val].pack_id = srp->header.pack_id;
858 rinfo[val].usr_ptr = srp->header.usr_ptr;
864 sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
866 void __user *p = (void __user *)arg;
868 int result, val, read_only;
872 unsigned long iflags;
874 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
877 SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
878 "sg_ioctl: cmd=0x%x\n", (int) cmd_in));
879 read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));
883 if (atomic_read(&sdp->detaching))
885 if (!scsi_block_when_processing_errors(sdp->device))
887 if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
889 result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
890 1, read_only, 1, &srp);
893 result = wait_event_interruptible(sfp->read_wait,
894 (srp_done(sfp, srp) || atomic_read(&sdp->detaching)));
895 if (atomic_read(&sdp->detaching))
897 write_lock_irq(&sfp->rq_list_lock);
900 write_unlock_irq(&sfp->rq_list_lock);
901 result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
902 return (result < 0) ? result : 0;
905 write_unlock_irq(&sfp->rq_list_lock);
906 return result; /* -ERESTARTSYS because signal hit process */
908 result = get_user(val, ip);
913 if (val >= mult_frac((s64)INT_MAX, USER_HZ, HZ))
914 val = min_t(s64, mult_frac((s64)INT_MAX, USER_HZ, HZ),
916 sfp->timeout_user = val;
917 sfp->timeout = mult_frac(val, HZ, USER_HZ);
920 case SG_GET_TIMEOUT: /* N.B. User receives timeout as return value */
921 /* strange ..., for backward compatibility */
922 return sfp->timeout_user;
923 case SG_SET_FORCE_LOW_DMA:
925 * N.B. This ioctl never worked properly, but failed to
926 * return an error value. So returning '0' to keep compatibility
927 * with legacy applications.
931 return put_user((int) sdp->device->host->unchecked_isa_dma, ip);
933 if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t)))
936 sg_scsi_id_t __user *sg_idp = p;
938 if (atomic_read(&sdp->detaching))
940 __put_user((int) sdp->device->host->host_no,
942 __put_user((int) sdp->device->channel,
944 __put_user((int) sdp->device->id, &sg_idp->scsi_id);
945 __put_user((int) sdp->device->lun, &sg_idp->lun);
946 __put_user((int) sdp->device->type, &sg_idp->scsi_type);
947 __put_user((short) sdp->device->host->cmd_per_lun,
948 &sg_idp->h_cmd_per_lun);
949 __put_user((short) sdp->device->queue_depth,
950 &sg_idp->d_queue_depth);
951 __put_user(0, &sg_idp->unused[0]);
952 __put_user(0, &sg_idp->unused[1]);
955 case SG_SET_FORCE_PACK_ID:
956 result = get_user(val, ip);
959 sfp->force_packid = val ? 1 : 0;
962 if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
964 read_lock_irqsave(&sfp->rq_list_lock, iflags);
965 list_for_each_entry(srp, &sfp->rq_list, entry) {
966 if ((1 == srp->done) && (!srp->sg_io_owned)) {
967 read_unlock_irqrestore(&sfp->rq_list_lock,
969 __put_user(srp->header.pack_id, ip);
973 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
976 case SG_GET_NUM_WAITING:
977 read_lock_irqsave(&sfp->rq_list_lock, iflags);
979 list_for_each_entry(srp, &sfp->rq_list, entry) {
980 if ((1 == srp->done) && (!srp->sg_io_owned))
983 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
984 return put_user(val, ip);
985 case SG_GET_SG_TABLESIZE:
986 return put_user(sdp->sg_tablesize, ip);
987 case SG_SET_RESERVED_SIZE:
988 result = get_user(val, ip);
993 val = min_t(int, val,
994 max_sectors_bytes(sdp->device->request_queue));
995 mutex_lock(&sfp->f_mutex);
996 if (val != sfp->reserve.bufflen) {
997 if (sfp->mmap_called ||
999 mutex_unlock(&sfp->f_mutex);
1003 sg_remove_scat(sfp, &sfp->reserve);
1004 sg_build_reserve(sfp, val);
1006 mutex_unlock(&sfp->f_mutex);
1008 case SG_GET_RESERVED_SIZE:
1009 val = min_t(int, sfp->reserve.bufflen,
1010 max_sectors_bytes(sdp->device->request_queue));
1011 return put_user(val, ip);
1012 case SG_SET_COMMAND_Q:
1013 result = get_user(val, ip);
1016 sfp->cmd_q = val ? 1 : 0;
1018 case SG_GET_COMMAND_Q:
1019 return put_user((int) sfp->cmd_q, ip);
1020 case SG_SET_KEEP_ORPHAN:
1021 result = get_user(val, ip);
1024 sfp->keep_orphan = val;
1026 case SG_GET_KEEP_ORPHAN:
1027 return put_user((int) sfp->keep_orphan, ip);
1028 case SG_NEXT_CMD_LEN:
1029 result = get_user(val, ip);
1032 if (val > SG_MAX_CDB_SIZE)
1034 sfp->next_cmd_len = (val > 0) ? val : 0;
1036 case SG_GET_VERSION_NUM:
1037 return put_user(sg_version_num, ip);
1038 case SG_GET_ACCESS_COUNT:
1039 /* faked - we don't have a real access count anymore */
1040 val = (sdp->device ? 1 : 0);
1041 return put_user(val, ip);
1042 case SG_GET_REQUEST_TABLE:
1043 if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE))
1046 sg_req_info_t *rinfo;
1048 rinfo = kcalloc(SG_MAX_QUEUE, SZ_SG_REQ_INFO,
1052 read_lock_irqsave(&sfp->rq_list_lock, iflags);
1053 sg_fill_request_table(sfp, rinfo);
1054 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
1055 result = __copy_to_user(p, rinfo,
1056 SZ_SG_REQ_INFO * SG_MAX_QUEUE);
1057 result = result ? -EFAULT : 0;
1061 case SG_EMULATED_HOST:
1062 if (atomic_read(&sdp->detaching))
1064 return put_user(sdp->device->host->hostt->emulated, ip);
1065 case SCSI_IOCTL_SEND_COMMAND:
1066 if (atomic_read(&sdp->detaching))
1069 unsigned char opcode = WRITE_6;
1070 Scsi_Ioctl_Command __user *siocp = p;
1072 if (copy_from_user(&opcode, siocp->data, 1))
1074 if (sg_allow_access(filp, &opcode))
1077 return sg_scsi_ioctl(sdp->device->request_queue, NULL, filp->f_mode, p);
1079 result = get_user(val, ip);
1082 sdp->sgdebug = (char) val;
1085 return put_user(max_sectors_bytes(sdp->device->request_queue),
1088 return blk_trace_setup(sdp->device->request_queue,
1089 sdp->disk->disk_name,
1090 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
1093 return blk_trace_startstop(sdp->device->request_queue, 1);
1095 return blk_trace_startstop(sdp->device->request_queue, 0);
1096 case BLKTRACETEARDOWN:
1097 return blk_trace_remove(sdp->device->request_queue);
1098 case SCSI_IOCTL_GET_IDLUN:
1099 case SCSI_IOCTL_GET_BUS_NUMBER:
1100 case SCSI_IOCTL_PROBE_HOST:
1101 case SG_GET_TRANSFORM:
1103 if (atomic_read(&sdp->detaching))
1108 return -EPERM; /* don't know so take safe approach */
1112 result = scsi_ioctl_block_when_processing_errors(sdp->device,
1113 cmd_in, filp->f_flags & O_NDELAY);
1116 return scsi_ioctl(sdp->device, cmd_in, p);
1119 #ifdef CONFIG_COMPAT
1120 static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
1124 struct scsi_device *sdev;
1126 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
1130 if (sdev->host->hostt->compat_ioctl) {
1133 ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);
1138 return -ENOIOCTLCMD;
1143 sg_poll(struct file *filp, poll_table * wait)
1150 unsigned long iflags;
1152 sfp = filp->private_data;
1155 sdp = sfp->parentdp;
1158 poll_wait(filp, &sfp->read_wait, wait);
1159 read_lock_irqsave(&sfp->rq_list_lock, iflags);
1160 list_for_each_entry(srp, &sfp->rq_list, entry) {
1161 /* if any read waiting, flag it */
1162 if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
1163 res = EPOLLIN | EPOLLRDNORM;
1166 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
1168 if (atomic_read(&sdp->detaching))
1170 else if (!sfp->cmd_q) {
1172 res |= EPOLLOUT | EPOLLWRNORM;
1173 } else if (count < SG_MAX_QUEUE)
1174 res |= EPOLLOUT | EPOLLWRNORM;
1175 SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
1176 "sg_poll: res=0x%x\n", (__force u32) res));
1181 sg_fasync(int fd, struct file *filp, int mode)
1186 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
1188 SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
1189 "sg_fasync: mode=%d\n", mode));
1191 return fasync_helper(fd, filp, mode, &sfp->async_qp);
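/*
 * Userspace counterpart (illustrative sketch only): sg_poll() and sg_fasync()
 * above are what make the usual readiness idioms work on an sg fd, e.g.
 *
 *     struct pollfd pfd = { .fd = sg_fd, .events = POLLIN };
 *
 *     poll(&pfd, 1, timeout_ms);    // returns when a request has completed
 *
 * or, for SIGPOLL-driven completion (see kill_fasync() in sg_rq_end_io()):
 *
 *     fcntl(sg_fd, F_SETOWN, getpid());
 *     fcntl(sg_fd, F_SETFL, fcntl(sg_fd, F_GETFL) | O_ASYNC);
 */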
1195 sg_vma_fault(struct vm_fault *vmf)
1197 struct vm_area_struct *vma = vmf->vma;
1199 unsigned long offset, len, sa;
1200 Sg_scatter_hold *rsv_schp;
1203 if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
1204 return VM_FAULT_SIGBUS;
1205 rsv_schp = &sfp->reserve;
1206 offset = vmf->pgoff << PAGE_SHIFT;
1207 if (offset >= rsv_schp->bufflen)
1208 return VM_FAULT_SIGBUS;
1209 SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp,
1210 "sg_vma_fault: offset=%lu, scatg=%d\n",
1211 offset, rsv_schp->k_use_sg));
1213 length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
1214 for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
1215 len = vma->vm_end - sa;
1216 len = (len < length) ? len : length;
1218 struct page *page = nth_page(rsv_schp->pages[k],
1219 offset >> PAGE_SHIFT);
1220 get_page(page); /* increment page count */
1222 return 0; /* success */
1228 return VM_FAULT_SIGBUS;
1231 static const struct vm_operations_struct sg_mmap_vm_ops = {
1232 .fault = sg_vma_fault,
1236 sg_mmap(struct file *filp, struct vm_area_struct *vma)
1239 unsigned long req_sz, len, sa;
1240 Sg_scatter_hold *rsv_schp;
1244 if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
1246 req_sz = vma->vm_end - vma->vm_start;
1247 SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp,
1248 "sg_mmap starting, vm_start=%p, len=%d\n",
1249 (void *) vma->vm_start, (int) req_sz));
1251 return -EINVAL; /* want no offset */
1252 rsv_schp = &sfp->reserve;
1253 mutex_lock(&sfp->f_mutex);
1254 if (req_sz > rsv_schp->bufflen) {
1255 ret = -ENOMEM; /* cannot map more than reserved buffer */
1260 length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
1261 for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
1262 len = vma->vm_end - sa;
1263 len = (len < length) ? len : length;
1267 sfp->mmap_called = 1;
1268 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
1269 vma->vm_private_data = sfp;
1270 vma->vm_ops = &sg_mmap_vm_ops;
1272 mutex_unlock(&sfp->f_mutex);
1277 sg_rq_end_io_usercontext(struct work_struct *work)
1279 struct sg_request *srp = container_of(work, struct sg_request, ew.work);
1280 struct sg_fd *sfp = srp->parentfp;
1282 sg_finish_rem_req(srp);
1283 sg_remove_request(sfp, srp);
1284 kref_put(&sfp->f_ref, sg_remove_sfp);
1288 * This function is a "bottom half" handler that is called by the mid
1289 * level when a command is completed (or has failed).
1292 sg_rq_end_io(struct request *rq, blk_status_t status)
1294 struct sg_request *srp = rq->end_io_data;
1295 struct scsi_request *req = scsi_req(rq);
1298 unsigned long iflags;
1301 int result, resid, done = 1;
1303 if (WARN_ON(srp->done != 0))
1306 sfp = srp->parentfp;
1307 if (WARN_ON(sfp == NULL))
1310 sdp = sfp->parentdp;
1311 if (unlikely(atomic_read(&sdp->detaching)))
1312 pr_info("%s: device detaching\n", __func__);
1315 result = req->result;
1316 resid = req->resid_len;
1318 SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
1319 "sg_cmd_done: pack_id=%d, res=0x%x\n",
1320 srp->header.pack_id, result));
1321 srp->header.resid = resid;
1322 ms = jiffies_to_msecs(jiffies);
1323 srp->header.duration = (ms > srp->header.duration) ?
1324 (ms - srp->header.duration) : 0;
1326 struct scsi_sense_hdr sshdr;
1328 srp->header.status = 0xff & result;
1329 srp->header.masked_status = status_byte(result);
1330 srp->header.msg_status = msg_byte(result);
1331 srp->header.host_status = host_byte(result);
1332 srp->header.driver_status = driver_byte(result);
1333 if ((sdp->sgdebug > 0) &&
1334 ((CHECK_CONDITION == srp->header.masked_status) ||
1335 (COMMAND_TERMINATED == srp->header.masked_status)))
1336 __scsi_print_sense(sdp->device, __func__, sense,
1337 SCSI_SENSE_BUFFERSIZE);
1339 /* Following if statement is a patch supplied by Eric Youngdale */
1340 if (driver_byte(result) != 0
1341 && scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)
1342 && !scsi_sense_is_deferred(&sshdr)
1343 && sshdr.sense_key == UNIT_ATTENTION
1344 && sdp->device->removable) {
1345 /* Detected possible disc change. Set the bit - this */
1346 /* may be used if there are filesystems using this device */
1347 sdp->device->changed = 1;
1352 memcpy(srp->sense_b, req->sense, SCSI_SENSE_BUFFERSIZE);
1354 /* Rely on write phase to clean out srp status values, so no "else" */
1357 * Free the request as soon as it is complete so that its resources
1358 * can be reused without waiting for userspace to read() the
1359 * result. But keep the associated bio (if any) around until
1360 * blk_rq_unmap_user() can be called from user context.
1363 scsi_req_free_cmd(scsi_req(rq));
1364 __blk_put_request(rq->q, rq);
1366 write_lock_irqsave(&sfp->rq_list_lock, iflags);
1367 if (unlikely(srp->orphan)) {
1368 if (sfp->keep_orphan)
1369 srp->sg_io_owned = 0;
1374 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
1377 /* Now wake up any sg_read() that is waiting for this
1380 wake_up_interruptible(&sfp->read_wait);
1381 kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
1382 kref_put(&sfp->f_ref, sg_remove_sfp);
1384 INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext);
1385 schedule_work(&srp->ew.work);
1389 static const struct file_operations sg_fops = {
1390 .owner = THIS_MODULE,
1394 .unlocked_ioctl = sg_ioctl,
1395 #ifdef CONFIG_COMPAT
1396 .compat_ioctl = sg_compat_ioctl,
1400 .release = sg_release,
1401 .fasync = sg_fasync,
1402 .llseek = no_llseek,
1405 static struct class *sg_sysfs_class;
1407 static int sg_sysfs_valid = 0;
1410 sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
1412 struct request_queue *q = scsidp->request_queue;
1414 unsigned long iflags;
1418 sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL);
1420 sdev_printk(KERN_WARNING, scsidp, "%s: kmalloc Sg_device "
1421 "failure\n", __func__);
1422 return ERR_PTR(-ENOMEM);
1425 idr_preload(GFP_KERNEL);
1426 write_lock_irqsave(&sg_index_lock, iflags);
1428 error = idr_alloc(&sg_index_idr, sdp, 0, SG_MAX_DEVS, GFP_NOWAIT);
1430 if (error == -ENOSPC) {
1431 sdev_printk(KERN_WARNING, scsidp,
1432 "Unable to attach sg device type=%d, minor number exceeds %d\n",
1433 scsidp->type, SG_MAX_DEVS - 1);
1436 sdev_printk(KERN_WARNING, scsidp, "%s: idr "
1437 "allocation Sg_device failure: %d\n",
1444 SCSI_LOG_TIMEOUT(3, sdev_printk(KERN_INFO, scsidp,
1445 "sg_alloc: dev=%d \n", k));
1446 sprintf(disk->disk_name, "sg%d", k);
1447 disk->first_minor = k;
1449 sdp->device = scsidp;
1450 mutex_init(&sdp->open_rel_lock);
1451 INIT_LIST_HEAD(&sdp->sfds);
1452 init_waitqueue_head(&sdp->open_wait);
1453 atomic_set(&sdp->detaching, 0);
1454 rwlock_init(&sdp->sfd_lock);
1455 sdp->sg_tablesize = queue_max_segments(q);
1457 kref_init(&sdp->d_ref);
1461 write_unlock_irqrestore(&sg_index_lock, iflags);
1466 return ERR_PTR(error);
1472 sg_add_device(struct device *cl_dev, struct class_interface *cl_intf)
1474 struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
1475 struct gendisk *disk;
1476 Sg_device *sdp = NULL;
1477 struct cdev * cdev = NULL;
1479 unsigned long iflags;
1481 disk = alloc_disk(1);
1483 pr_warn("%s: alloc_disk failed\n", __func__);
1486 disk->major = SCSI_GENERIC_MAJOR;
1489 cdev = cdev_alloc();
1491 pr_warn("%s: cdev_alloc failed\n", __func__);
1494 cdev->owner = THIS_MODULE;
1495 cdev->ops = &sg_fops;
1497 sdp = sg_alloc(disk, scsidp);
1499 pr_warn("%s: sg_alloc failed\n", __func__);
1500 error = PTR_ERR(sdp);
1504 error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), 1);
1509 if (sg_sysfs_valid) {
1510 struct device *sg_class_member;
1512 sg_class_member = device_create(sg_sysfs_class, cl_dev->parent,
1513 MKDEV(SCSI_GENERIC_MAJOR,
1515 sdp, "%s", disk->disk_name);
1516 if (IS_ERR(sg_class_member)) {
1517 pr_err("%s: device_create failed\n", __func__);
1518 error = PTR_ERR(sg_class_member);
1521 error = sysfs_create_link(&scsidp->sdev_gendev.kobj,
1522 &sg_class_member->kobj, "generic");
1524 pr_err("%s: unable to make symlink 'generic' back "
1525 "to sg%d\n", __func__, sdp->index);
1527 pr_warn("%s: sg_sys Invalid\n", __func__);
1529 sdev_printk(KERN_NOTICE, scsidp, "Attached scsi generic sg%d "
1530 "type %d\n", sdp->index, scsidp->type);
1532 dev_set_drvdata(cl_dev, sdp);
1537 write_lock_irqsave(&sg_index_lock, iflags);
1538 idr_remove(&sg_index_idr, sdp->index);
1539 write_unlock_irqrestore(&sg_index_lock, iflags);
1550 sg_device_destroy(struct kref *kref)
1552 struct sg_device *sdp = container_of(kref, struct sg_device, d_ref);
1553 unsigned long flags;
1555 /* CAUTION! Note that the device can still be found via idr_find()
1556 * even though the refcount is 0. Therefore, do idr_remove() BEFORE
1557 * any other cleanup.
1560 write_lock_irqsave(&sg_index_lock, flags);
1561 idr_remove(&sg_index_idr, sdp->index);
1562 write_unlock_irqrestore(&sg_index_lock, flags);
1565 sg_printk(KERN_INFO, sdp, "sg_device_destroy\n"));
1567 put_disk(sdp->disk);
1572 sg_remove_device(struct device *cl_dev, struct class_interface *cl_intf)
1574 struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
1575 Sg_device *sdp = dev_get_drvdata(cl_dev);
1576 unsigned long iflags;
1582 /* want sdp->detaching non-zero as soon as possible */
1583 val = atomic_inc_return(&sdp->detaching);
1585 return; /* only want to do following once per device */
1587 SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
1590 read_lock_irqsave(&sdp->sfd_lock, iflags);
1591 list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) {
1592 wake_up_interruptible_all(&sfp->read_wait);
1593 kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
1595 wake_up_interruptible_all(&sdp->open_wait);
1596 read_unlock_irqrestore(&sdp->sfd_lock, iflags);
1598 sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
1599 device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
1600 cdev_del(sdp->cdev);
1603 kref_put(&sdp->d_ref, sg_device_destroy);
1606 module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR);
1607 module_param_named(def_reserved_size, def_reserved_size, int,
1609 module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR);
1611 MODULE_AUTHOR("Douglas Gilbert");
1612 MODULE_DESCRIPTION("SCSI generic (sg) driver");
1613 MODULE_LICENSE("GPL");
1614 MODULE_VERSION(SG_VERSION_STR);
1615 MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR);
1617 MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element "
1618 "size (default: max(SG_SCATTER_SZ, PAGE_SIZE))");
1619 MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
1620 MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
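/*
 * Illustrative use of the parameters above (module build assumed):
 *
 *     modprobe sg def_reserved_size=131072 allow_dio=1
 *
 * allow_dio and scatter_elem_sz are registered with S_IWUSR, so root can
 * also change them later through /sys/module/sg/parameters/.
 */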
1627 if (scatter_elem_sz < PAGE_SIZE) {
1628 scatter_elem_sz = PAGE_SIZE;
1629 scatter_elem_sz_prev = scatter_elem_sz;
1631 if (def_reserved_size >= 0)
1632 sg_big_buff = def_reserved_size;
1634 def_reserved_size = sg_big_buff;
1636 rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
1640 sg_sysfs_class = class_create(THIS_MODULE, "scsi_generic");
1641 if ( IS_ERR(sg_sysfs_class) ) {
1642 rc = PTR_ERR(sg_sysfs_class);
1646 rc = scsi_register_interface(&sg_interface);
1648 #ifdef CONFIG_SCSI_PROC_FS
1650 #endif /* CONFIG_SCSI_PROC_FS */
1653 class_destroy(sg_sysfs_class);
1655 unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);
1662 #ifdef CONFIG_SCSI_PROC_FS
1663 remove_proc_subtree("scsi/sg", NULL);
1664 #endif /* CONFIG_SCSI_PROC_FS */
1665 scsi_unregister_interface(&sg_interface);
1666 class_destroy(sg_sysfs_class);
1668 unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
1670 idr_destroy(&sg_index_idr);
1674 sg_start_req(Sg_request *srp, unsigned char *cmd)
1678 struct scsi_request *req;
1679 Sg_fd *sfp = srp->parentfp;
1680 sg_io_hdr_t *hp = &srp->header;
1681 int dxfer_len = (int) hp->dxfer_len;
1682 int dxfer_dir = hp->dxfer_direction;
1683 unsigned int iov_count = hp->iovec_count;
1684 Sg_scatter_hold *req_schp = &srp->data;
1685 Sg_scatter_hold *rsv_schp = &sfp->reserve;
1686 struct request_queue *q = sfp->parentdp->device->request_queue;
1687 struct rq_map_data *md, map_data;
1688 int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ;
1689 unsigned char *long_cmdp = NULL;
1691 SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
1692 "sg_start_req: dxfer_len=%d\n",
1695 if (hp->cmd_len > BLK_MAX_CDB) {
1696 long_cmdp = kzalloc(hp->cmd_len, GFP_KERNEL);
1704 * With scsi-mq enabled, there are a fixed number of preallocated
1705 * requests equal in number to shost->can_queue. If all of the
1706 * preallocated requests are already in use, then using GFP_ATOMIC with
1707 * blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL
1708 * will cause blk_get_request() to sleep until an active command
1709 * completes, freeing up a request. Neither option is ideal, but
1710 * GFP_KERNEL is the better choice to prevent userspace from getting an
1711 * unexpected EWOULDBLOCK.
1713 * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually
1714 * does not sleep except under memory pressure.
1716 rq = blk_get_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ?
1717 REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
1724 if (hp->cmd_len > BLK_MAX_CDB)
1725 req->cmd = long_cmdp;
1726 memcpy(req->cmd, cmd, hp->cmd_len);
1727 req->cmd_len = hp->cmd_len;
1730 rq->end_io_data = srp;
1731 req->retries = SG_DEFAULT_RETRIES;
1733 if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
1736 if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO &&
1737 dxfer_dir != SG_DXFER_UNKNOWN && !iov_count &&
1738 !sfp->parentdp->device->host->unchecked_isa_dma &&
1739 blk_rq_aligned(q, (unsigned long)hp->dxferp, dxfer_len))
1745 mutex_lock(&sfp->f_mutex);
1746 if (dxfer_len <= rsv_schp->bufflen &&
1748 sfp->res_in_use = 1;
1749 sg_link_reserve(sfp, srp, dxfer_len);
1750 } else if (hp->flags & SG_FLAG_MMAP_IO) {
1751 res = -EBUSY; /* sfp->res_in_use == 1 */
1752 if (dxfer_len > rsv_schp->bufflen)
1754 mutex_unlock(&sfp->f_mutex);
1757 res = sg_build_indirect(req_schp, sfp, dxfer_len);
1759 mutex_unlock(&sfp->f_mutex);
1763 mutex_unlock(&sfp->f_mutex);
1765 md->pages = req_schp->pages;
1766 md->page_order = req_schp->page_order;
1767 md->nr_entries = req_schp->k_use_sg;
1769 md->null_mapped = hp->dxferp ? 0 : 1;
1770 if (dxfer_dir == SG_DXFER_TO_FROM_DEV)
1777 struct iovec *iov = NULL;
1780 res = import_iovec(rw, hp->dxferp, iov_count, 0, &iov, &i);
1784 iov_iter_truncate(&i, hp->dxfer_len);
1785 if (!iov_iter_count(&i)) {
1790 res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC);
1793 res = blk_rq_map_user(q, rq, md, hp->dxferp,
1794 hp->dxfer_len, GFP_ATOMIC);
1800 req_schp->dio_in_use = 1;
1801 hp->info |= SG_INFO_DIRECT_IO;
1808 sg_finish_rem_req(Sg_request *srp)
1812 Sg_fd *sfp = srp->parentfp;
1813 Sg_scatter_hold *req_schp = &srp->data;
1815 SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
1816 "sg_finish_rem_req: res_used=%d\n",
1817 (int) srp->res_used));
1819 ret = blk_rq_unmap_user(srp->bio);
1822 scsi_req_free_cmd(scsi_req(srp->rq));
1823 blk_put_request(srp->rq);
1827 sg_unlink_reserve(sfp, srp);
1829 sg_remove_scat(sfp, req_schp);
1835 sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
1837 int sg_bufflen = tablesize * sizeof(struct page *);
1838 gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
1840 schp->pages = kzalloc(sg_bufflen, gfp_flags);
1843 schp->sglist_len = sg_bufflen;
1844 return tablesize; /* number of scat_gath elements allocated */
1848 sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1850 int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems;
1851 int sg_tablesize = sfp->parentdp->sg_tablesize;
1852 int blk_size = buff_size, order;
1853 gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
1854 struct sg_device *sdp = sfp->parentdp;
1859 ++blk_size; /* don't know why */
1860 /* round request up to next highest SG_SECTOR_SZ byte boundary */
1861 blk_size = ALIGN(blk_size, SG_SECTOR_SZ);
1862 SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
1863 "sg_build_indirect: buff_size=%d, blk_size=%d\n",
1864 buff_size, blk_size));
1866 /* N.B. ret_sz carried into this block ... */
1867 mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
1868 if (mx_sc_elems < 0)
1869 return mx_sc_elems; /* most likely -ENOMEM */
1871 num = scatter_elem_sz;
1872 if (unlikely(num != scatter_elem_sz_prev)) {
1873 if (num < PAGE_SIZE) {
1874 scatter_elem_sz = PAGE_SIZE;
1875 scatter_elem_sz_prev = PAGE_SIZE;
1877 scatter_elem_sz_prev = num;
1880 if (sdp->device->host->unchecked_isa_dma)
1881 gfp_mask |= GFP_DMA;
1883 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
1884 gfp_mask |= __GFP_ZERO;
1886 order = get_order(num);
1888 ret_sz = 1 << (PAGE_SHIFT + order);
1890 for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems;
1891 k++, rem_sz -= ret_sz) {
1893 num = (rem_sz > scatter_elem_sz_prev) ?
1894 scatter_elem_sz_prev : rem_sz;
1896 schp->pages[k] = alloc_pages(gfp_mask | __GFP_ZERO, order);
1897 if (!schp->pages[k])
1900 if (num == scatter_elem_sz_prev) {
1901 if (unlikely(ret_sz > scatter_elem_sz_prev)) {
1902 scatter_elem_sz = ret_sz;
1903 scatter_elem_sz_prev = ret_sz;
1907 SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp,
1908 "sg_build_indirect: k=%d, num=%d, ret_sz=%d\n",
1910 } /* end of for loop */
1912 schp->page_order = order;
1914 SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp,
1915 "sg_build_indirect: k_use_sg=%d, rem_sz=%d\n",
1918 schp->bufflen = blk_size;
1919 if (rem_sz > 0) /* must have failed */
1923 for (i = 0; i < k; i++)
1924 __free_pages(schp->pages[i], order);
1933 sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp)
1935 SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
1936 "sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
1937 if (schp->pages && schp->sglist_len > 0) {
1938 if (!schp->dio_in_use) {
1941 for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
1943 sg_printk(KERN_INFO, sfp->parentdp,
1944 "sg_remove_scat: k=%d, pg=0x%p\n",
1945 k, schp->pages[k]));
1946 __free_pages(schp->pages[k], schp->page_order);
1952 memset(schp, 0, sizeof (*schp));
1956 sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
1958 Sg_scatter_hold *schp = &srp->data;
1961 SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp,
1962 "sg_read_oxfer: num_read_xfer=%d\n",
1964 if ((!outp) || (num_read_xfer <= 0))
1967 num = 1 << (PAGE_SHIFT + schp->page_order);
1968 for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
1969 if (num > num_read_xfer) {
1970 if (__copy_to_user(outp, page_address(schp->pages[k]),
1975 if (__copy_to_user(outp, page_address(schp->pages[k]),
1978 num_read_xfer -= num;
1979 if (num_read_xfer <= 0)
1989 sg_build_reserve(Sg_fd * sfp, int req_size)
1991 Sg_scatter_hold *schp = &sfp->reserve;
1993 SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
1994 "sg_build_reserve: req_size=%d\n", req_size));
1996 if (req_size < PAGE_SIZE)
1997 req_size = PAGE_SIZE;
1998 if (0 == sg_build_indirect(schp, sfp, req_size))
2001 sg_remove_scat(sfp, schp);
2002 req_size >>= 1; /* divide by 2 */
2003 } while (req_size > (PAGE_SIZE / 2));
2007 sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
2009 Sg_scatter_hold *req_schp = &srp->data;
2010 Sg_scatter_hold *rsv_schp = &sfp->reserve;
2014 SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
2015 "sg_link_reserve: size=%d\n", size));
2018 num = 1 << (PAGE_SHIFT + rsv_schp->page_order);
2019 for (k = 0; k < rsv_schp->k_use_sg; k++) {
2021 req_schp->k_use_sg = k + 1;
2022 req_schp->sglist_len = rsv_schp->sglist_len;
2023 req_schp->pages = rsv_schp->pages;
2025 req_schp->bufflen = size;
2026 req_schp->page_order = rsv_schp->page_order;
2032 if (k >= rsv_schp->k_use_sg)
2033 SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
2034 "sg_link_reserve: BAD size\n"));
2038 sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
2040 Sg_scatter_hold *req_schp = &srp->data;
2042 SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp,
2043 "sg_unlink_reserve: req->k_use_sg=%d\n",
2044 (int) req_schp->k_use_sg));
2045 req_schp->k_use_sg = 0;
2046 req_schp->bufflen = 0;
2047 req_schp->pages = NULL;
2048 req_schp->page_order = 0;
2049 req_schp->sglist_len = 0;
2051 /* Called without mutex lock to avoid deadlock */
2052 sfp->res_in_use = 0;
2056 sg_get_rq_mark(Sg_fd * sfp, int pack_id)
2059 unsigned long iflags;
2061 write_lock_irqsave(&sfp->rq_list_lock, iflags);
2062 list_for_each_entry(resp, &sfp->rq_list, entry) {
2063 /* look for requests that are ready + not SG_IO owned */
2064 if ((1 == resp->done) && (!resp->sg_io_owned) &&
2065 ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
2066 resp->done = 2; /* guard against other readers */
2067 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2071 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2075 /* always adds to end of list */
2077 sg_add_request(Sg_fd * sfp)
2080 unsigned long iflags;
2081 Sg_request *rp = sfp->req_arr;
2083 write_lock_irqsave(&sfp->rq_list_lock, iflags);
2084 if (!list_empty(&sfp->rq_list)) {
2088 for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
2092 if (k >= SG_MAX_QUEUE)
2095 memset(rp, 0, sizeof (Sg_request));
2097 rp->header.duration = jiffies_to_msecs(jiffies);
2098 list_add_tail(&rp->entry, &sfp->rq_list);
2099 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2102 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2106 /* Return of 1 for found; 0 for not found */
2108 sg_remove_request(Sg_fd * sfp, Sg_request * srp)
2110 unsigned long iflags;
2113 if (!sfp || !srp || list_empty(&sfp->rq_list))
2115 write_lock_irqsave(&sfp->rq_list_lock, iflags);
2116 if (!list_empty(&srp->entry)) {
2117 list_del(&srp->entry);
2118 srp->parentfp = NULL;
2121 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2126 sg_add_sfp(Sg_device * sdp)
2129 unsigned long iflags;
2132 sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
2134 return ERR_PTR(-ENOMEM);
2136 init_waitqueue_head(&sfp->read_wait);
2137 rwlock_init(&sfp->rq_list_lock);
2138 INIT_LIST_HEAD(&sfp->rq_list);
2139 kref_init(&sfp->f_ref);
2140 mutex_init(&sfp->f_mutex);
2141 sfp->timeout = SG_DEFAULT_TIMEOUT;
2142 sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
2143 sfp->force_packid = SG_DEF_FORCE_PACK_ID;
2144 sfp->cmd_q = SG_DEF_COMMAND_Q;
2145 sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
2146 sfp->parentdp = sdp;
2147 write_lock_irqsave(&sdp->sfd_lock, iflags);
2148 if (atomic_read(&sdp->detaching)) {
2149 write_unlock_irqrestore(&sdp->sfd_lock, iflags);
2150 return ERR_PTR(-ENODEV);
2152 list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
2153 write_unlock_irqrestore(&sdp->sfd_lock, iflags);
2154 SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
2155 "sg_add_sfp: sfp=0x%p\n", sfp));
2156 if (unlikely(sg_big_buff != def_reserved_size))
2157 sg_big_buff = def_reserved_size;
2159 bufflen = min_t(int, sg_big_buff,
2160 max_sectors_bytes(sdp->device->request_queue));
2161 sg_build_reserve(sfp, bufflen);
2162 SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
2163 "sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
2164 sfp->reserve.bufflen,
2165 sfp->reserve.k_use_sg));
2167 kref_get(&sdp->d_ref);
2168 __module_get(THIS_MODULE);
2173 sg_remove_sfp_usercontext(struct work_struct *work)
2175 struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
2176 struct sg_device *sdp = sfp->parentdp;
2178 unsigned long iflags;
2180 /* Cleanup any responses which were never read(). */
2181 write_lock_irqsave(&sfp->rq_list_lock, iflags);
2182 while (!list_empty(&sfp->rq_list)) {
2183 srp = list_first_entry(&sfp->rq_list, Sg_request, entry);
2184 sg_finish_rem_req(srp);
2185 list_del(&srp->entry);
2186 srp->parentfp = NULL;
2188 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2190 if (sfp->reserve.bufflen > 0) {
2191 SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
2192 "sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
2193 (int) sfp->reserve.bufflen,
2194 (int) sfp->reserve.k_use_sg));
2195 sg_remove_scat(sfp, &sfp->reserve);
2198 SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
2199 "sg_remove_sfp: sfp=0x%p\n", sfp));
2202 scsi_device_put(sdp->device);
2203 kref_put(&sdp->d_ref, sg_device_destroy);
2204 module_put(THIS_MODULE);
2208 sg_remove_sfp(struct kref *kref)
2210 struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref);
2211 struct sg_device *sdp = sfp->parentdp;
2212 unsigned long iflags;
2214 write_lock_irqsave(&sdp->sfd_lock, iflags);
2215 list_del(&sfp->sfd_siblings);
2216 write_unlock_irqrestore(&sdp->sfd_lock, iflags);
2218 INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
2219 schedule_work(&sfp->ew.work);
2222 #ifdef CONFIG_SCSI_PROC_FS
2224 sg_idr_max_id(int id, void *p, void *data)
2238 unsigned long iflags;
2240 read_lock_irqsave(&sg_index_lock, iflags);
2241 idr_for_each(&sg_index_idr, sg_idr_max_id, &k);
2242 read_unlock_irqrestore(&sg_index_lock, iflags);
2243 return k + 1; /* origin 1 */
2247 /* must be called with sg_index_lock held */
2248 static Sg_device *sg_lookup_dev(int dev)
2250 return idr_find(&sg_index_idr, dev);
2256 struct sg_device *sdp;
2257 unsigned long flags;
2259 read_lock_irqsave(&sg_index_lock, flags);
2260 sdp = sg_lookup_dev(dev);
2262 sdp = ERR_PTR(-ENXIO);
2263 else if (atomic_read(&sdp->detaching)) {
2264 /* If sdp->detaching, then the refcount may already be 0, in
2265 * which case it would be a bug to do kref_get().
2267 sdp = ERR_PTR(-ENODEV);
2269 kref_get(&sdp->d_ref);
2270 read_unlock_irqrestore(&sg_index_lock, flags);
2275 #ifdef CONFIG_SCSI_PROC_FS
2276 static int sg_proc_seq_show_int(struct seq_file *s, void *v);
2278 static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
2279 static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
2280 size_t count, loff_t *off);
2281 static const struct file_operations adio_fops = {
2282 .owner = THIS_MODULE,
2283 .open = sg_proc_single_open_adio,
2285 .llseek = seq_lseek,
2286 .write = sg_proc_write_adio,
2287 .release = single_release,
2290 static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
2291 static ssize_t sg_proc_write_dressz(struct file *filp,
2292 const char __user *buffer, size_t count, loff_t *off);
2293 static const struct file_operations dressz_fops = {
2294 .owner = THIS_MODULE,
2295 .open = sg_proc_single_open_dressz,
2297 .llseek = seq_lseek,
2298 .write = sg_proc_write_dressz,
2299 .release = single_release,
2302 static int sg_proc_seq_show_version(struct seq_file *s, void *v);
2303 static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v);
2304 static int sg_proc_seq_show_dev(struct seq_file *s, void *v);
2305 static void * dev_seq_start(struct seq_file *s, loff_t *pos);
2306 static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos);
2307 static void dev_seq_stop(struct seq_file *s, void *v);
2308 static const struct seq_operations dev_seq_ops = {
2309 .start = dev_seq_start,
2310 .next = dev_seq_next,
2311 .stop = dev_seq_stop,
2312 .show = sg_proc_seq_show_dev,
2315 static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v);
2316 static const struct seq_operations devstrs_seq_ops = {
2317 .start = dev_seq_start,
2318 .next = dev_seq_next,
2319 .stop = dev_seq_stop,
2320 .show = sg_proc_seq_show_devstrs,
2323 static int sg_proc_seq_show_debug(struct seq_file *s, void *v);
2324 static const struct seq_operations debug_seq_ops = {
2325 .start = dev_seq_start,
2326 .next = dev_seq_next,
2327 .stop = dev_seq_stop,
2328 .show = sg_proc_seq_show_debug,
2334 struct proc_dir_entry *p;
2336 p = proc_mkdir("scsi/sg", NULL);
2340 proc_create("allow_dio", S_IRUGO | S_IWUSR, p, &adio_fops);
2341 proc_create_seq("debug", S_IRUGO, p, &debug_seq_ops);
2342 proc_create("def_reserved_size", S_IRUGO | S_IWUSR, p, &dressz_fops);
2343 proc_create_single("device_hdr", S_IRUGO, p, sg_proc_seq_show_devhdr);
2344 proc_create_seq("devices", S_IRUGO, p, &dev_seq_ops);
2345 proc_create_seq("device_strs", S_IRUGO, p, &devstrs_seq_ops);
2346 proc_create_single("version", S_IRUGO, p, sg_proc_seq_show_version);
2351 static int sg_proc_seq_show_int(struct seq_file *s, void *v)
2353 seq_printf(s, "%d\n", *((int *)s->private));
2357 static int sg_proc_single_open_adio(struct inode *inode, struct file *file)
2359 return single_open(file, sg_proc_seq_show_int, &sg_allow_dio);
2363 sg_proc_write_adio(struct file *filp, const char __user *buffer,
2364 size_t count, loff_t *off)
2369 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2371 err = kstrtoul_from_user(buffer, count, 0, &num);
2374 sg_allow_dio = num ? 1 : 0;
2378 static int sg_proc_single_open_dressz(struct inode *inode, struct file *file)
2380 return single_open(file, sg_proc_seq_show_int, &sg_big_buff);
2384 sg_proc_write_dressz(struct file *filp, const char __user *buffer,
2385 size_t count, loff_t *off)
2388 unsigned long k = ULONG_MAX;
2390 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2393 err = kstrtoul_from_user(buffer, count, 0, &k);
2396 if (k <= 1048576) { /* limit "big buff" to 1 MB */
2403 static int sg_proc_seq_show_version(struct seq_file *s, void *v)
2405 seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR,
2410 static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v)
2412 seq_puts(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\tonline\n");
2416 struct sg_proc_deviter {
2421 static void * dev_seq_start(struct seq_file *s, loff_t *pos)
2423 struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);
2430 it->max = sg_last_dev();
2431 if (it->index >= it->max)
2436 static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
2438 struct sg_proc_deviter * it = s->private;
2441 return (it->index < it->max) ? it : NULL;
2444 static void dev_seq_stop(struct seq_file *s, void *v)
2449 static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
2451 struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
2453 struct scsi_device *scsidp;
2454 unsigned long iflags;
2456 read_lock_irqsave(&sg_index_lock, iflags);
2457 sdp = it ? sg_lookup_dev(it->index) : NULL;
2458 if ((NULL == sdp) || (NULL == sdp->device) ||
2459 (atomic_read(&sdp->detaching)))
2460 seq_puts(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
2462 scsidp = sdp->device;
2463 seq_printf(s, "%d\t%d\t%d\t%llu\t%d\t%d\t%d\t%d\t%d\n",
2464 scsidp->host->host_no, scsidp->channel,
2465 scsidp->id, scsidp->lun, (int) scsidp->type,
2467 (int) scsidp->queue_depth,
2468 (int) atomic_read(&scsidp->device_busy),
2469 (int) scsi_device_online(scsidp));
2471 read_unlock_irqrestore(&sg_index_lock, iflags);
2475 static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
2477 struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
2479 struct scsi_device *scsidp;
2480 unsigned long iflags;
2482 read_lock_irqsave(&sg_index_lock, iflags);
2483 sdp = it ? sg_lookup_dev(it->index) : NULL;
2484 scsidp = sdp ? sdp->device : NULL;
2485 if (sdp && scsidp && (!atomic_read(&sdp->detaching)))
2486 seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
2487 scsidp->vendor, scsidp->model, scsidp->rev);
2489 seq_puts(s, "<no active device>\n");
2490 read_unlock_irqrestore(&sg_index_lock, iflags);
2494 /* must be called while holding sg_index_lock */
2495 static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
2497 int k, new_interface, blen, usg;
2500 const sg_io_hdr_t *hp;
2505 list_for_each_entry(fp, &sdp->sfds, sfd_siblings) {
2507 read_lock(&fp->rq_list_lock); /* irqs already disabled */
2508 seq_printf(s, " FD(%d): timeout=%dms bufflen=%d "
2509 "(res)sgat=%d low_dma=%d\n", k,
2510 jiffies_to_msecs(fp->timeout),
2511 fp->reserve.bufflen,
2512 (int) fp->reserve.k_use_sg,
2513 (int) sdp->device->host->unchecked_isa_dma);
2514 seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
2515 (int) fp->cmd_q, (int) fp->force_packid,
2516 (int) fp->keep_orphan);
2517 list_for_each_entry(srp, &fp->rq_list, entry) {
2519 new_interface = (hp->interface_id == '\0') ? 0 : 1;
2520 if (srp->res_used) {
2521 if (new_interface &&
2522 (SG_FLAG_MMAP_IO & hp->flags))
2527 if (SG_INFO_DIRECT_IO_MASK & hp->info)
2533 blen = srp->data.bufflen;
2534 usg = srp->data.k_use_sg;
2535 seq_puts(s, srp->done ?
2536 ((1 == srp->done) ? "rcv:" : "fin:")
2538 seq_printf(s, " id=%d blen=%d",
2539 srp->header.pack_id, blen);
2541 seq_printf(s, " dur=%d", hp->duration);
2543 ms = jiffies_to_msecs(jiffies);
2544 seq_printf(s, " t_o/elap=%d/%d",
2545 (new_interface ? hp->timeout :
2546 jiffies_to_msecs(fp->timeout)),
2547 (ms > hp->duration ? ms - hp->duration : 0));
2549 seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
2550 (int) srp->data.cmd_opcode);
2552 if (list_empty(&fp->rq_list))
2553 seq_puts(s, " No requests active\n");
2554 read_unlock(&fp->rq_list_lock);
2558 static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
2560 struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
2562 unsigned long iflags;
2564 if (it && (0 == it->index))
2565 seq_printf(s, "max_active_device=%d def_reserved_size=%d\n",
2566 (int)it->max, sg_big_buff);
2568 read_lock_irqsave(&sg_index_lock, iflags);
2569 sdp = it ? sg_lookup_dev(it->index) : NULL;
2572 read_lock(&sdp->sfd_lock);
2573 if (!list_empty(&sdp->sfds)) {
2574 seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
2575 if (atomic_read(&sdp->detaching))
2576 seq_puts(s, "detaching pending close ");
2577 else if (sdp->device) {
2578 struct scsi_device *scsidp = sdp->device;
2580 seq_printf(s, "%d:%d:%d:%llu em=%d",
2581 scsidp->host->host_no,
2582 scsidp->channel, scsidp->id,
2584 scsidp->host->hostt->emulated);
2586 seq_printf(s, " sg_tablesize=%d excl=%d open_cnt=%d\n",
2587 sdp->sg_tablesize, sdp->exclude, sdp->open_cnt);
2588 sg_proc_debug_helper(s, sdp);
2590 read_unlock(&sdp->sfd_lock);
2592 read_unlock_irqrestore(&sg_index_lock, iflags);
2596 #endif /* CONFIG_SCSI_PROC_FS */
2598 module_init(init_sg);
2599 module_exit(exit_sg);