/*
 * bsg.c - block layer implementation of the sg v4 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License version 2. See the file "COPYING" in the main directory of this
 * archive for more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/cdev.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/idr.h>
#include <linux/bsg.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>

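/*
 * Userspace drives this interface by filling out a struct sg_io_v4 and
 * issuing the SG_IO ioctl against a /dev/bsg node.  A minimal sketch of
 * such a caller (illustrative only, not part of this driver; the device
 * path and CDB are assumptions, error handling omitted):
 *
 *	struct sg_io_v4 hdr = { 0 };
 *	unsigned char cdb[6] = { 0 };	/. TEST UNIT READY ./
 *	int fd = open("/dev/bsg/0:0:0:0", O_RDWR);
 *
 *	hdr.guard = 'Q';
 *	hdr.protocol = BSG_PROTOCOL_SCSI;
 *	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
 *	hdr.request = (__u64)(uintptr_t)cdb;
 *	hdr.request_len = sizeof(cdb);
 *	ioctl(fd, SG_IO, &hdr);
 */
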
#define BSG_DESCRIPTION	"Block layer SCSI generic (bsg) driver"
#define BSG_VERSION	"0.4"

#define bsg_dbg(bd, fmt, ...) \
	pr_debug("%s: " fmt, (bd)->name, ##__VA_ARGS__)

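/*
 * Per-device state: allocated on first open of a bsg node and shared,
 * via a refcount, by subsequent opens of the same queue (see
 * bsg_get_device()).
 */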
struct bsg_device {
	struct request_queue *queue;
	spinlock_t lock;
	struct hlist_node dev_list;
	atomic_t ref_count;
	char name[20];
	int max_queue;
};

#define BSG_DEFAULT_CMDS	64
#define BSG_MAX_DEVS		32768

static DEFINE_MUTEX(bsg_mutex);
static DEFINE_IDR(bsg_minor_idr);

#define BSG_LIST_ARRAY_SIZE	8
static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];

static struct class *bsg_class;
static int bsg_major;

static inline struct hlist_head *bsg_dev_idx_hash(int index)
{
	return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
}

#define uptr64(val) ((void __user *)(uintptr_t)(val))

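/*
 * bsg only speaks the SCSI subprotocol of the sg v4 interface; reject
 * anything else up front.
 */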
static int bsg_scsi_check_proto(struct sg_io_v4 *hdr)
{
	if (hdr->protocol != BSG_PROTOCOL_SCSI ||
	    hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_CMD)
		return -EINVAL;
	return 0;
}

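/*
 * Copy the CDB in from userspace, allocating a larger buffer first if it
 * does not fit in the request's inline BLK_MAX_CDB space, and verify that
 * the caller is allowed to issue this command.
 */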
static int bsg_scsi_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
		fmode_t mode)
{
	struct scsi_request *sreq = scsi_req(rq);

	sreq->cmd_len = hdr->request_len;
	if (sreq->cmd_len > BLK_MAX_CDB) {
		sreq->cmd = kzalloc(sreq->cmd_len, GFP_KERNEL);
		if (!sreq->cmd)
			return -ENOMEM;
	}

	if (copy_from_user(sreq->cmd, uptr64(hdr->request), sreq->cmd_len))
		return -EFAULT;
	if (blk_verify_command(sreq->cmd, mode))
		return -EPERM;
	return 0;
}

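/*
 * Translate the completed request back into the sg_io_v4 output members:
 * status bytes, sense data and residual counts (including the bidi case).
 */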
static int bsg_scsi_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
{
	struct scsi_request *sreq = scsi_req(rq);
	int ret = 0;

	/* fill in all the output members */
	hdr->device_status = sreq->result & 0xff;
	hdr->transport_status = host_byte(sreq->result);
	hdr->driver_status = driver_byte(sreq->result);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (sreq->sense_len && hdr->response) {
		int len = min_t(unsigned int, hdr->max_response_len,
				sreq->sense_len);

		if (copy_to_user(uptr64(hdr->response), sreq->sense, len))
			ret = -EFAULT;
		else
			hdr->response_len = len;
	}

	if (rq->next_rq) {
		hdr->dout_resid = sreq->resid_len;
		hdr->din_resid = scsi_req(rq->next_rq)->resid_len;
	} else if (rq_data_dir(rq) == READ) {
		hdr->din_resid = sreq->resid_len;
	} else {
		hdr->dout_resid = sreq->resid_len;
	}

	return ret;
}

static void bsg_scsi_free_rq(struct request *rq)
{
	scsi_req_free_cmd(scsi_req(rq));
}

static const struct bsg_ops bsg_scsi_ops = {
	.check_proto	= bsg_scsi_check_proto,
	.fill_hdr	= bsg_scsi_fill_hdr,
	.complete_rq	= bsg_scsi_complete_rq,
	.free_rq	= bsg_scsi_free_rq,
};

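/*
 * Turn a validated sg_io_v4 header into a struct request (plus a second
 * request for the din side of a bidirectional command) with the user
 * buffers mapped, ready for blk_execute_rq().
 */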
static struct request *
bsg_map_hdr(struct request_queue *q, struct sg_io_v4 *hdr, fmode_t mode)
{
	struct request *rq, *next_rq = NULL;
	int ret;

	if (!q->bsg_dev.class_dev)
		return ERR_PTR(-ENXIO);

	if (hdr->guard != 'Q')
		return ERR_PTR(-EINVAL);

	ret = q->bsg_dev.ops->check_proto(hdr);
	if (ret)
		return ERR_PTR(ret);

	rq = blk_get_request(q, hdr->dout_xfer_len ?
			REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
	if (IS_ERR(rq))
		return rq;

	ret = q->bsg_dev.ops->fill_hdr(rq, hdr, mode);
	if (ret)
		goto out;

	rq->timeout = msecs_to_jiffies(hdr->timeout);
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
		rq->timeout = BLK_MIN_SG_TIMEOUT;

	if (hdr->dout_xfer_len && hdr->din_xfer_len) {
		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		next_rq = blk_get_request(q, REQ_OP_SCSI_IN, 0);
		if (IS_ERR(next_rq)) {
			ret = PTR_ERR(next_rq);
			goto out;
		}

		rq->next_rq = next_rq;
		ret = blk_rq_map_user(q, next_rq, NULL, uptr64(hdr->din_xferp),
				hdr->din_xfer_len, GFP_KERNEL);
		if (ret)
			goto out_free_nextrq;
	}

	if (hdr->dout_xfer_len) {
		ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->dout_xferp),
				hdr->dout_xfer_len, GFP_KERNEL);
	} else if (hdr->din_xfer_len) {
		ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->din_xferp),
				hdr->din_xfer_len, GFP_KERNEL);
	} else {
		ret = blk_rq_map_user(q, rq, NULL, NULL, 0, GFP_KERNEL);
	}

	if (ret)
		goto out_unmap_nextrq;
	return rq;

out_unmap_nextrq:
	if (rq->next_rq)
		blk_rq_unmap_user(rq->next_rq->bio);
out_free_nextrq:
	if (rq->next_rq)
		blk_put_request(rq->next_rq);
out:
	q->bsg_dev.ops->free_rq(rq);
	blk_put_request(rq);
	return ERR_PTR(ret);
}

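/*
 * Tear-down counterpart of bsg_map_hdr(): copy out the completion status,
 * unmap the user buffers and release the request(s).
 */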
static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
				    struct bio *bio, struct bio *bidi_bio)
{
	int ret;

	ret = rq->q->bsg_dev.ops->complete_rq(rq, hdr);

	if (rq->next_rq) {
		blk_rq_unmap_user(bidi_bio);
		blk_put_request(rq->next_rq);
	}

	blk_rq_unmap_user(bio);
	rq->q->bsg_dev.ops->free_rq(rq);
	blk_put_request(rq);
	return ret;
}

static struct bsg_device *bsg_alloc_device(void)
{
	struct bsg_device *bd;

	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
	if (unlikely(!bd))
		return NULL;

	spin_lock_init(&bd->lock);
	bd->max_queue = BSG_DEFAULT_CMDS;
	INIT_HLIST_NODE(&bd->dev_list);
	return bd;
}

static int bsg_put_device(struct bsg_device *bd)
{
	struct request_queue *q = bd->queue;

	mutex_lock(&bsg_mutex);
	if (!atomic_dec_and_test(&bd->ref_count)) {
		mutex_unlock(&bsg_mutex);
		return 0;
	}
	hlist_del(&bd->dev_list);
	mutex_unlock(&bsg_mutex);

	bsg_dbg(bd, "tearing down\n");

	/* close can always block */
	kfree(bd);
	blk_put_queue(q);
	return 0;
}

static struct bsg_device *bsg_add_device(struct inode *inode,
					 struct request_queue *rq,
					 struct file *file)
{
	struct bsg_device *bd;
	unsigned char buf[32];

	lockdep_assert_held(&bsg_mutex);

	if (!blk_get_queue(rq))
		return ERR_PTR(-ENXIO);

	bd = bsg_alloc_device();
	if (!bd) {
		blk_put_queue(rq);
		return ERR_PTR(-ENOMEM);
	}

	bd->queue = rq;
	atomic_set(&bd->ref_count, 1);
	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));

	strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
	bsg_dbg(bd, "bound to <%s>, max queue %d\n",
		format_dev_t(buf, inode->i_rdev), bd->max_queue);

	return bd;
}

static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
{
	struct bsg_device *bd;

	lockdep_assert_held(&bsg_mutex);

	hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
		if (bd->queue == q) {
			atomic_inc(&bd->ref_count);
			goto found;
		}
	}
	bd = NULL;
found:
	return bd;
}

static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;
	struct bsg_class_device *bcd;

	/* find the class device */
	mutex_lock(&bsg_mutex);
	bcd = idr_find(&bsg_minor_idr, iminor(inode));
	if (!bcd) {
		bd = ERR_PTR(-ENODEV);
		goto out_unlock;
	}

	bd = __bsg_get_device(iminor(inode), bcd->queue);
	if (!bd)
		bd = bsg_add_device(inode, bcd->queue, file);

out_unlock:
	mutex_unlock(&bsg_mutex);
	return bd;
}

static int bsg_open(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;

	bd = bsg_get_device(inode, file);
	if (IS_ERR(bd))
		return PTR_ERR(bd);

	file->private_data = bd;
	return 0;
}

static int bsg_release(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = file->private_data;

	file->private_data = NULL;
	return bsg_put_device(bd);
}

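/*
 * Three classes of ioctls end up here: bsg's own queue-depth knobs,
 * the legacy SCSI/sg ioctls that are forwarded to scsi_cmd_ioctl(),
 * and the synchronous SG_IO command path.
 */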
static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct bsg_device *bd = file->private_data;
	int __user *uarg = (int __user *) arg;

	switch (cmd) {
	/* our own ioctls */
	case SG_GET_COMMAND_Q:
		return put_user(bd->max_queue, uarg);
	case SG_SET_COMMAND_Q: {
		int queue;

		if (get_user(queue, uarg))
			return -EFAULT;
		if (queue < 1)
			return -EINVAL;

		spin_lock_irq(&bd->lock);
		bd->max_queue = queue;
		spin_unlock_irq(&bd->lock);
		return 0;
	}

	/* SCSI/sg ioctls */
	case SG_GET_VERSION_NUM:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND: {
		void __user *uarg = (void __user *) arg;
		return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
	}
	case SG_IO: {
		struct request *rq;
		struct bio *bio, *bidi_bio = NULL;
		struct sg_io_v4 hdr;
		int at_head;
		int ret;

		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
			return -EFAULT;

		rq = bsg_map_hdr(bd->queue, &hdr, file->f_mode);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		bio = rq->bio;
		if (rq->next_rq)
			bidi_bio = rq->next_rq->bio;

		at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL));
		blk_execute_rq(bd->queue, NULL, rq, at_head);
		ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);

		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
			return -EFAULT;

		return ret;
	}
	default:
		return -ENOTTY;
	}
}

static const struct file_operations bsg_fops = {
	.open		= bsg_open,
	.release	= bsg_release,
	.unlocked_ioctl	= bsg_ioctl,
	.owner		= THIS_MODULE,
	.llseek		= default_llseek,
};

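/*
 * Undo bsg_register_queue(): release the minor, remove the sysfs link
 * and unregister the class device.
 */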
void bsg_unregister_queue(struct request_queue *q)
{
	struct bsg_class_device *bcd = &q->bsg_dev;

	if (!bcd->class_dev)
		return;

	mutex_lock(&bsg_mutex);
	idr_remove(&bsg_minor_idr, bcd->minor);
	if (q->kobj.sd)
		sysfs_remove_link(&q->kobj, "bsg");
	device_unregister(bcd->class_dev);
	bcd->class_dev = NULL;
	mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);

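/*
 * bsg_register_queue - expose a request_queue through the bsg interface
 *
 * Allocates a minor, creates the class device and links it from the
 * queue's sysfs directory.  Callers supply the bsg_ops that translate
 * between sg_io_v4 headers and requests for their transport.
 */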
int bsg_register_queue(struct request_queue *q, struct device *parent,
		const char *name, const struct bsg_ops *ops)
{
	struct bsg_class_device *bcd;
	dev_t dev;
	int ret;
	struct device *class_dev = NULL;

	/* we need a proper transport to send commands, not a stacked device */
	if (!queue_is_rq_based(q))
		return 0;

	bcd = &q->bsg_dev;
	memset(bcd, 0, sizeof(*bcd));

	mutex_lock(&bsg_mutex);

	ret = idr_alloc(&bsg_minor_idr, bcd, 0, BSG_MAX_DEVS, GFP_KERNEL);
	if (ret < 0) {
		if (ret == -ENOSPC) {
			printk(KERN_ERR "bsg: too many bsg devices\n");
			ret = -EINVAL;
		}
		goto unlock;
	}

	bcd->minor = ret;
	bcd->queue = q;
	bcd->ops = ops;
	dev = MKDEV(bsg_major, bcd->minor);
	class_dev = device_create(bsg_class, parent, dev, NULL, "%s", name);
	if (IS_ERR(class_dev)) {
		ret = PTR_ERR(class_dev);
		goto idr_remove;
	}
	bcd->class_dev = class_dev;

	if (q->kobj.sd) {
		ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
		if (ret)
			goto unregister_class_dev;
	}

	mutex_unlock(&bsg_mutex);
	return 0;

unregister_class_dev:
	device_unregister(class_dev);
idr_remove:
	idr_remove(&bsg_minor_idr, bcd->minor);
unlock:
	mutex_unlock(&bsg_mutex);
	return ret;
}

int bsg_scsi_register_queue(struct request_queue *q, struct device *parent)
{
	if (!blk_queue_scsi_passthrough(q)) {
		WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
		return -EINVAL;
	}

	return bsg_register_queue(q, parent, dev_name(parent), &bsg_scsi_ops);
}
EXPORT_SYMBOL_GPL(bsg_scsi_register_queue);

static struct cdev bsg_cdev;

static char *bsg_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev));
}

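/*
 * Module init: set up the device hash, the "bsg" class and a char dev
 * region covering all BSG_MAX_DEVS minors.
 */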
static int __init bsg_init(void)
{
	int ret, i;
	dev_t devid;

	for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
		INIT_HLIST_HEAD(&bsg_device_list[i]);

	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class))
		return PTR_ERR(bsg_class);
	bsg_class->devnode = bsg_devnode;

	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
	if (ret)
		goto destroy_bsg_class;

	bsg_major = MAJOR(devid);

	cdev_init(&bsg_cdev, &bsg_fops);
	ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
	if (ret)
		goto unregister_chrdev;

	printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
	       " loaded (major %d)\n", bsg_major);
	return 0;

unregister_chrdev:
	unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:
	class_destroy(bsg_class);
	return ret;
}

MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION(BSG_DESCRIPTION);
MODULE_LICENSE("GPL");

device_initcall(bsg_init);