/*
 * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
 * Copyright (C) 2006 Thomas Maier <balagi@justmail.de>
 *
 * May be copied or modified under the terms of the GNU General Public
 * License.  See linux/COPYING for more information.
 *
 * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and
 * DVD-RAM devices.
 *
 * Theory of operation:
 *
 * At the lowest level, there is the standard driver for the CD/DVD device,
 * such as drivers/scsi/sr.c. This driver can handle read and write requests,
 * but it doesn't know anything about the special restrictions that apply to
 * packet writing. One restriction is that write requests must be aligned to
 * packet boundaries on the physical media, and the size of a write request
 * must be equal to the packet size. Another restriction is that a
 * GPCMD_FLUSH_CACHE command has to be issued to the drive before a read
 * command, if the previous command was a write.
 *
 * The purpose of the packet writing driver is to hide these restrictions from
 * higher layers, such as file systems, and present a block device that can be
 * randomly read and written using 2kB-sized blocks.
 *
 * The lowest layer in the packet writing driver is the packet I/O scheduler.
 * Its data is defined by the struct packet_iosched and includes two bio
 * queues with pending read and write requests. These queues are processed
 * by the pkt_iosched_process_queue() function. The write requests in this
 * queue are already properly aligned and sized. This layer is responsible for
 * issuing the flush cache commands and scheduling the I/O in a good order.
 *
 * The next layer transforms unaligned write requests to aligned writes. This
 * transformation requires reading missing pieces of data from the underlying
 * block device, assembling the pieces to full packets and queuing them to the
 * packet I/O scheduler.
 *
 * At the top layer there is a custom ->submit_bio function that forwards
 * read requests directly to the iosched queue and puts write requests in the
 * unaligned write queue. A kernel thread performs the necessary read
 * gathering to convert the unaligned writes to aligned writes and then feeds
 * them to the packet I/O scheduler.
 *
 *************************************************************************/
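/*
 * Layering summary (added sketch of the description above):
 *
 *   ->submit_bio             reads go straight to the iosched queue,
 *        |                   writes go to the unaligned write queue
 *   kcdrwd kernel thread     read gathering, per-packet state machine
 *        |
 *   packet I/O scheduler     ordering, read/write switching, cache flushes
 *        |
 *   underlying CD/DVD driver (e.g. drivers/scsi/sr.c)
 */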
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/pktcdvd.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/nospec.h>
#include <linux/uaccess.h>

#define DRIVER_NAME	"pktcdvd"
#define pkt_err(pd, fmt, ...)						\
	pr_err("%s: " fmt, pd->name, ##__VA_ARGS__)
#define pkt_notice(pd, fmt, ...)					\
	pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__)
#define pkt_info(pd, fmt, ...)						\
	pr_info("%s: " fmt, pd->name, ##__VA_ARGS__)

#define pkt_dbg(level, pd, fmt, ...)					\
do {									\
	if (level == 2 && PACKET_DEBUG >= 2)				\
		pr_notice("%s: %s():" fmt,				\
			  pd->name, __func__, ##__VA_ARGS__);		\
	else if (level == 1 && PACKET_DEBUG >= 1)			\
		pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__);		\
} while (0)
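/*
 * Note (added): level 1 is for ordinary debug messages and level 2 for the
 * verbose per-bio/per-packet tracing; the compiler drops both branches
 * entirely unless PACKET_DEBUG is raised accordingly.
 */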
#define MAX_SPEED 0xffff

static DEFINE_MUTEX(pktcdvd_mutex);
static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
static struct proc_dir_entry *pkt_proc;
static int pktdev_major;
static int write_congestion_on  = PKT_WRITE_CONGESTION_ON;
static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
static struct mutex ctl_mutex;	/* Serialize open/close/setup/teardown */
static mempool_t psd_pool;
static struct bio_set pkt_bio_set;

static struct class	*class_pktcdvd = NULL;    /* /sys/class/pktcdvd */
static struct dentry	*pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */

/* forward declaration */
static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev);
static int pkt_remove_dev(dev_t pkt_dev);
static int pkt_seq_show(struct seq_file *m, void *p);
static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
{
	return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
}
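/*
 * Worked example (added for illustration): with a fixed packet size of 32
 * frames, pd->settings.size is 128 sectors (32 * 2048 / 512), so the mask is
 * ~127. A bio starting at sector 1000 with offset 0 maps to zone
 * (1000 & ~127) = 896, i.e. sectors 896..1023 all belong to the same packet.
 */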
/**********************************************************
 *
 * sysfs interface for pktcdvd
 * by (C) 2006  Thomas Maier <balagi@justmail.de>
 
  /sys/class/pktcdvd/pktcdvd[0-7]/
    stat/reset
    stat/packets_started
    stat/packets_finished
    stat/kb_written
    stat/kb_read
    stat/kb_read_gather
    write_queue/size
    write_queue/congestion_off
    write_queue/congestion_on
 **********************************************************/

static ssize_t packets_started_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", pd->stats.pkt_started);
}
static DEVICE_ATTR_RO(packets_started);

static ssize_t packets_finished_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", pd->stats.pkt_ended);
}
static DEVICE_ATTR_RO(packets_finished);

static ssize_t kb_written_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", pd->stats.secs_w >> 1);
}
static DEVICE_ATTR_RO(kb_written);

static ssize_t kb_read_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", pd->stats.secs_r >> 1);
}
static DEVICE_ATTR_RO(kb_read);

static ssize_t kb_read_gather_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", pd->stats.secs_rg >> 1);
}
static DEVICE_ATTR_RO(kb_read_gather);

static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);

	if (len > 0) {
		pd->stats.pkt_started = 0;
		pd->stats.pkt_ended = 0;
		pd->stats.secs_w = 0;
		pd->stats.secs_rg = 0;
		pd->stats.secs_r = 0;
	}
	return len;
}
static DEVICE_ATTR_WO(reset);
static struct attribute *pkt_stat_attrs[] = {
	&dev_attr_packets_finished.attr,
	&dev_attr_packets_started.attr,
	&dev_attr_kb_read.attr,
	&dev_attr_kb_written.attr,
	&dev_attr_kb_read_gather.attr,
	&dev_attr_reset.attr,
	NULL,
};

static const struct attribute_group pkt_stat_group = {
	.name = "stat",
	.attrs = pkt_stat_attrs,
};

static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);
	int n;

	spin_lock(&pd->lock);
	n = sysfs_emit(buf, "%d\n", pd->bio_queue_size);
	spin_unlock(&pd->lock);
	return n;
}
static DEVICE_ATTR_RO(size);
static void init_write_congestion_marks(int* lo, int* hi)
{
	if (*hi > 0) {
		*hi = max(*hi, 500);
		*hi = min(*hi, 1000000);
		if (*lo <= 0)
			*lo = *hi - 100;
		else {
			*lo = min(*lo, *hi - 100);
			*lo = max(*lo, 100);
		}
	} else {
		*hi = -1;
		*lo = -1;
	}
}
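/*
 * Example of the clamping above (added): writing congestion_on = 50 with
 * congestion_off unset (<= 0) yields hi = 500 (raised to the minimum) and
 * lo = 400; a non-positive hi disables congestion handling entirely (both
 * marks become -1).
 */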
static ssize_t congestion_off_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);
	int n;

	spin_lock(&pd->lock);
	n = sysfs_emit(buf, "%d\n", pd->write_congestion_off);
	spin_unlock(&pd->lock);
	return n;
}

static ssize_t congestion_off_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t len)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);
	int val;

	if (sscanf(buf, "%d", &val) == 1) {
		spin_lock(&pd->lock);
		pd->write_congestion_off = val;
		init_write_congestion_marks(&pd->write_congestion_off,
					    &pd->write_congestion_on);
		spin_unlock(&pd->lock);
	}
	return len;
}
static DEVICE_ATTR_RW(congestion_off);

static ssize_t congestion_on_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);
	int n;

	spin_lock(&pd->lock);
	n = sysfs_emit(buf, "%d\n", pd->write_congestion_on);
	spin_unlock(&pd->lock);
	return n;
}

static ssize_t congestion_on_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t len)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);
	int val;

	if (sscanf(buf, "%d", &val) == 1) {
		spin_lock(&pd->lock);
		pd->write_congestion_on = val;
		init_write_congestion_marks(&pd->write_congestion_off,
					    &pd->write_congestion_on);
		spin_unlock(&pd->lock);
	}
	return len;
}
static DEVICE_ATTR_RW(congestion_on);

static struct attribute *pkt_wq_attrs[] = {
	&dev_attr_congestion_on.attr,
	&dev_attr_congestion_off.attr,
	&dev_attr_size.attr,
	NULL,
};

static const struct attribute_group pkt_wq_group = {
	.name = "write_queue",
	.attrs = pkt_wq_attrs,
};

static const struct attribute_group *pkt_groups[] = {
	&pkt_stat_group,
	&pkt_wq_group,
	NULL,
};
static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
{
	if (class_pktcdvd) {
		pd->dev = device_create_with_groups(class_pktcdvd, NULL,
						    MKDEV(0, 0), pd, pkt_groups,
						    "%s", pd->name);
		if (IS_ERR(pd->dev))
			pd->dev = NULL;
	}
}

static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
{
	if (class_pktcdvd)
		device_unregister(pd->dev);
}

/********************************************************************
  /sys/class/pktcdvd/
                     add            map block device
                     remove         unmap packet dev
                     device_map     show mappings
 *******************************************************************/

static void class_pktcdvd_release(struct class *cls)
{
	kfree(cls);
}

static ssize_t device_map_show(const struct class *c, const struct class_attribute *attr,
			       char *data)
{
	int n = 0;
	int idx;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	for (idx = 0; idx < MAX_WRITERS; idx++) {
		struct pktcdvd_device *pd = pkt_devs[idx];
		if (!pd)
			continue;
		n += sprintf(data+n, "%s %u:%u %u:%u\n",
			pd->name,
			MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
			MAJOR(pd->bdev->bd_dev),
			MINOR(pd->bdev->bd_dev));
	}
	mutex_unlock(&ctl_mutex);
	return n;
}
static CLASS_ATTR_RO(device_map);

static ssize_t add_store(const struct class *c, const struct class_attribute *attr,
			 const char *buf, size_t count)
{
	unsigned int major, minor;

	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
		/* pkt_setup_dev() expects caller to hold reference to self */
		if (!try_module_get(THIS_MODULE))
			return -ENODEV;

		pkt_setup_dev(MKDEV(major, minor), NULL);

		module_put(THIS_MODULE);

		return count;
	}

	return -EINVAL;
}
static CLASS_ATTR_WO(add);

static ssize_t remove_store(const struct class *c, const struct class_attribute *attr,
			    const char *buf, size_t count)
{
	unsigned int major, minor;

	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
		pkt_remove_dev(MKDEV(major, minor));
		return count;
	}
	return -EINVAL;
}
static CLASS_ATTR_WO(remove);

static struct attribute *class_pktcdvd_attrs[] = {
	&class_attr_add.attr,
	&class_attr_remove.attr,
	&class_attr_device_map.attr,
	NULL,
};
ATTRIBUTE_GROUPS(class_pktcdvd);
static int pkt_sysfs_init(void)
{
	int ret = 0;

	/*
	 * create control files in sysfs
	 * /sys/class/pktcdvd/...
	 */
	class_pktcdvd = kzalloc(sizeof(*class_pktcdvd), GFP_KERNEL);
	if (!class_pktcdvd)
		return -ENOMEM;
	class_pktcdvd->name = DRIVER_NAME;
	class_pktcdvd->class_release = class_pktcdvd_release;
	class_pktcdvd->class_groups = class_pktcdvd_groups;
	ret = class_register(class_pktcdvd);
	if (ret) {
		kfree(class_pktcdvd);
		class_pktcdvd = NULL;
		pr_err("failed to create class pktcdvd\n");
		return ret;
	}
	return 0;
}

static void pkt_sysfs_cleanup(void)
{
	if (class_pktcdvd)
		class_destroy(class_pktcdvd);
	class_pktcdvd = NULL;
}

/********************************************************************
  entries in debugfs

  /sys/kernel/debug/pktcdvd[0-7]/
			info

 *******************************************************************/
static int pkt_debugfs_seq_show(struct seq_file *m, void *p)
{
	return pkt_seq_show(m, p);
}

static int pkt_debugfs_fops_open(struct inode *inode, struct file *file)
{
	return single_open(file, pkt_debugfs_seq_show, inode->i_private);
}

static const struct file_operations debug_fops = {
	.open		= pkt_debugfs_fops_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.owner		= THIS_MODULE,
};

static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root);
	if (!pd->dfs_d_root)
		return;

	pd->dfs_f_info = debugfs_create_file("info", 0444,
					     pd->dfs_d_root, pd, &debug_fops);
}

static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	debugfs_remove(pd->dfs_f_info);
	debugfs_remove(pd->dfs_d_root);
	pd->dfs_f_info = NULL;
	pd->dfs_d_root = NULL;
}

static void pkt_debugfs_init(void)
{
	pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL);
}

static void pkt_debugfs_cleanup(void)
{
	debugfs_remove(pkt_debugfs_root);
	pkt_debugfs_root = NULL;
}

/* ----------------------------------------------------------*/
static void pkt_bio_finished(struct pktcdvd_device *pd)
{
	BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
	if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
		pkt_dbg(2, pd, "queue empty\n");
		atomic_set(&pd->iosched.attention, 1);
		wake_up(&pd->wqueue);
	}
}

/*
 * Allocate a packet_data struct
 */
static struct packet_data *pkt_alloc_packet_data(int frames)
{
	int i;
	struct packet_data *pkt;

	pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
	if (!pkt)
		goto no_pkt;

	pkt->frames = frames;
	pkt->w_bio = bio_kmalloc(frames, GFP_KERNEL);
	if (!pkt->w_bio)
		goto no_bio;

	for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
		pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (!pkt->pages[i])
			goto no_page;
	}

	spin_lock_init(&pkt->lock);
	bio_list_init(&pkt->orig_bios);

	for (i = 0; i < frames; i++) {
		pkt->r_bios[i] = bio_kmalloc(1, GFP_KERNEL);
		if (!pkt->r_bios[i])
			goto no_rd_bio;
	}

	return pkt;

no_rd_bio:
	for (i = 0; i < frames; i++)
		kfree(pkt->r_bios[i]);
no_page:
	for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
		if (pkt->pages[i])
			__free_page(pkt->pages[i]);
	kfree(pkt->w_bio);
no_bio:
	kfree(pkt);
no_pkt:
	return NULL;
}
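/*
 * Note (added): with 4kB pages, FRAMES_PER_PAGE is 2, so a 32-frame packet
 * keeps its 64kB payload in 16 pages; r_bios[] holds one single-vec read bio
 * per frame for the read-gathering phase.
 */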
/*
 * Free a packet_data struct
 */
static void pkt_free_packet_data(struct packet_data *pkt)
{
	int i;

	for (i = 0; i < pkt->frames; i++)
		kfree(pkt->r_bios[i]);
	for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
		__free_page(pkt->pages[i]);
	kfree(pkt->w_bio);
	kfree(pkt);
}

static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));

	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
		pkt_free_packet_data(pkt);
	}
	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
}

static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
{
	struct packet_data *pkt;

	BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));

	while (nr_packets > 0) {
		pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
		if (!pkt) {
			pkt_shrink_pktlist(pd);
			return 0;
		}
		pkt->id = nr_packets;
		pkt->pd = pd;
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
		nr_packets--;
	}
	return 1;
}

static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
{
	struct rb_node *n = rb_next(&node->rb_node);
	if (!n)
		return NULL;
	return rb_entry(n, struct pkt_rb_node, rb_node);
}

static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	rb_erase(&node->rb_node, &pd->bio_queue);
	mempool_free(node, &pd->rb_pool);
	pd->bio_queue_size--;
	BUG_ON(pd->bio_queue_size < 0);
}
/*
 * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
 */
static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
{
	struct rb_node *n = pd->bio_queue.rb_node;
	struct rb_node *next;
	struct pkt_rb_node *tmp;

	if (!n) {
		BUG_ON(pd->bio_queue_size > 0);
		return NULL;
	}

	for (;;) {
		tmp = rb_entry(n, struct pkt_rb_node, rb_node);
		if (s <= tmp->bio->bi_iter.bi_sector)
			next = n->rb_left;
		else
			next = n->rb_right;
		if (!next)
			break;
		n = next;
	}

	if (s > tmp->bio->bi_iter.bi_sector) {
		tmp = pkt_rbtree_next(tmp);
		if (!tmp)
			return NULL;
	}
	BUG_ON(s > tmp->bio->bi_iter.bi_sector);
	return tmp;
}

/*
 * Insert a node into the pd->bio_queue rb tree.
 */
static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	struct rb_node **p = &pd->bio_queue.rb_node;
	struct rb_node *parent = NULL;
	sector_t s = node->bio->bi_iter.bi_sector;
	struct pkt_rb_node *tmp;

	while (*p) {
		parent = *p;
		tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
		if (s < tmp->bio->bi_iter.bi_sector)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &pd->bio_queue);
	pd->bio_queue_size++;
}
/*
 * Send a packet_command to the underlying block device and
 * wait for completion.
 */
static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
{
	struct request_queue *q = bdev_get_queue(pd->bdev);
	struct scsi_cmnd *scmd;
	struct request *rq;
	int ret = 0;

	rq = scsi_alloc_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
			     REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	scmd = blk_mq_rq_to_pdu(rq);

	if (cgc->buflen) {
		ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
				      GFP_NOIO);
		if (ret)
			goto out;
	}

	scmd->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
	memcpy(scmd->cmnd, cgc->cmd, CDROM_PACKET_SIZE);

	rq->timeout = 60*HZ;
	if (cgc->quiet)
		rq->rq_flags |= RQF_QUIET;

	blk_execute_rq(rq, false);
	if (scmd->result)
		ret = -EIO;
out:
	blk_mq_free_request(rq);
	return ret;
}

static const char *sense_key_string(__u8 index)
{
	static const char * const info[] = {
		"No sense", "Recovered error", "Not ready",
		"Medium error", "Hardware error", "Illegal request",
		"Unit attention", "Data protect", "Blank check",
	};

	return index < ARRAY_SIZE(info) ? info[index] : "INVALID";
}
/*
 * A generic sense dump / resolve mechanism should be implemented across
 * all ATAPI + SCSI devices.
 */
static void pkt_dump_sense(struct pktcdvd_device *pd,
			   struct packet_command *cgc)
{
	struct scsi_sense_hdr *sshdr = cgc->sshdr;

	if (sshdr)
		pkt_err(pd, "%*ph - sense %02x.%02x.%02x (%s)\n",
			CDROM_PACKET_SIZE, cgc->cmd,
			sshdr->sense_key, sshdr->asc, sshdr->ascq,
			sense_key_string(sshdr->sense_key));
	else
		pkt_err(pd, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
}

/*
 * flush the drive cache to media
 */
static int pkt_flush_cache(struct pktcdvd_device *pd)
{
	struct packet_command cgc;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_FLUSH_CACHE;
	cgc.quiet = 1;

	/*
	 * the IMMED bit -- we default to not setting it, although that
	 * would allow a much faster close, this is safer
	 */
#if 0
	cgc.cmd[1] = 1 << 1;
#endif
	return pkt_generic_packet(pd, &cgc);
}

/*
 * speed is given as the normal factor, e.g. 4 for 4x
 */
static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
				unsigned write_speed, unsigned read_speed)
{
	struct packet_command cgc;
	struct scsi_sense_hdr sshdr;
	int ret;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.sshdr = &sshdr;
	cgc.cmd[0] = GPCMD_SET_SPEED;
	cgc.cmd[2] = (read_speed >> 8) & 0xff;
	cgc.cmd[3] = read_speed & 0xff;
	cgc.cmd[4] = (write_speed >> 8) & 0xff;
	cgc.cmd[5] = write_speed & 0xff;

	ret = pkt_generic_packet(pd, &cgc);
	if (ret)
		pkt_dump_sense(pd, &cgc);

	return ret;
}
/*
 * Queue a bio for processing by the low-level CD device. Must be called
 * from process context.
 */
static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
{
	spin_lock(&pd->iosched.lock);
	if (bio_data_dir(bio) == READ)
		bio_list_add(&pd->iosched.read_queue, bio);
	else
		bio_list_add(&pd->iosched.write_queue, bio);
	spin_unlock(&pd->iosched.lock);

	atomic_set(&pd->iosched.attention, 1);
	wake_up(&pd->wqueue);
}

/*
 * Process the queued read/write requests. This function handles special
 * requirements for CDRW drives:
 * - A cache flush command must be inserted before a read request if the
 *   previous request was a write.
 * - Switching between reading and writing is slow, so don't do it more often
 *   than necessary.
 * - Optimize for throughput at the expense of latency. This means that streaming
 *   writes will never be interrupted by a read, but if the drive has to seek
 *   before the next write, switch to reading instead if there are any pending
 *   read requests.
 * - Set the read speed according to current usage pattern. When only reading
 *   from the device, it's best to use the highest possible read speed, but
 *   when switching often between reading and writing, it's better to have the
 *   same read and write speeds.
 */
static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
{
	if (atomic_read(&pd->iosched.attention) == 0)
		return;
	atomic_set(&pd->iosched.attention, 0);

	for (;;) {
		struct bio *bio;
		int reads_queued, writes_queued;

		spin_lock(&pd->iosched.lock);
		reads_queued = !bio_list_empty(&pd->iosched.read_queue);
		writes_queued = !bio_list_empty(&pd->iosched.write_queue);
		spin_unlock(&pd->iosched.lock);

		if (!reads_queued && !writes_queued)
			break;

		if (pd->iosched.writing) {
			int need_write_seek = 1;
			spin_lock(&pd->iosched.lock);
			bio = bio_list_peek(&pd->iosched.write_queue);
			spin_unlock(&pd->iosched.lock);
			if (bio && (bio->bi_iter.bi_sector ==
				    pd->iosched.last_write))
				need_write_seek = 0;
			if (need_write_seek && reads_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					pkt_dbg(2, pd, "write, waiting\n");
					break;
				}
				pkt_flush_cache(pd);
				pd->iosched.writing = 0;
			}
		} else {
			if (!reads_queued && writes_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					pkt_dbg(2, pd, "read, waiting\n");
					break;
				}
				pd->iosched.writing = 1;
			}
		}

		spin_lock(&pd->iosched.lock);
		if (pd->iosched.writing)
			bio = bio_list_pop(&pd->iosched.write_queue);
		else
			bio = bio_list_pop(&pd->iosched.read_queue);
		spin_unlock(&pd->iosched.lock);

		if (!bio)
			continue;

		if (bio_data_dir(bio) == READ)
			pd->iosched.successive_reads +=
				bio->bi_iter.bi_size >> 10;
		else {
			pd->iosched.successive_reads = 0;
			pd->iosched.last_write = bio_end_sector(bio);
		}
		if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
			if (pd->read_speed == pd->write_speed) {
				pd->read_speed = MAX_SPEED;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		} else {
			if (pd->read_speed != pd->write_speed) {
				pd->read_speed = pd->write_speed;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		}

		atomic_inc(&pd->cdrw.pending_bios);
		submit_bio_noacct(bio);
	}
}
/*
 * Special care is needed if the underlying block device has a small
 * max_phys_segments value.
 */
static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
{
	if ((pd->settings.size << 9) / CD_FRAMESIZE
	    <= queue_max_segments(q)) {
		/*
		 * The cdrom device can handle one segment/frame
		 */
		clear_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else if ((pd->settings.size << 9) / PAGE_SIZE
		   <= queue_max_segments(q)) {
		/*
		 * We can handle this case at the expense of some extra memory
		 * copies during write operations
		 */
		set_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else {
		pkt_err(pd, "cdrom max_phys_segments too small\n");
		return -EIO;
	}
}
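/*
 * Worked example (added): a 32-frame packet is 64kB. A device advertising
 * at least 32 segments can take one 2kB frame per segment; with at least 16
 * segments the frames are merged into page-sized (4kB) segments, paid for
 * with the PACKET_MERGE_SEGS copies; below that, setup fails.
 */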
static void pkt_end_io_read(struct bio *bio)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
		bio, (unsigned long long)pkt->sector,
		(unsigned long long)bio->bi_iter.bi_sector, bio->bi_status);

	if (bio->bi_status)
		atomic_inc(&pkt->io_errors);
	bio_uninit(bio);
	if (atomic_dec_and_test(&pkt->io_wait)) {
		atomic_inc(&pkt->run_sm);
		wake_up(&pd->wqueue);
	}
	pkt_bio_finished(pd);
}

static void pkt_end_io_packet_write(struct bio *bio)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_status);

	pd->stats.pkt_ended++;

	bio_uninit(bio);
	pkt_bio_finished(pd);
	atomic_dec(&pkt->io_wait);
	atomic_inc(&pkt->run_sm);
	wake_up(&pd->wqueue);
}
/*
 * Schedule reads for the holes in a packet
 */
static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int frames_read = 0;
	struct bio *bio;
	int f;
	char written[PACKET_MAX_SIZE];

	BUG_ON(bio_list_empty(&pkt->orig_bios));

	atomic_set(&pkt->io_wait, 0);
	atomic_set(&pkt->io_errors, 0);

	/*
	 * Figure out which frames we need to read before we can write.
	 */
	memset(written, 0, sizeof(written));
	spin_lock(&pkt->lock);
	bio_list_for_each(bio, &pkt->orig_bios) {
		int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
			(CD_FRAMESIZE >> 9);
		int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
		pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
		BUG_ON(first_frame < 0);
		BUG_ON(first_frame + num_frames > pkt->frames);
		for (f = first_frame; f < first_frame + num_frames; f++)
			written[f] = 1;
	}
	spin_unlock(&pkt->lock);

	if (pkt->cache_valid) {
		pkt_dbg(2, pd, "zone %llx cached\n",
			(unsigned long long)pkt->sector);
		goto out_account;
	}

	/*
	 * Schedule reads for missing parts of the packet.
	 */
	for (f = 0; f < pkt->frames; f++) {
		int p, offset;

		if (written[f])
			continue;

		bio = pkt->r_bios[f];
		bio_init(bio, pd->bdev, bio->bi_inline_vecs, 1, REQ_OP_READ);
		bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
		bio->bi_end_io = pkt_end_io_read;
		bio->bi_private = pkt;

		p = (f * CD_FRAMESIZE) / PAGE_SIZE;
		offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
		pkt_dbg(2, pd, "Adding frame %d, page:%p offs:%d\n",
			f, pkt->pages[p], offset);
		if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
			BUG();

		atomic_inc(&pkt->io_wait);
		pkt_queue_bio(pd, bio);
		frames_read++;
	}

out_account:
	pkt_dbg(2, pd, "need %d frames for zone %llx\n",
		frames_read, (unsigned long long)pkt->sector);
	pd->stats.pkt_started++;
	pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
}
/*
 * Find a packet matching zone, or the least recently used packet if
 * there is no match.
 */
static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
{
	struct packet_data *pkt;

	list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
		if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
			list_del_init(&pkt->list);
			if (pkt->sector != zone)
				pkt->cache_valid = 0;
			return pkt;
		}
	}
	BUG();
	return NULL;
}

static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	if (pkt->cache_valid) {
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
	} else {
		list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
	}
}
static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state)
{
#if PACKET_DEBUG > 1
	static const char *state_name[] = {
		"IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
	};
	enum packet_data_state old_state = pkt->state;
	pkt_dbg(2, pd, "pkt %2d : s=%6llx %s -> %s\n",
		pkt->id, (unsigned long long)pkt->sector,
		state_name[old_state], state_name[state]);
#endif
	pkt->state = state;
}
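/*
 * Typical packet life cycle (added summary, derived from
 * pkt_run_state_machine() below): IDLE -> WAITING (collect bios) ->
 * READ_WAIT (gather missing frames) -> WRITE_WAIT -> FINISHED, with
 * RECOVERY entered on read or write errors before finishing.
 */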
/*
 * Scan the work queue to see if we can start a new packet.
 * returns non-zero if any work was done.
 */
static int pkt_handle_queue(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *p;
	struct bio *bio = NULL;
	sector_t zone = 0; /* Suppress gcc warning */
	struct pkt_rb_node *node, *first_node;
	struct rb_node *n;

	atomic_set(&pd->scan_queue, 0);

	if (list_empty(&pd->cdrw.pkt_free_list)) {
		pkt_dbg(2, pd, "no pkt\n");
		return 0;
	}

	/*
	 * Try to find a zone we are not already working on.
	 */
	spin_lock(&pd->lock);
	first_node = pkt_rbtree_find(pd, pd->current_sector);
	if (!first_node) {
		n = rb_first(&pd->bio_queue);
		if (n)
			first_node = rb_entry(n, struct pkt_rb_node, rb_node);
	}
	node = first_node;
	while (node) {
		bio = node->bio;
		zone = get_zone(bio->bi_iter.bi_sector, pd);
		list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
			if (p->sector == zone) {
				bio = NULL;
				goto try_next_bio;
			}
		}
		break;
try_next_bio:
		node = pkt_rbtree_next(node);
		if (!node) {
			n = rb_first(&pd->bio_queue);
			if (n)
				node = rb_entry(n, struct pkt_rb_node, rb_node);
		}
		if (node == first_node)
			node = NULL;
	}
	spin_unlock(&pd->lock);
	if (!bio) {
		pkt_dbg(2, pd, "no bio\n");
		return 0;
	}

	pkt = pkt_get_packet_data(pd, zone);

	pd->current_sector = zone + pd->settings.size;
	pkt->sector = zone;
	BUG_ON(pkt->frames != pd->settings.size >> 2);
	pkt->write_size = 0;

	/*
	 * Scan work queue for bios in the same zone and link them
	 * to this packet.
	 */
	spin_lock(&pd->lock);
	pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
	while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
		bio = node->bio;
		pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long)
			get_zone(bio->bi_iter.bi_sector, pd));
		if (get_zone(bio->bi_iter.bi_sector, pd) != zone)
			break;
		pkt_rbtree_erase(pd, node);
		spin_lock(&pkt->lock);
		bio_list_add(&pkt->orig_bios, bio);
		pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
		spin_unlock(&pkt->lock);
	}
	/* check write congestion marks, and if bio_queue_size is
	 * below, wake up any waiters
	 */
	if (pd->congested &&
	    pd->bio_queue_size <= pd->write_congestion_off) {
		pd->congested = false;
		wake_up_var(&pd->congested);
	}
	spin_unlock(&pd->lock);

	pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
	pkt_set_state(pkt, PACKET_WAITING_STATE);
	atomic_set(&pkt->run_sm, 1);

	spin_lock(&pd->cdrw.active_list_lock);
	list_add(&pkt->list, &pd->cdrw.pkt_active_list);
	spin_unlock(&pd->cdrw.active_list_lock);

	return 1;
}
/**
 * bio_list_copy_data - copy contents of data buffers from one chain of bios to
 * another
 * @src: source bio list
 * @dst: destination bio list
 *
 * Stops when it reaches the end of either the @src list or @dst list - that is,
 * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
 * bios).
 */
static void bio_list_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter = src->bi_iter;
	struct bvec_iter dst_iter = dst->bi_iter;

	while (1) {
		if (!src_iter.bi_size) {
			src = src->bi_next;
			if (!src)
				break;

			src_iter = src->bi_iter;
		}

		if (!dst_iter.bi_size) {
			dst = dst->bi_next;
			if (!dst)
				break;

			dst_iter = dst->bi_iter;
		}

		bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
	}
}
/*
 * Assemble a bio to write one packet and queue the bio for processing
 * by the underlying block device.
 */
static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int f;

	bio_init(pkt->w_bio, pd->bdev, pkt->w_bio->bi_inline_vecs, pkt->frames,
		 REQ_OP_WRITE);
	pkt->w_bio->bi_iter.bi_sector = pkt->sector;
	pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
	pkt->w_bio->bi_private = pkt;

	/* XXX: locking? */
	for (f = 0; f < pkt->frames; f++) {
		struct page *page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
		unsigned offset = (f * CD_FRAMESIZE) % PAGE_SIZE;

		if (!bio_add_page(pkt->w_bio, page, CD_FRAMESIZE, offset))
			BUG();
	}
	pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt);

	/*
	 * Fill-in bvec with data from orig_bios.
	 */
	spin_lock(&pkt->lock);
	bio_list_copy_data(pkt->w_bio, pkt->orig_bios.head);

	pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
	spin_unlock(&pkt->lock);

	pkt_dbg(2, pd, "Writing %d frames for zone %llx\n",
		pkt->write_size, (unsigned long long)pkt->sector);

	if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames))
		pkt->cache_valid = 1;
	else
		pkt->cache_valid = 0;

	/* Start the write request */
	atomic_set(&pkt->io_wait, 1);
	pkt_queue_bio(pd, pkt->w_bio);
}

static void pkt_finish_packet(struct packet_data *pkt, blk_status_t status)
{
	struct bio *bio;

	if (status)
		pkt->cache_valid = 0;

	/* Finish all bios corresponding to this packet */
	while ((bio = bio_list_pop(&pkt->orig_bios))) {
		bio->bi_status = status;
		bio_endio(bio);
	}
}
static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	pkt_dbg(2, pd, "pkt %d\n", pkt->id);

	for (;;) {
		switch (pkt->state) {
		case PACKET_WAITING_STATE:
			if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
				return;

			pkt->sleep_time = 0;
			pkt_gather_data(pd, pkt);
			pkt_set_state(pkt, PACKET_READ_WAIT_STATE);
			break;

		case PACKET_READ_WAIT_STATE:
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (atomic_read(&pkt->io_errors) > 0) {
				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
			} else {
				pkt_start_write(pd, pkt);
			}
			break;

		case PACKET_WRITE_WAIT_STATE:
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (!pkt->w_bio->bi_status) {
				pkt_set_state(pkt, PACKET_FINISHED_STATE);
			} else {
				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
			}
			break;

		case PACKET_RECOVERY_STATE:
			pkt_dbg(2, pd, "No recovery possible\n");
			pkt_set_state(pkt, PACKET_FINISHED_STATE);
			break;

		case PACKET_FINISHED_STATE:
			pkt_finish_packet(pkt, pkt->w_bio->bi_status);
			return;

		default:
			BUG();
			break;
		}
	}
}
static void pkt_handle_packets(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	/*
	 * Run state machine for active packets
	 */
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		if (atomic_read(&pkt->run_sm) > 0) {
			atomic_set(&pkt->run_sm, 0);
			pkt_run_state_machine(pd, pkt);
		}
	}

	/*
	 * Move no longer active packets to the free list
	 */
	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
		if (pkt->state == PACKET_FINISHED_STATE) {
			list_del(&pkt->list);
			pkt_put_packet_data(pd, pkt);
			pkt_set_state(pkt, PACKET_IDLE_STATE);
			atomic_set(&pd->scan_queue, 1);
		}
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}

static void pkt_count_states(struct pktcdvd_device *pd, int *states)
{
	struct packet_data *pkt;
	int i;

	for (i = 0; i < PACKET_NUM_STATES; i++)
		states[i] = 0;

	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		states[pkt->state]++;
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}
/*
 * kcdrwd is woken up when writes have been queued for one of our
 * registered devices
 */
static int kcdrwd(void *foobar)
{
	struct pktcdvd_device *pd = foobar;
	struct packet_data *pkt;
	long min_sleep_time, residue;

	set_user_nice(current, MIN_NICE);
	set_freezable();

	for (;;) {
		DECLARE_WAITQUEUE(wait, current);

		/*
		 * Wait until there is something to do
		 */
		add_wait_queue(&pd->wqueue, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);

			/* Check if we need to run pkt_handle_queue */
			if (atomic_read(&pd->scan_queue) > 0)
				goto work_to_do;

			/* Check if we need to run the state machine for some packet */
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (atomic_read(&pkt->run_sm) > 0)
					goto work_to_do;
			}

			/* Check if we need to process the iosched queues */
			if (atomic_read(&pd->iosched.attention) != 0)
				goto work_to_do;

			/* Otherwise, go to sleep */
			if (PACKET_DEBUG > 1) {
				int states[PACKET_NUM_STATES];
				pkt_count_states(pd, states);
				pkt_dbg(2, pd, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
					states[0], states[1], states[2],
					states[3], states[4], states[5]);
			}

			min_sleep_time = MAX_SCHEDULE_TIMEOUT;
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
					min_sleep_time = pkt->sleep_time;
			}

			pkt_dbg(2, pd, "sleeping\n");
			residue = schedule_timeout(min_sleep_time);
			pkt_dbg(2, pd, "wake up\n");

			/* make swsusp happy with our thread */
			try_to_freeze();

			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (!pkt->sleep_time)
					continue;
				pkt->sleep_time -= min_sleep_time - residue;
				if (pkt->sleep_time <= 0) {
					pkt->sleep_time = 0;
					atomic_inc(&pkt->run_sm);
				}
			}

			if (kthread_should_stop())
				break;
		}
work_to_do:
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&pd->wqueue, &wait);

		if (kthread_should_stop())
			break;

		/*
		 * if pkt_handle_queue returns true, we can queue
		 * another request.
		 */
		while (pkt_handle_queue(pd))
			;

		/*
		 * Handle packet state machine
		 */
		pkt_handle_packets(pd);

		/*
		 * Handle iosched queues
		 */
		pkt_iosched_process_queue(pd);
	}

	return 0;
}
static void pkt_print_settings(struct pktcdvd_device *pd)
{
	pkt_info(pd, "%s packets, %u blocks, Mode-%c disc\n",
		 pd->settings.fp ? "Fixed" : "Variable",
		 pd->settings.size >> 2,
		 pd->settings.block_mode == 8 ? '1' : '2');
}

static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
{
	memset(cgc->cmd, 0, sizeof(cgc->cmd));

	cgc->cmd[0] = GPCMD_MODE_SENSE_10;
	cgc->cmd[2] = page_code | (page_control << 6);
	cgc->cmd[7] = cgc->buflen >> 8;
	cgc->cmd[8] = cgc->buflen & 0xff;
	cgc->data_direction = CGC_DATA_READ;
	return pkt_generic_packet(pd, cgc);
}

static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
{
	memset(cgc->cmd, 0, sizeof(cgc->cmd));
	memset(cgc->buffer, 0, 2);
	cgc->cmd[0] = GPCMD_MODE_SELECT_10;
	cgc->cmd[1] = 0x10;		/* PF */
	cgc->cmd[7] = cgc->buflen >> 8;
	cgc->cmd[8] = cgc->buflen & 0xff;
	cgc->data_direction = CGC_DATA_WRITE;
	return pkt_generic_packet(pd, cgc);
}
static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
{
	struct packet_command cgc;
	int ret;

	/* set up command and get the disc info */
	init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_DISC_INFO;
	cgc.cmd[8] = cgc.buflen = 2;
	cgc.quiet = 1;

	ret = pkt_generic_packet(pd, &cgc);
	if (ret)
		return ret;

	/* not all drives have the same disc_info length, so requeue
	 * packet with the length the drive tells us it can supply
	 */
	cgc.buflen = be16_to_cpu(di->disc_information_length) +
		     sizeof(di->disc_information_length);

	if (cgc.buflen > sizeof(disc_information))
		cgc.buflen = sizeof(disc_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
}

static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
{
	struct packet_command cgc;
	int ret;

	init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
	cgc.cmd[1] = type & 3;
	cgc.cmd[4] = (track & 0xff00) >> 8;
	cgc.cmd[5] = track & 0xff;
	cgc.cmd[8] = 8;
	cgc.quiet = 1;

	ret = pkt_generic_packet(pd, &cgc);
	if (ret)
		return ret;

	cgc.buflen = be16_to_cpu(ti->track_information_length) +
		     sizeof(ti->track_information_length);

	if (cgc.buflen > sizeof(track_information))
		cgc.buflen = sizeof(track_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
}

static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
						long *last_written)
{
	disc_information di;
	track_information ti;
	__u32 last_track;
	int ret;

	ret = pkt_get_disc_info(pd, &di);
	if (ret)
		return ret;

	last_track = (di.last_track_msb << 8) | di.last_track_lsb;
	ret = pkt_get_track_info(pd, last_track, 1, &ti);
	if (ret)
		return ret;

	/* if this track is blank, try the previous. */
	if (ti.blank) {
		last_track--;
		ret = pkt_get_track_info(pd, last_track, 1, &ti);
		if (ret)
			return ret;
	}

	/* if last recorded field is valid, return it. */
	if (ti.lra_v) {
		*last_written = be32_to_cpu(ti.last_rec_address);
	} else {
		/* make it up instead */
		*last_written = be32_to_cpu(ti.track_start) +
				be32_to_cpu(ti.track_size);
		if (ti.free_blocks)
			*last_written -= (be32_to_cpu(ti.free_blocks) + 7);
	}
	return 0;
}
/*
 * write mode select package based on pd->settings
 */
static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	struct scsi_sense_hdr sshdr;
	write_param_page *wp;
	char buffer[128];
	int ret, size;

	/* doesn't apply to DVD+RW or DVD-RAM */
	if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
		return 0;

	memset(buffer, 0, sizeof(buffer));
	init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
	cgc.sshdr = &sshdr;
	ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
	if (ret) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
	pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
	if (size > sizeof(buffer))
		size = sizeof(buffer);

	/*
	 * now get it all
	 */
	init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
	cgc.sshdr = &sshdr;
	ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
	if (ret) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	/*
	 * write page is offset header + block descriptor length
	 */
	wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];

	wp->fp = pd->settings.fp;
	wp->track_mode = pd->settings.track_mode;
	wp->write_type = pd->settings.write_type;
	wp->data_block_type = pd->settings.block_mode;

	wp->multi_session = 0;

#ifdef PACKET_USE_LS
	wp->link_size = 7;
	wp->ls_v = 1;
#endif

	if (wp->data_block_type == PACKET_BLOCK_MODE1) {
		wp->session_format = 0;
		wp->subhdr2 = 0x20;
	} else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
		wp->session_format = 0x20;
		wp->subhdr2 = 8;
#if 0
		wp->mcn[0] = 0x80;
		memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
#endif
	} else {
		/*
		 * paranoia
		 */
		pkt_err(pd, "write mode wrong %d\n", wp->data_block_type);
		return 1;
	}
	wp->packet_size = cpu_to_be32(pd->settings.size >> 2);

	cgc.buflen = cgc.cmd[8] = size;
	ret = pkt_mode_select(pd, &cgc);
	if (ret) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	pkt_print_settings(pd);
	return 0;
}
/*
 * 1 -- we can write to this track, 0 -- we can't
 */
static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
{
	switch (pd->mmc3_profile) {
		case 0x1a: /* DVD+RW */
		case 0x12: /* DVD-RAM */
			/* The track is always writable on DVD+RW/DVD-RAM */
			return 1;
		default:
			break;
	}

	if (!ti->packet || !ti->fp)
		return 0;

	/*
	 * "good" settings as per Mt Fuji.
	 */
	if (ti->rt == 0 && ti->blank == 0)
		return 1;

	if (ti->rt == 0 && ti->blank == 1)
		return 1;

	if (ti->rt == 1 && ti->blank == 0)
		return 1;

	pkt_err(pd, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
	return 0;
}

/*
 * 1 -- we can write to this disc, 0 -- we can't
 */
static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
{
	switch (pd->mmc3_profile) {
		case 0x0a: /* CD-RW */
		case 0xffff: /* MMC3 not supported */
			break;
		case 0x1a: /* DVD+RW */
		case 0x13: /* DVD-RW */
		case 0x12: /* DVD-RAM */
			return 1;
		default:
			pkt_dbg(2, pd, "Wrong disc profile (%x)\n",
				pd->mmc3_profile);
			return 0;
	}

	/*
	 * for disc type 0xff we should probably reserve a new track.
	 * but i'm not sure, should we leave this to user apps? probably.
	 */
	if (di->disc_type == 0xff) {
		pkt_notice(pd, "unknown disc - no track?\n");
		return 0;
	}

	if (di->disc_type != 0x20 && di->disc_type != 0) {
		pkt_err(pd, "wrong disc type (%x)\n", di->disc_type);
		return 0;
	}

	if (di->erasable == 0) {
		pkt_notice(pd, "disc not erasable\n");
		return 0;
	}

	if (di->border_status == PACKET_SESSION_RESERVED) {
		pkt_err(pd, "can't write to last track (reserved)\n");
		return 0;
	}

	return 1;
}
static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	unsigned char buf[12];
	disc_information di;
	track_information ti;
	int ret, track;

	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
	cgc.cmd[8] = 8;
	ret = pkt_generic_packet(pd, &cgc);
	pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7];

	memset(&di, 0, sizeof(disc_information));
	memset(&ti, 0, sizeof(track_information));

	ret = pkt_get_disc_info(pd, &di);
	if (ret) {
		pkt_err(pd, "failed get_disc\n");
		return ret;
	}

	if (!pkt_writable_disc(pd, &di))
		return -EROFS;

	pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;

	track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
	ret = pkt_get_track_info(pd, track, 1, &ti);
	if (ret) {
		pkt_err(pd, "failed get_track\n");
		return ret;
	}

	if (!pkt_writable_track(pd, &ti)) {
		pkt_err(pd, "can't write to this track\n");
		return -EROFS;
	}

	/*
	 * we keep packet size in 512 byte units, makes it easier to
	 * deal with request calculations.
	 */
	pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
	if (pd->settings.size == 0) {
		pkt_notice(pd, "detected zero packet size!\n");
		return -ENXIO;
	}
	if (pd->settings.size > PACKET_MAX_SECTORS) {
		pkt_err(pd, "packet size is too big\n");
		return -EROFS;
	}
	pd->settings.fp = ti.fp;
	pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
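	/*
	 * Worked example (added): pd->settings.size is kept in 512-byte
	 * sectors, so a fixed_packet_size of 32 frames becomes 32 << 2 = 128
	 * sectors (64kB), and pd->offset records where zone 0 starts relative
	 * to track_start.
	 */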
	if (ti.nwa_v) {
		pd->nwa = be32_to_cpu(ti.next_writable);
		set_bit(PACKET_NWA_VALID, &pd->flags);
	}

	/*
	 * in theory we could use lra on -RW media as well and just zero
	 * blocks that haven't been written yet, but in practice that
	 * is just a no-go. we'll use that for -R, naturally.
	 */
	if (ti.lra_v) {
		pd->lra = be32_to_cpu(ti.last_rec_address);
		set_bit(PACKET_LRA_VALID, &pd->flags);
	} else {
		pd->lra = 0xffffffff;
		set_bit(PACKET_LRA_VALID, &pd->flags);
	}

	/*
	 * fine for now
	 */
	pd->settings.link_loss = 7;
	pd->settings.write_type = 0;	/* packet */
	pd->settings.track_mode = ti.track_mode;

	/*
	 * mode1 or mode2 disc
	 */
	switch (ti.data_mode) {
		case PACKET_MODE1:
			pd->settings.block_mode = PACKET_BLOCK_MODE1;
			break;
		case PACKET_MODE2:
			pd->settings.block_mode = PACKET_BLOCK_MODE2;
			break;
		default:
			pkt_err(pd, "unknown data mode\n");
			return -EROFS;
	}
	return 0;
}
/*
 * enable/disable write caching on drive
 */
static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
						int set)
{
	struct packet_command cgc;
	struct scsi_sense_hdr sshdr;
	unsigned char buf[64];
	int ret;

	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.sshdr = &sshdr;
	cgc.buflen = pd->mode_offset + 12;

	/*
	 * caching mode page might not be there, so quiet this command
	 */
	cgc.quiet = 1;

	ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0);
	if (ret)
		return ret;

	buf[pd->mode_offset + 10] |= (!!set << 2);

	cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
	ret = pkt_mode_select(pd, &cgc);
	if (ret) {
		pkt_err(pd, "write caching control failed\n");
		pkt_dump_sense(pd, &cgc);
	} else if (!ret && set)
		pkt_notice(pd, "enabled write caching\n");
	return ret;
}

static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
{
	struct packet_command cgc;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
	cgc.cmd[4] = lockflag ? 1 : 0;
	return pkt_generic_packet(pd, &cgc);
}
/*
 * Returns drive maximum write speed
 */
static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
						unsigned *write_speed)
{
	struct packet_command cgc;
	struct scsi_sense_hdr sshdr;
	unsigned char buf[256+18];
	unsigned char *cap_buf;
	int ret, offset;

	cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
	cgc.sshdr = &sshdr;

	ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
	if (ret) {
		cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
			     sizeof(struct mode_page_header);
		ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
		if (ret) {
			pkt_dump_sense(pd, &cgc);
			return ret;
		}
	}

	offset = 20;			    /* Obsoleted field, used by older drives */
	if (cap_buf[1] >= 28)
		offset = 28;		    /* Current write speed selected */
	if (cap_buf[1] >= 30) {
		/* If the drive reports at least one "Logical Unit Write
		 * Speed Performance Descriptor Block", use the information
		 * in the first block. (contains the highest speed)
		 */
		int num_spdb = (cap_buf[30] << 8) + cap_buf[31];
		if (num_spdb > 0)
			offset = 34;
	}

	*write_speed = (cap_buf[offset] << 8) | cap_buf[offset + 1];
	return 0;
}

/* These tables from cdrecord - I don't have orange book */
/* standard speed CD-RW (1-4x) */
static char clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/* high speed CD-RW (-10x) */
static char hs_clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/* ultra high speed CD-RW */
static char us_clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0
};
/*
 * reads the maximum media speed from ATIP
 */
static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
						unsigned *speed)
{
	struct packet_command cgc;
	struct scsi_sense_hdr sshdr;
	unsigned char buf[64];
	unsigned int size, st, sp;
	int ret;

	init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
	cgc.sshdr = &sshdr;
	cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
	cgc.cmd[1] = 2;
	cgc.cmd[2] = 4; /* READ ATIP */
	cgc.cmd[8] = 2;
	ret = pkt_generic_packet(pd, &cgc);
	if (ret) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}
	size = ((unsigned int) buf[0]<<8) + buf[1] + 2;
	if (size > sizeof(buf))
		size = sizeof(buf);

	init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
	cgc.sshdr = &sshdr;
	cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
	cgc.cmd[1] = 2;
	cgc.cmd[2] = 4;
	cgc.cmd[8] = size;
	ret = pkt_generic_packet(pd, &cgc);
	if (ret) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	if (!(buf[6] & 0x40)) {
		pkt_notice(pd, "disc type is not CD-RW\n");
		return 1;
	}
	if (!(buf[6] & 0x4)) {
		pkt_notice(pd, "A1 values on media are not valid, maybe not CDRW?\n");
		return 1;
	}

	st = (buf[6] >> 3) & 0x7; /* disc sub-type */

	sp = buf[16] & 0xf; /* max speed from ATIP A1 field */

	/* Info from cdrecord */
	switch (st) {
		case 0: /* standard speed */
			*speed = clv_to_speed[sp];
			break;
		case 1: /* high speed */
			*speed = hs_clv_to_speed[sp];
			break;
		case 2: /* ultra high speed */
			*speed = us_clv_to_speed[sp];
			break;
		default:
			pkt_notice(pd, "unknown disc sub-type %d\n", st);
			return 1;
	}
	if (*speed > 0) {
		pkt_info(pd, "maximum media speed: %d\n", *speed);
		return 0;
	} else {
		pkt_notice(pd, "unknown speed %d for sub-type %d\n", sp, st);
		return 1;
	}
}
static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	struct scsi_sense_hdr sshdr;
	int ret;

	pkt_dbg(2, pd, "Performing OPC\n");

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.sshdr = &sshdr;
	cgc.timeout = 60*HZ;
	cgc.cmd[0] = GPCMD_SEND_OPC;
	cgc.cmd[1] = 1;
	ret = pkt_generic_packet(pd, &cgc);
	if (ret)
		pkt_dump_sense(pd, &cgc);
	return ret;
}
static int pkt_open_write(struct pktcdvd_device *pd)
{
	int ret;
	unsigned int write_speed, media_write_speed, read_speed;

	ret = pkt_probe_settings(pd);
	if (ret) {
		pkt_dbg(2, pd, "failed probe\n");
		return ret;
	}

	ret = pkt_set_write_settings(pd);
	if (ret) {
		pkt_dbg(1, pd, "failed saving write settings\n");
		return -EIO;
	}

	pkt_write_caching(pd, USE_WCACHING);

	ret = pkt_get_max_speed(pd, &write_speed);
	if (ret)
		write_speed = 16 * 177;
	switch (pd->mmc3_profile) {
		case 0x13: /* DVD-RW */
		case 0x1a: /* DVD+RW */
		case 0x12: /* DVD-RAM */
			pkt_dbg(1, pd, "write speed %ukB/s\n", write_speed);
			break;
		default:
			ret = pkt_media_speed(pd, &media_write_speed);
			if (ret)
				media_write_speed = 16;
			write_speed = min(write_speed, media_write_speed * 177);
			pkt_dbg(1, pd, "write speed %ux\n", write_speed / 176);
			break;
	}
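	/*
	 * Note (added): speeds here are in kB/s; 1x CD speed is roughly
	 * 176.4 kB/s, which is why 177 (and 176 for the printout) appear
	 * above, e.g. a 4x ATIP rating caps write_speed at 4 * 177 = 708 kB/s.
	 */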
	read_speed = write_speed;

	ret = pkt_set_speed(pd, write_speed, read_speed);
	if (ret) {
		pkt_dbg(1, pd, "couldn't set write speed\n");
		return -EIO;
	}
	pd->write_speed = write_speed;
	pd->read_speed = read_speed;

	ret = pkt_perform_opc(pd);
	if (ret) {
		pkt_dbg(1, pd, "Optimum Power Calibration failed\n");
	}

	return 0;
}
/*
 * called at open time.
 */
static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
{
	int ret;
	long lba;
	struct request_queue *q;
	struct block_device *bdev;

	/*
	 * We need to re-open the cdrom device without O_NONBLOCK to be able
	 * to read/write from/to it. It is already opened in O_NONBLOCK mode
	 * so open should not fail.
	 */
	bdev = blkdev_get_by_dev(pd->bdev->bd_dev, FMODE_READ | FMODE_EXCL, pd);
	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto out;
	}

	ret = pkt_get_last_written(pd, &lba);
	if (ret) {
		pkt_err(pd, "pkt_get_last_written failed\n");
		goto out_putdev;
	}

	set_capacity(pd->disk, lba << 2);
	set_capacity_and_notify(pd->bdev->bd_disk, lba << 2);

	q = bdev_get_queue(pd->bdev);
	if (write) {
		ret = pkt_open_write(pd);
		if (ret)
			goto out_putdev;
		/*
		 * Some CDRW drives can not handle writes larger than one packet,
		 * even if the size is a multiple of the packet size.
		 */
		blk_queue_max_hw_sectors(q, pd->settings.size);
		set_bit(PACKET_WRITABLE, &pd->flags);
	} else {
		pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
		clear_bit(PACKET_WRITABLE, &pd->flags);
	}

	ret = pkt_set_segment_merging(pd, q);
	if (ret)
		goto out_putdev;

	if (write) {
		if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
			pkt_err(pd, "not enough memory for buffers\n");
			ret = -ENOMEM;
			goto out_putdev;
		}
		pkt_info(pd, "%lukB available on disc\n", lba << 1);
	}

	return 0;

out_putdev:
	blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
out:
	return ret;
}
/*
 * called when the device is closed. makes sure that the device flushes
 * the internal cache before we close.
 */
static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
{
	if (flush && pkt_flush_cache(pd))
		pkt_dbg(1, pd, "not flushing cache\n");

	pkt_lock_door(pd, 0);

	pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
	blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);

	pkt_shrink_pktlist(pd);
}

static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
{
	if (dev_minor >= MAX_WRITERS)
		return NULL;

	dev_minor = array_index_nospec(dev_minor, MAX_WRITERS);
	return pkt_devs[dev_minor];
}
static int pkt_open(struct block_device *bdev, fmode_t mode)
{
	struct pktcdvd_device *pd = NULL;
	int ret;

	mutex_lock(&pktcdvd_mutex);
	mutex_lock(&ctl_mutex);
	pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
	if (!pd) {
		ret = -ENODEV;
		goto out;
	}
	BUG_ON(pd->refcnt < 0);

	pd->refcnt++;
	if (pd->refcnt > 1) {
		if ((mode & FMODE_WRITE) &&
		    !test_bit(PACKET_WRITABLE, &pd->flags)) {
			ret = -EBUSY;
			goto out_dec;
		}
	} else {
		ret = pkt_open_dev(pd, mode & FMODE_WRITE);
		if (ret)
			goto out_dec;
		/*
		 * needed here as well, since ext2 (among others) may change
		 * the blocksize at mount time
		 */
		set_blocksize(bdev, CD_FRAMESIZE);
	}

	mutex_unlock(&ctl_mutex);
	mutex_unlock(&pktcdvd_mutex);
	return 0;

out_dec:
	pd->refcnt--;
out:
	mutex_unlock(&ctl_mutex);
	mutex_unlock(&pktcdvd_mutex);
	return ret;
}

static void pkt_close(struct gendisk *disk, fmode_t mode)
{
	struct pktcdvd_device *pd = disk->private_data;

	mutex_lock(&pktcdvd_mutex);
	mutex_lock(&ctl_mutex);
	pd->refcnt--;
	BUG_ON(pd->refcnt < 0);
	if (pd->refcnt == 0) {
		int flush = test_bit(PACKET_WRITABLE, &pd->flags);
		pkt_release_dev(pd, flush);
	}
	mutex_unlock(&ctl_mutex);
	mutex_unlock(&pktcdvd_mutex);
}
static void pkt_end_io_read_cloned(struct bio *bio)
{
	struct packet_stacked_data *psd = bio->bi_private;
	struct pktcdvd_device *pd = psd->pd;

	psd->bio->bi_status = bio->bi_status;
	bio_put(bio);
	bio_endio(psd->bio);
	mempool_free(psd, &psd_pool);
	pkt_bio_finished(pd);
}

static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
{
	struct bio *cloned_bio =
		bio_alloc_clone(pd->bdev, bio, GFP_NOIO, &pkt_bio_set);
	struct packet_stacked_data *psd = mempool_alloc(&psd_pool, GFP_NOIO);

	psd->pd = pd;
	psd->bio = bio;
	cloned_bio->bi_private = psd;
	cloned_bio->bi_end_io = pkt_end_io_read_cloned;
	pd->stats.secs_r += bio_sectors(bio);
	pkt_queue_bio(pd, cloned_bio);
}
static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
{
	struct pktcdvd_device *pd = q->queuedata;
	sector_t zone;
	struct packet_data *pkt;
	int was_empty, blocked_bio;
	struct pkt_rb_node *node;

	zone = get_zone(bio->bi_iter.bi_sector, pd);

	/*
	 * If we find a matching packet in state WAITING or READ_WAIT, we can
	 * just append this bio to that packet.
	 */
	spin_lock(&pd->cdrw.active_list_lock);
	blocked_bio = 0;
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		if (pkt->sector == zone) {
			spin_lock(&pkt->lock);
			if ((pkt->state == PACKET_WAITING_STATE) ||
			    (pkt->state == PACKET_READ_WAIT_STATE)) {
				bio_list_add(&pkt->orig_bios, bio);
				pkt->write_size +=
					bio->bi_iter.bi_size / CD_FRAMESIZE;
				if ((pkt->write_size >= pkt->frames) &&
				    (pkt->state == PACKET_WAITING_STATE)) {
					atomic_inc(&pkt->run_sm);
					wake_up(&pd->wqueue);
				}
				spin_unlock(&pkt->lock);
				spin_unlock(&pd->cdrw.active_list_lock);
				return;
			} else {
				blocked_bio = 1;
			}
			spin_unlock(&pkt->lock);
		}
	}
	spin_unlock(&pd->cdrw.active_list_lock);

	/*
	 * Test if there is enough room left in the bio work queue
	 * (queue size >= congestion on mark).
	 * If not, wait till the work queue size is below the congestion off mark.
	 */
	spin_lock(&pd->lock);
	if (pd->write_congestion_on > 0
	    && pd->bio_queue_size >= pd->write_congestion_on) {
		struct wait_bit_queue_entry wqe;

		init_wait_var_entry(&wqe, &pd->congested, 0);
		for (;;) {
			prepare_to_wait_event(__var_waitqueue(&pd->congested),
					      &wqe.wq_entry,
					      TASK_UNINTERRUPTIBLE);
			if (pd->bio_queue_size <= pd->write_congestion_off)
				break;
			pd->congested = true;
			spin_unlock(&pd->lock);
			schedule();
			spin_lock(&pd->lock);
		}
	}
	spin_unlock(&pd->lock);

	/*
	 * No matching packet found. Store the bio in the work queue.
	 */
	node = mempool_alloc(&pd->rb_pool, GFP_NOIO);
	node->bio = bio;
	spin_lock(&pd->lock);
	BUG_ON(pd->bio_queue_size < 0);
	was_empty = (pd->bio_queue_size == 0);
	pkt_rbtree_insert(pd, node);
	spin_unlock(&pd->lock);

	/*
	 * Wake up the worker thread.
	 */
	atomic_set(&pd->scan_queue, 1);
	if (was_empty) {
		/* This wake_up is required for correct operation */
		wake_up(&pd->wqueue);
	} else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
		/*
		 * This wake up is not required for correct operation,
		 * but improves performance in some cases.
		 */
		wake_up(&pd->wqueue);
	}
}
static void pkt_submit_bio(struct bio *bio)
{
	struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->queue->queuedata;
	struct bio *split;

	bio = bio_split_to_limits(bio);
	if (!bio)
		return;

	pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
		(unsigned long long)bio->bi_iter.bi_sector,
		(unsigned long long)bio_end_sector(bio));

	/*
	 * Clone READ bios so we can have our own bi_end_io callback.
	 */
	if (bio_data_dir(bio) == READ) {
		pkt_make_request_read(pd, bio);
		return;
	}

	if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
		pkt_notice(pd, "WRITE for ro device (%llu)\n",
			   (unsigned long long)bio->bi_iter.bi_sector);
		goto end_io;
	}

	if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
		pkt_err(pd, "wrong bio size\n");
		goto end_io;
	}

	do {
		sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
		sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);

		if (last_zone != zone) {
			BUG_ON(last_zone != zone + pd->settings.size);

			split = bio_split(bio, last_zone -
					  bio->bi_iter.bi_sector,
					  GFP_NOIO, &pkt_bio_set);
			bio_chain(split, bio);
		} else {
			split = bio;
		}

		pkt_make_request_write(bio->bi_bdev->bd_disk->queue, split);
	} while (split != bio);

	return;
end_io:
	bio_io_error(bio);
}
2451 static void pkt_init_queue(struct pktcdvd_device *pd)
2453 struct request_queue *q = pd->disk->queue;
2455 blk_queue_logical_block_size(q, CD_FRAMESIZE);
2456 blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
static int pkt_seq_show(struct seq_file *m, void *p)
{
	struct pktcdvd_device *pd = m->private;
	char *msg;
	int states[PACKET_NUM_STATES];

	seq_printf(m, "Writer %s mapped to %pg:\n", pd->name, pd->bdev);

	seq_printf(m, "\nSettings:\n");
	seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);

	if (pd->settings.write_type == 0)
		msg = "Packet";
	else
		msg = "Unknown";
	seq_printf(m, "\twrite type:\t\t%s\n", msg);

	seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
	seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);

	seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);

	if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
		msg = "Mode 1";
	else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
		msg = "Mode 2";
	else
		msg = "Unknown";
	seq_printf(m, "\tblock mode:\t\t%s\n", msg);

	seq_printf(m, "\nStatistics:\n");
	seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
	seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
	seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
	seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
	seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);

	seq_printf(m, "\nMisc:\n");
	seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
	seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
	seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
	seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
	seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
	seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);

	seq_printf(m, "\nQueue state:\n");
	seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
	seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
	seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector);

	pkt_count_states(pd, states);
	seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
		   states[0], states[1], states[2], states[3], states[4], states[5]);

	seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n",
		   pd->write_congestion_off,
		   pd->write_congestion_on);
	return 0;
}
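/*
 * Illustrative shape of the resulting /proc/driver/pktcdvd/pktcdvd0 entry,
 * reconstructed from the format strings above (values are made up):
 *
 *	Writer pktcdvd0 mapped to sr0:
 *
 *	Settings:
 *		packet size:		32kB
 *		write type:		Packet
 *		packet type:		Fixed
 *		link loss:		7
 *	...
 *	Queue state:
 *		bios queued:		0
 *		bios pending:		0
 */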
static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
{
	int i;
	struct block_device *bdev;
	struct scsi_device *sdev;

	if (pd->pkt_dev == dev) {
		pkt_err(pd, "recursive setup not allowed\n");
		return -EBUSY;
	}
	for (i = 0; i < MAX_WRITERS; i++) {
		struct pktcdvd_device *pd2 = pkt_devs[i];

		if (!pd2)
			continue;
		if (pd2->bdev->bd_dev == dev) {
			pkt_err(pd, "%pg already setup\n", pd2->bdev);
			return -EBUSY;
		}
		if (pd2->pkt_dev == dev) {
			pkt_err(pd, "can't chain pktcdvd devices\n");
			return -EBUSY;
		}
	}

	bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_NDELAY, NULL);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	sdev = scsi_device_from_queue(bdev->bd_disk->queue);
	if (!sdev) {
		blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
		return -EINVAL;
	}
	put_device(&sdev->sdev_gendev);

	/* This is safe, since we have a reference from open(). */
	__module_get(THIS_MODULE);

	pd->bdev = bdev;
	set_blocksize(bdev, CD_FRAMESIZE);

	pkt_init_queue(pd);

	atomic_set(&pd->cdrw.pending_bios, 0);
	pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
	if (IS_ERR(pd->cdrw.thread)) {
		pkt_err(pd, "can't start kernel thread\n");
		goto out_mem;
	}

	proc_create_single_data(pd->name, 0, pkt_proc, pkt_seq_show, pd);
	pkt_dbg(1, pd, "writer mapped to %pg\n", bdev);
	return 0;

out_mem:
	blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);
	return -ENOMEM;
}
static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
{
	struct pktcdvd_device *pd = bdev->bd_disk->private_data;
	int ret;

	pkt_dbg(2, pd, "cmd %x, dev %d:%d\n",
		cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));

	mutex_lock(&pktcdvd_mutex);
	switch (cmd) {
	case CDROMEJECT:
		/*
		 * The door gets locked when the device is opened, so we
		 * have to unlock it or else the eject command fails.
		 */
		if (pd->refcnt == 1)
			pkt_lock_door(pd, 0);
		fallthrough;
	/*
	 * forward selected CDROM ioctls to CD-ROM, for UDF
	 */
	case CDROMMULTISESSION:
	case CDROMREADTOCENTRY:
	case CDROM_LAST_WRITTEN:
	case CDROM_SEND_PACKET:
	case SCSI_IOCTL_SEND_COMMAND:
		if (!bdev->bd_disk->fops->ioctl)
			ret = -ENOTTY;
		else
			ret = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
		break;
	default:
		pkt_dbg(2, pd, "Unknown ioctl (%x)\n", cmd);
		ret = -ENOTTY;
	}
	mutex_unlock(&pktcdvd_mutex);

	return ret;
}
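/*
 * Illustrative userspace view of the CDROMEJECT handling above: the eject
 * is issued against the pktcdvd node, and the driver unlocks the door of
 * the underlying drive before forwarding the command (sketch only, error
 * handling omitted; ioctl numbers come from <linux/cdrom.h>):
 *
 *	int fd = open("/dev/pktcdvd/pktcdvd0", O_RDONLY | O_NONBLOCK);
 *	ioctl(fd, CDROMEJECT, 0);
 *	close(fd);
 */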
static unsigned int pkt_check_events(struct gendisk *disk,
				     unsigned int clearing)
{
	struct pktcdvd_device *pd = disk->private_data;
	struct gendisk *attached_disk;

	if (!pd)
		return 0;
	if (!pd->bdev)
		return 0;
	attached_disk = pd->bdev->bd_disk;
	if (!attached_disk || !attached_disk->fops->check_events)
		return 0;
	return attached_disk->fops->check_events(attached_disk, clearing);
}
static char *pkt_devnode(struct gendisk *disk, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "pktcdvd/%s", disk->disk_name);
}
static const struct block_device_operations pktcdvd_ops = {
	.owner =		THIS_MODULE,
	.submit_bio =		pkt_submit_bio,
	.open =			pkt_open,
	.release =		pkt_close,
	.ioctl =		pkt_ioctl,
	.compat_ioctl =		blkdev_compat_ptr_ioctl,
	.check_events =		pkt_check_events,
	.devnode =		pkt_devnode,
};
/*
 * Set up mapping from pktcdvd device to CD-ROM device.
 */
static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
{
	int idx;
	int ret = -ENOMEM;
	struct pktcdvd_device *pd;
	struct gendisk *disk;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	for (idx = 0; idx < MAX_WRITERS; idx++)
		if (!pkt_devs[idx])
			break;
	if (idx == MAX_WRITERS) {
		pr_err("max %d writers supported\n", MAX_WRITERS);
		ret = -EBUSY;
		goto out_mutex;
	}

	/* Create device */
	pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
	if (!pd)
		goto out_mutex;

	ret = mempool_init_kmalloc_pool(&pd->rb_pool, PKT_RB_POOL_SIZE,
					sizeof(struct pkt_rb_node));
	if (ret)
		goto out_mem;

	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
	INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
	spin_lock_init(&pd->cdrw.active_list_lock);

	spin_lock_init(&pd->lock);
	spin_lock_init(&pd->iosched.lock);
	bio_list_init(&pd->iosched.read_queue);
	bio_list_init(&pd->iosched.write_queue);
	sprintf(pd->name, DRIVER_NAME"%d", idx);
	init_waitqueue_head(&pd->wqueue);
	pd->bio_queue = RB_ROOT;

	pd->write_congestion_on  = write_congestion_on;
	pd->write_congestion_off = write_congestion_off;

	ret = -ENOMEM;
	disk = blk_alloc_disk(NUMA_NO_NODE);
	if (!disk)
		goto out_mem;
	pd->disk = disk;
	disk->major = pktdev_major;
	disk->first_minor = idx;
	disk->minors = 1;
	disk->fops = &pktcdvd_ops;
	disk->flags = GENHD_FL_REMOVABLE | GENHD_FL_NO_PART;
	strcpy(disk->disk_name, pd->name);
	disk->private_data = pd;

	pd->pkt_dev = MKDEV(pktdev_major, idx);
	ret = pkt_new_dev(pd, dev);
	if (ret)
		goto out_mem2;

	/* inherit events of the host device */
	disk->events = pd->bdev->bd_disk->events;

	ret = add_disk(disk);
	if (ret)
		goto out_mem2;

	pkt_sysfs_dev_new(pd);
	pkt_debugfs_dev_new(pd);

	pkt_devs[idx] = pd;
	if (pkt_dev)
		*pkt_dev = pd->pkt_dev;

	mutex_unlock(&ctl_mutex);
	return 0;

out_mem2:
	put_disk(disk);
out_mem:
	mempool_exit(&pd->rb_pool);
	kfree(pd);
out_mutex:
	mutex_unlock(&ctl_mutex);
	pr_err("setup of pktcdvd device failed\n");
	return ret;
}
/*
 * Tear down mapping from pktcdvd device to CD-ROM device.
 */
static int pkt_remove_dev(dev_t pkt_dev)
{
	struct pktcdvd_device *pd;
	int idx;
	int ret = 0;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	for (idx = 0; idx < MAX_WRITERS; idx++) {
		pd = pkt_devs[idx];
		if (pd && (pd->pkt_dev == pkt_dev))
			break;
	}
	if (idx == MAX_WRITERS) {
		pr_debug("dev not setup\n");
		ret = -ENXIO;
		goto out;
	}

	if (pd->refcnt > 0) {
		ret = -EBUSY;
		goto out;
	}
	if (!IS_ERR(pd->cdrw.thread))
		kthread_stop(pd->cdrw.thread);

	pkt_devs[idx] = NULL;

	pkt_debugfs_dev_remove(pd);
	pkt_sysfs_dev_remove(pd);

	blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);

	remove_proc_entry(pd->name, pkt_proc);
	pkt_dbg(1, pd, "writer unmapped\n");

	del_gendisk(pd->disk);
	put_disk(pd->disk);

	mempool_exit(&pd->rb_pool);
	kfree(pd);

	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);

out:
	mutex_unlock(&ctl_mutex);
	return ret;
}
static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
{
	struct pktcdvd_device *pd;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
	if (pd) {
		ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
		ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
	} else {
		ctrl_cmd->dev = 0;
		ctrl_cmd->pkt_dev = 0;
	}
	ctrl_cmd->num_devices = MAX_WRITERS;

	mutex_unlock(&ctl_mutex);
}
static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct pkt_ctrl_command ctrl_cmd;
	int ret = 0;
	dev_t pkt_dev = 0;

	if (cmd != PACKET_CTRL_CMD)
		return -ENOTTY;

	if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command)))
		return -EFAULT;

	switch (ctrl_cmd.command) {
	case PKT_CTRL_CMD_SETUP:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
		ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
		break;
	case PKT_CTRL_CMD_TEARDOWN:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
		break;
	case PKT_CTRL_CMD_STATUS:
		pkt_get_status(&ctrl_cmd);
		break;
	default:
		return -ENOTTY;
	}

	if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command)))
		return -EFAULT;
	return ret;
}
#ifdef CONFIG_COMPAT
static long pkt_ctl_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return pkt_ctl_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif
static const struct file_operations pkt_ctl_fops = {
	.open		= nonseekable_open,
	.unlocked_ioctl	= pkt_ctl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= pkt_ctl_compat_ioctl,
#endif
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
};
static struct miscdevice pkt_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= DRIVER_NAME,
	.nodename	= "pktcdvd/control",
	.fops		= &pkt_ctl_fops
};
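/*
 * Userspace drives this control node the way a tool such as pktsetup(8)
 * does. An illustrative sketch (error handling trimmed; requires
 * CAP_SYS_ADMIN and the ABI definitions from <linux/pktcdvd.h>):
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/pktcdvd.h>
 *
 *	struct pkt_ctrl_command c = { .command = PKT_CTRL_CMD_SETUP };
 *	int fd = open("/dev/pktcdvd/control", O_RDONLY);
 *
 *	c.dev = (11 << 8) | 0;
 *	ioctl(fd, PACKET_CTRL_CMD, &c);
 *
 * Here (11 << 8) | 0 is /dev/sr0 (11:0) in the "new" dev_t encoding that
 * new_decode_dev() expects; this simple form assumes small major/minor
 * numbers. On success, c.pkt_dev holds the encoded device number of the
 * newly created pktcdvd node.
 */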
static int __init pkt_init(void)
{
	int ret;

	mutex_init(&ctl_mutex);

	ret = mempool_init_kmalloc_pool(&psd_pool, PSD_POOL_SIZE,
					sizeof(struct packet_stacked_data));
	if (ret)
		return ret;
	ret = bioset_init(&pkt_bio_set, BIO_POOL_SIZE, 0, 0);
	if (ret) {
		mempool_exit(&psd_pool);
		return ret;
	}

	ret = register_blkdev(pktdev_major, DRIVER_NAME);
	if (ret < 0) {
		pr_err("unable to register block device\n");
		goto out2;
	}
	if (!pktdev_major)
		pktdev_major = ret;

	ret = pkt_sysfs_init();
	if (ret)
		goto out;

	pkt_debugfs_init();

	ret = misc_register(&pkt_misc);
	if (ret) {
		pr_err("unable to register misc device\n");
		goto out_misc;
	}

	pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL);

	return 0;

out_misc:
	pkt_debugfs_cleanup();
	pkt_sysfs_cleanup();
out:
	unregister_blkdev(pktdev_major, DRIVER_NAME);
out2:
	mempool_exit(&psd_pool);
	bioset_exit(&pkt_bio_set);
	return ret;
}
static void __exit pkt_exit(void)
{
	remove_proc_entry("driver/"DRIVER_NAME, NULL);
	misc_deregister(&pkt_misc);

	pkt_debugfs_cleanup();
	pkt_sysfs_cleanup();

	unregister_blkdev(pktdev_major, DRIVER_NAME);

	mempool_exit(&psd_pool);
	bioset_exit(&pkt_bio_set);
}
MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
MODULE_LICENSE("GPL");

module_init(pkt_init);
module_exit(pkt_exit);