/*
 * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
 * Copyright (C) 2006 Thomas Maier <balagi@justmail.de>
 *
 * May be copied or modified under the terms of the GNU General Public
 * License.  See linux/COPYING for more information.
 *
 * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and
 * DVD-RAM devices.
 *
 * Theory of operation:
 *
 * At the lowest level, there is the standard driver for the CD/DVD device,
 * typically ide-cd.c or sr.c. This driver can handle read and write requests,
 * but it doesn't know anything about the special restrictions that apply to
 * packet writing. One restriction is that write requests must be aligned to
 * packet boundaries on the physical media, and the size of a write request
 * must be equal to the packet size. Another restriction is that a
 * GPCMD_FLUSH_CACHE command has to be issued to the drive before a read
 * command, if the previous command was a write.
 *
 * The purpose of the packet writing driver is to hide these restrictions from
 * higher layers, such as file systems, and present a block device that can be
 * randomly read and written using 2kB-sized blocks.
 *
 * The lowest layer in the packet writing driver is the packet I/O scheduler.
 * Its data is defined by the struct packet_iosched and includes two bio
 * queues with pending read and write requests. These queues are processed
 * by the pkt_iosched_process_queue() function. The write requests in this
 * queue are already properly aligned and sized. This layer is responsible for
 * issuing the flush cache commands and scheduling the I/O in a good order.
 *
 * The next layer transforms unaligned write requests to aligned writes. This
 * transformation requires reading missing pieces of data from the underlying
 * block device, assembling the pieces to full packets and queuing them to the
 * packet I/O scheduler.
 *
 * At the top layer there is a custom ->submit_bio function that forwards
 * read requests directly to the iosched queue and puts write requests in the
 * unaligned write queue. A kernel thread performs the necessary read
 * gathering to convert the unaligned writes to aligned writes and then feeds
 * them to the packet I/O scheduler.
 *
 *************************************************************************/
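
/*
 * Worked example (illustrative, not part of the driver): with 32-frame
 * (64kB) packets, a lone 4kB write into the middle of a zone is queued
 * on the unaligned write queue by ->submit_bio. The worker thread then
 * reads the remaining 60kB of the zone from the media (read gathering),
 * merges in the new 4kB, and hands a single aligned 64kB write to the
 * packet I/O scheduler.
 */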

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/pktcdvd.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/nospec.h>
#include <linux/uaccess.h>

#define DRIVER_NAME     "pktcdvd"

#define pkt_err(pd, fmt, ...)                                           \
        pr_err("%s: " fmt, pd->name, ##__VA_ARGS__)
#define pkt_notice(pd, fmt, ...)                                        \
        pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__)
#define pkt_info(pd, fmt, ...)                                          \
        pr_info("%s: " fmt, pd->name, ##__VA_ARGS__)

#define pkt_dbg(level, pd, fmt, ...)                                    \
do {                                                                    \
        if (level == 2 && PACKET_DEBUG >= 2)                            \
                pr_notice("%s: %s():" fmt,                              \
                          pd->name, __func__, ##__VA_ARGS__);           \
        else if (level == 1 && PACKET_DEBUG >= 1)                       \
                pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__);         \
} while (0)
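
/*
 * Usage sketch for the logging helpers above (illustrative):
 *
 *      pkt_err(pd, "cdrom max_phys_segments too small\n");
 *      pkt_dbg(2, pd, "queue empty\n");
 *
 * Level 1 is meant for normal debugging, level 2 for verbose per-bio
 * tracing; level 2 additionally prints __func__. A message is emitted
 * only when PACKET_DEBUG is at least the requested level.
 */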

#define MAX_SPEED 0xffff

static DEFINE_MUTEX(pktcdvd_mutex);
static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
static struct proc_dir_entry *pkt_proc;
static int pktdev_major;
static int write_congestion_on  = PKT_WRITE_CONGESTION_ON;
static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
static struct mutex ctl_mutex;  /* Serialize open/close/setup/teardown */
static mempool_t psd_pool;
static struct bio_set pkt_bio_set;

static struct class     *class_pktcdvd = NULL;    /* /sys/class/pktcdvd */
static struct dentry    *pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */

/* forward declaration */
static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev);
static int pkt_remove_dev(dev_t pkt_dev);
static int pkt_seq_show(struct seq_file *m, void *p);

static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
{
        return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
}
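
/*
 * Example (illustrative): with pd->settings.size == 128 sectors (a 64kB
 * packet) and pd->offset == 0, a bio starting at sector 300 belongs to
 * zone 300 & ~127 == 256, i.e. the packet covering sectors 256..383.
 */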

/**********************************************************
 * sysfs interface for pktcdvd
 * by (C) 2006  Thomas Maier <balagi@justmail.de>

  /sys/class/pktcdvd/pktcdvd[0-7]/
                     stat/reset
                     stat/packets_started
                     stat/packets_finished
                     stat/kb_written
                     stat/kb_read
                     stat/kb_read_gather
                     write_queue/size
                     write_queue/congestion_off
                     write_queue/congestion_on
 **********************************************************/

static ssize_t packets_started_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct pktcdvd_device *pd = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%lu\n", pd->stats.pkt_started);
}
static DEVICE_ATTR_RO(packets_started);

static ssize_t packets_finished_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        struct pktcdvd_device *pd = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%lu\n", pd->stats.pkt_ended);
}
static DEVICE_ATTR_RO(packets_finished);

static ssize_t kb_written_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        struct pktcdvd_device *pd = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%lu\n", pd->stats.secs_w >> 1);
}
static DEVICE_ATTR_RO(kb_written);

static ssize_t kb_read_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct pktcdvd_device *pd = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%lu\n", pd->stats.secs_r >> 1);
}
static DEVICE_ATTR_RO(kb_read);

static ssize_t kb_read_gather_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct pktcdvd_device *pd = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%lu\n", pd->stats.secs_rg >> 1);
}
static DEVICE_ATTR_RO(kb_read_gather);

static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t len)
{
        struct pktcdvd_device *pd = dev_get_drvdata(dev);

        if (len > 0) {
                pd->stats.pkt_started = 0;
                pd->stats.pkt_ended = 0;
                pd->stats.secs_w = 0;
                pd->stats.secs_rg = 0;
                pd->stats.secs_r = 0;
        }
        return len;
}
static DEVICE_ATTR_WO(reset);

static struct attribute *pkt_stat_attrs[] = {
        &dev_attr_packets_finished.attr,
        &dev_attr_packets_started.attr,
        &dev_attr_kb_read.attr,
        &dev_attr_kb_written.attr,
        &dev_attr_kb_read_gather.attr,
        &dev_attr_reset.attr,
        NULL,
};

static const struct attribute_group pkt_stat_group = {
        .name = "stat",
        .attrs = pkt_stat_attrs,
};

static ssize_t size_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        struct pktcdvd_device *pd = dev_get_drvdata(dev);
        int n;

        spin_lock(&pd->lock);
        n = sysfs_emit(buf, "%d\n", pd->bio_queue_size);
        spin_unlock(&pd->lock);
        return n;
}
static DEVICE_ATTR_RO(size);

static void init_write_congestion_marks(int* lo, int* hi)
{
        if (*hi > 0) {
                *hi = max(*hi, 500);
                *hi = min(*hi, 1000000);
                if (*lo <= 0)
                        *lo = *hi - 100;
                else {
                        *lo = min(*lo, *hi - 100);
                        *lo = max(*lo, 100);
                }
        } else {
                *hi = -1;
                *lo = -1;
        }
}
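
/*
 * Example (illustrative): for *hi == 2000 and *lo == 0 the on-mark stays
 * at 2000 (already within [500, 1000000]) and the off-mark is derived as
 * 2000 - 100 == 1900. A non-positive *hi disables congestion handling by
 * setting both marks to -1.
 */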

static ssize_t congestion_off_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct pktcdvd_device *pd = dev_get_drvdata(dev);
        int n;

        spin_lock(&pd->lock);
        n = sysfs_emit(buf, "%d\n", pd->write_congestion_off);
        spin_unlock(&pd->lock);
        return n;
}

static ssize_t congestion_off_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t len)
{
        struct pktcdvd_device *pd = dev_get_drvdata(dev);
        int val;

        if (sscanf(buf, "%d", &val) == 1) {
                spin_lock(&pd->lock);
                pd->write_congestion_off = val;
                init_write_congestion_marks(&pd->write_congestion_off,
                                        &pd->write_congestion_on);
                spin_unlock(&pd->lock);
        }
        return len;
}
static DEVICE_ATTR_RW(congestion_off);

static ssize_t congestion_on_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        struct pktcdvd_device *pd = dev_get_drvdata(dev);
        int n;

        spin_lock(&pd->lock);
        n = sysfs_emit(buf, "%d\n", pd->write_congestion_on);
        spin_unlock(&pd->lock);
        return n;
}

static ssize_t congestion_on_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t len)
{
        struct pktcdvd_device *pd = dev_get_drvdata(dev);
        int val;

        if (sscanf(buf, "%d", &val) == 1) {
                spin_lock(&pd->lock);
                pd->write_congestion_on = val;
                init_write_congestion_marks(&pd->write_congestion_off,
                                        &pd->write_congestion_on);
                spin_unlock(&pd->lock);
        }
        return len;
}
static DEVICE_ATTR_RW(congestion_on);

static struct attribute *pkt_wq_attrs[] = {
        &dev_attr_congestion_on.attr,
        &dev_attr_congestion_off.attr,
        &dev_attr_size.attr,
        NULL,
};

static const struct attribute_group pkt_wq_group = {
        .name = "write_queue",
        .attrs = pkt_wq_attrs,
};

static const struct attribute_group *pkt_groups[] = {
        &pkt_stat_group,
        &pkt_wq_group,
        NULL,
};

static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
{
        if (class_pktcdvd) {
                pd->dev = device_create_with_groups(class_pktcdvd, NULL,
                                                    MKDEV(0, 0), pd, pkt_groups,
                                                    "%s", pd->name);
                if (IS_ERR(pd->dev))
                        pd->dev = NULL;
        }
}

static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
{
        if (class_pktcdvd)
                device_unregister(pd->dev);
}


/********************************************************************
  /sys/class/pktcdvd/
                     add            map block device
                     remove         unmap packet dev
                     device_map     show mappings
 *******************************************************************/
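
/*
 * Usage sketch (illustrative, assuming the backing drive is /dev/sr0
 * with dev_t 11:0; the pktcdvd major shown is allocated dynamically):
 *
 *      # echo "11:0" > /sys/class/pktcdvd/add
 *      # cat /sys/class/pktcdvd/device_map
 *      pktcdvd0 253:0 11:0
 *      # echo "11:0" > /sys/class/pktcdvd/remove
 *
 * The "major:minor" strings are parsed by add_store()/remove_store()
 * below.
 */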

static void class_pktcdvd_release(struct class *cls)
{
        kfree(cls);
}

static ssize_t device_map_show(struct class *c, struct class_attribute *attr,
                               char *data)
{
        int n = 0;
        int idx;
        mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
        for (idx = 0; idx < MAX_WRITERS; idx++) {
                struct pktcdvd_device *pd = pkt_devs[idx];
                if (!pd)
                        continue;
                n += sprintf(data+n, "%s %u:%u %u:%u\n",
                        pd->name,
                        MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
                        MAJOR(pd->bdev->bd_dev),
                        MINOR(pd->bdev->bd_dev));
        }
        mutex_unlock(&ctl_mutex);
        return n;
}
static CLASS_ATTR_RO(device_map);

static ssize_t add_store(struct class *c, struct class_attribute *attr,
                         const char *buf, size_t count)
{
        unsigned int major, minor;

        if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
                /* pkt_setup_dev() expects caller to hold reference to self */
                if (!try_module_get(THIS_MODULE))
                        return -ENODEV;

                pkt_setup_dev(MKDEV(major, minor), NULL);

                module_put(THIS_MODULE);

                return count;
        }

        return -EINVAL;
}
static CLASS_ATTR_WO(add);

static ssize_t remove_store(struct class *c, struct class_attribute *attr,
                            const char *buf, size_t count)
{
        unsigned int major, minor;
        if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
                pkt_remove_dev(MKDEV(major, minor));
                return count;
        }
        return -EINVAL;
}
static CLASS_ATTR_WO(remove);

static struct attribute *class_pktcdvd_attrs[] = {
        &class_attr_add.attr,
        &class_attr_remove.attr,
        &class_attr_device_map.attr,
        NULL,
};
ATTRIBUTE_GROUPS(class_pktcdvd);

static int pkt_sysfs_init(void)
{
        int ret = 0;

        /*
         * create control files in sysfs
         * /sys/class/pktcdvd/...
         */
        class_pktcdvd = kzalloc(sizeof(*class_pktcdvd), GFP_KERNEL);
        if (!class_pktcdvd)
                return -ENOMEM;
        class_pktcdvd->name = DRIVER_NAME;
        class_pktcdvd->owner = THIS_MODULE;
        class_pktcdvd->class_release = class_pktcdvd_release;
        class_pktcdvd->class_groups = class_pktcdvd_groups;
        ret = class_register(class_pktcdvd);
        if (ret) {
                kfree(class_pktcdvd);
                class_pktcdvd = NULL;
                pr_err("failed to create class pktcdvd\n");
                return ret;
        }
        return 0;
}

static void pkt_sysfs_cleanup(void)
{
        if (class_pktcdvd)
                class_destroy(class_pktcdvd);
        class_pktcdvd = NULL;
}

/********************************************************************
  entries in debugfs

  /sys/kernel/debug/pktcdvd[0-7]/
                        info

 *******************************************************************/

static int pkt_debugfs_seq_show(struct seq_file *m, void *p)
{
        return pkt_seq_show(m, p);
}

static int pkt_debugfs_fops_open(struct inode *inode, struct file *file)
{
        return single_open(file, pkt_debugfs_seq_show, inode->i_private);
}

static const struct file_operations debug_fops = {
        .open           = pkt_debugfs_fops_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
        .owner          = THIS_MODULE,
};

static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
{
        if (!pkt_debugfs_root)
                return;
        pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root);
        if (!pd->dfs_d_root)
                return;

        pd->dfs_f_info = debugfs_create_file("info", 0444,
                                             pd->dfs_d_root, pd, &debug_fops);
}

static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
{
        if (!pkt_debugfs_root)
                return;
        debugfs_remove(pd->dfs_f_info);
        debugfs_remove(pd->dfs_d_root);
        pd->dfs_f_info = NULL;
        pd->dfs_d_root = NULL;
}

static void pkt_debugfs_init(void)
{
        pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL);
}

static void pkt_debugfs_cleanup(void)
{
        debugfs_remove(pkt_debugfs_root);
        pkt_debugfs_root = NULL;
}

/* ----------------------------------------------------------*/


static void pkt_bio_finished(struct pktcdvd_device *pd)
{
        BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
        if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
                pkt_dbg(2, pd, "queue empty\n");
                atomic_set(&pd->iosched.attention, 1);
                wake_up(&pd->wqueue);
        }
}

/*
 * Allocate a packet_data struct
 */
static struct packet_data *pkt_alloc_packet_data(int frames)
{
        int i;
        struct packet_data *pkt;

        pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
        if (!pkt)
                goto no_pkt;

        pkt->frames = frames;
        pkt->w_bio = bio_kmalloc(GFP_KERNEL, frames);
        if (!pkt->w_bio)
                goto no_bio;

        for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
                pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
                if (!pkt->pages[i])
                        goto no_page;
        }

        spin_lock_init(&pkt->lock);
        bio_list_init(&pkt->orig_bios);

        for (i = 0; i < frames; i++) {
                struct bio *bio = bio_kmalloc(GFP_KERNEL, 1);
                if (!bio)
                        goto no_rd_bio;

                pkt->r_bios[i] = bio;
        }

        return pkt;

no_rd_bio:
        for (i = 0; i < frames; i++) {
                struct bio *bio = pkt->r_bios[i];
                if (bio)
                        bio_put(bio);
        }

no_page:
        for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
                if (pkt->pages[i])
                        __free_page(pkt->pages[i]);
        bio_put(pkt->w_bio);
no_bio:
        kfree(pkt);
no_pkt:
        return NULL;
}
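
/*
 * Sizing example (illustrative, assuming 4kB pages): for a 32-frame
 * packet, FRAMES_PER_PAGE is PAGE_SIZE / CD_FRAMESIZE == 2, so the
 * allocation above holds 16 zeroed pages plus one 32-vec write bio and
 * 32 single-vec read bios.
 */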

/*
 * Free a packet_data struct
 */
static void pkt_free_packet_data(struct packet_data *pkt)
{
        int i;

        for (i = 0; i < pkt->frames; i++) {
                struct bio *bio = pkt->r_bios[i];
                if (bio)
                        bio_put(bio);
        }
        for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
                __free_page(pkt->pages[i]);
        bio_put(pkt->w_bio);
        kfree(pkt);
}

static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
{
        struct packet_data *pkt, *next;

        BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));

        list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
                pkt_free_packet_data(pkt);
        }
        INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
}

static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
{
        struct packet_data *pkt;

        BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));

        while (nr_packets > 0) {
                pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
                if (!pkt) {
                        pkt_shrink_pktlist(pd);
                        return 0;
                }
                pkt->id = nr_packets;
                pkt->pd = pd;
                list_add(&pkt->list, &pd->cdrw.pkt_free_list);
                nr_packets--;
        }
        return 1;
}

static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
{
        struct rb_node *n = rb_next(&node->rb_node);
        if (!n)
                return NULL;
        return rb_entry(n, struct pkt_rb_node, rb_node);
}

static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
        rb_erase(&node->rb_node, &pd->bio_queue);
        mempool_free(node, &pd->rb_pool);
        pd->bio_queue_size--;
        BUG_ON(pd->bio_queue_size < 0);
}

/*
 * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
 */
static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
{
        struct rb_node *n = pd->bio_queue.rb_node;
        struct rb_node *next;
        struct pkt_rb_node *tmp;

        if (!n) {
                BUG_ON(pd->bio_queue_size > 0);
                return NULL;
        }

        for (;;) {
                tmp = rb_entry(n, struct pkt_rb_node, rb_node);
                if (s <= tmp->bio->bi_iter.bi_sector)
                        next = n->rb_left;
                else
                        next = n->rb_right;
                if (!next)
                        break;
                n = next;
        }

        if (s > tmp->bio->bi_iter.bi_sector) {
                tmp = pkt_rbtree_next(tmp);
                if (!tmp)
                        return NULL;
        }
        BUG_ON(s > tmp->bio->bi_iter.bi_sector);
        return tmp;
}

/*
 * Insert a node into the pd->bio_queue rb tree.
 */
static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
        struct rb_node **p = &pd->bio_queue.rb_node;
        struct rb_node *parent = NULL;
        sector_t s = node->bio->bi_iter.bi_sector;
        struct pkt_rb_node *tmp;

        while (*p) {
                parent = *p;
                tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
                if (s < tmp->bio->bi_iter.bi_sector)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }
        rb_link_node(&node->rb_node, parent, p);
        rb_insert_color(&node->rb_node, &pd->bio_queue);
        pd->bio_queue_size++;
}

/*
 * Send a packet_command to the underlying block device and
 * wait for completion.
 */
static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
{
        struct request_queue *q = bdev_get_queue(pd->bdev);
        struct scsi_cmnd *scmd;
        struct request *rq;
        int ret = 0;

        rq = scsi_alloc_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
                             REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
        if (IS_ERR(rq))
                return PTR_ERR(rq);
        scmd = blk_mq_rq_to_pdu(rq);

        if (cgc->buflen) {
                ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
                                      GFP_NOIO);
                if (ret)
                        goto out;
        }

        scmd->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
        memcpy(scmd->cmnd, cgc->cmd, CDROM_PACKET_SIZE);

        rq->timeout = 60*HZ;
        if (cgc->quiet)
                rq->rq_flags |= RQF_QUIET;

        blk_execute_rq(rq, false);
        if (scmd->result)
                ret = -EIO;
out:
        blk_mq_free_request(rq);
        return ret;
}

static const char *sense_key_string(__u8 index)
{
        static const char * const info[] = {
                "No sense", "Recovered error", "Not ready",
                "Medium error", "Hardware error", "Illegal request",
                "Unit attention", "Data protect", "Blank check",
        };

        return index < ARRAY_SIZE(info) ? info[index] : "INVALID";
}

/*
 * A generic sense dump / resolve mechanism should be implemented across
 * all ATAPI + SCSI devices.
 */
static void pkt_dump_sense(struct pktcdvd_device *pd,
                           struct packet_command *cgc)
{
        struct scsi_sense_hdr *sshdr = cgc->sshdr;

        if (sshdr)
                pkt_err(pd, "%*ph - sense %02x.%02x.%02x (%s)\n",
                        CDROM_PACKET_SIZE, cgc->cmd,
                        sshdr->sense_key, sshdr->asc, sshdr->ascq,
                        sense_key_string(sshdr->sense_key));
        else
                pkt_err(pd, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
}

/*
 * flush the drive cache to media
 */
static int pkt_flush_cache(struct pktcdvd_device *pd)
{
        struct packet_command cgc;

        init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
        cgc.cmd[0] = GPCMD_FLUSH_CACHE;
        cgc.quiet = 1;

        /*
         * the IMMED bit -- we default to not setting it, although that
         * would allow a much faster close, this is safer
         */
#if 0
        cgc.cmd[1] = 1 << 1;
#endif
        return pkt_generic_packet(pd, &cgc);
}

/*
 * speed is given as the normal factor, e.g. 4 for 4x
 */
static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
                                unsigned write_speed, unsigned read_speed)
{
        struct packet_command cgc;
        struct scsi_sense_hdr sshdr;
        int ret;

        init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
        cgc.sshdr = &sshdr;
        cgc.cmd[0] = GPCMD_SET_SPEED;
        cgc.cmd[2] = (read_speed >> 8) & 0xff;
        cgc.cmd[3] = read_speed & 0xff;
        cgc.cmd[4] = (write_speed >> 8) & 0xff;
        cgc.cmd[5] = write_speed & 0xff;

        ret = pkt_generic_packet(pd, &cgc);
        if (ret)
                pkt_dump_sense(pd, &cgc);

        return ret;
}

/*
 * Queue a bio for processing by the low-level CD device. Must be called
 * from process context.
 */
static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
{
        spin_lock(&pd->iosched.lock);
        if (bio_data_dir(bio) == READ)
                bio_list_add(&pd->iosched.read_queue, bio);
        else
                bio_list_add(&pd->iosched.write_queue, bio);
        spin_unlock(&pd->iosched.lock);

        atomic_set(&pd->iosched.attention, 1);
        wake_up(&pd->wqueue);
}

/*
 * Process the queued read/write requests. This function handles special
 * requirements for CDRW drives:
 * - A cache flush command must be inserted before a read request if the
 *   previous request was a write.
 * - Switching between reading and writing is slow, so don't do it more often
 *   than necessary.
 * - Optimize for throughput at the expense of latency. This means that streaming
 *   writes will never be interrupted by a read, but if the drive has to seek
 *   before the next write, switch to reading instead if there are any pending
 *   read requests.
 * - Set the read speed according to current usage pattern. When only reading
 *   from the device, it's best to use the highest possible read speed, but
 *   when switching often between reading and writing, it's better to have the
 *   same read and write speeds.
 */
static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
{

        if (atomic_read(&pd->iosched.attention) == 0)
                return;
        atomic_set(&pd->iosched.attention, 0);

        for (;;) {
                struct bio *bio;
                int reads_queued, writes_queued;

                spin_lock(&pd->iosched.lock);
                reads_queued = !bio_list_empty(&pd->iosched.read_queue);
                writes_queued = !bio_list_empty(&pd->iosched.write_queue);
                spin_unlock(&pd->iosched.lock);

                if (!reads_queued && !writes_queued)
                        break;

                if (pd->iosched.writing) {
                        int need_write_seek = 1;
                        spin_lock(&pd->iosched.lock);
                        bio = bio_list_peek(&pd->iosched.write_queue);
                        spin_unlock(&pd->iosched.lock);
                        if (bio && (bio->bi_iter.bi_sector ==
                                    pd->iosched.last_write))
                                need_write_seek = 0;
                        if (need_write_seek && reads_queued) {
                                if (atomic_read(&pd->cdrw.pending_bios) > 0) {
                                        pkt_dbg(2, pd, "write, waiting\n");
                                        break;
                                }
                                pkt_flush_cache(pd);
                                pd->iosched.writing = 0;
                        }
                } else {
                        if (!reads_queued && writes_queued) {
                                if (atomic_read(&pd->cdrw.pending_bios) > 0) {
                                        pkt_dbg(2, pd, "read, waiting\n");
                                        break;
                                }
                                pd->iosched.writing = 1;
                        }
                }

                spin_lock(&pd->iosched.lock);
                if (pd->iosched.writing)
                        bio = bio_list_pop(&pd->iosched.write_queue);
                else
                        bio = bio_list_pop(&pd->iosched.read_queue);
                spin_unlock(&pd->iosched.lock);

                if (!bio)
                        continue;

                if (bio_data_dir(bio) == READ)
                        pd->iosched.successive_reads +=
                                bio->bi_iter.bi_size >> 10;
                else {
                        pd->iosched.successive_reads = 0;
                        pd->iosched.last_write = bio_end_sector(bio);
                }
                if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
                        if (pd->read_speed == pd->write_speed) {
                                pd->read_speed = MAX_SPEED;
                                pkt_set_speed(pd, pd->write_speed, pd->read_speed);
                        }
                } else {
                        if (pd->read_speed != pd->write_speed) {
                                pd->read_speed = pd->write_speed;
                                pkt_set_speed(pd, pd->write_speed, pd->read_speed);
                        }
                }

                atomic_inc(&pd->cdrw.pending_bios);
                submit_bio_noacct(bio);
        }
}
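
/*
 * Scheduling example (illustrative): while a write stream is sequential
 * (the next queued write starts exactly at iosched.last_write), pending
 * reads are held back indefinitely. Only when the next write would force
 * a seek does the scheduler drain in-flight bios, issue a cache flush
 * and switch to the read queue, since flush plus direction switch is
 * expensive on CD-RW drives.
 */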

/*
 * Special care is needed if the underlying block device has a small
 * max_phys_segments value.
 */
static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
{
        if ((pd->settings.size << 9) / CD_FRAMESIZE
            <= queue_max_segments(q)) {
                /*
                 * The cdrom device can handle one segment/frame
                 */
                clear_bit(PACKET_MERGE_SEGS, &pd->flags);
                return 0;
        } else if ((pd->settings.size << 9) / PAGE_SIZE
                   <= queue_max_segments(q)) {
                /*
                 * We can handle this case at the expense of some extra memory
                 * copies during write operations
                 */
                set_bit(PACKET_MERGE_SEGS, &pd->flags);
                return 0;
        } else {
                pkt_err(pd, "cdrom max_phys_segments too small\n");
                return -EIO;
        }
}
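
/*
 * Example (illustrative): a 128-sector (64kB) packet spans
 * (128 << 9) / CD_FRAMESIZE == 32 frames. A queue reporting at least 32
 * segments needs no merging; one reporting at least
 * (128 << 9) / PAGE_SIZE == 16 segments (with 4kB pages) works with
 * PACKET_MERGE_SEGS at the cost of extra copies; anything smaller is
 * rejected with -EIO.
 */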

static void pkt_end_io_read(struct bio *bio)
{
        struct packet_data *pkt = bio->bi_private;
        struct pktcdvd_device *pd = pkt->pd;
        BUG_ON(!pd);

        pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
                bio, (unsigned long long)pkt->sector,
                (unsigned long long)bio->bi_iter.bi_sector, bio->bi_status);

        if (bio->bi_status)
                atomic_inc(&pkt->io_errors);
        if (atomic_dec_and_test(&pkt->io_wait)) {
                atomic_inc(&pkt->run_sm);
                wake_up(&pd->wqueue);
        }
        pkt_bio_finished(pd);
}

static void pkt_end_io_packet_write(struct bio *bio)
{
        struct packet_data *pkt = bio->bi_private;
        struct pktcdvd_device *pd = pkt->pd;
        BUG_ON(!pd);

        pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_status);

        pd->stats.pkt_ended++;

        pkt_bio_finished(pd);
        atomic_dec(&pkt->io_wait);
        atomic_inc(&pkt->run_sm);
        wake_up(&pd->wqueue);
}

/*
 * Schedule reads for the holes in a packet
 */
static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
        int frames_read = 0;
        struct bio *bio;
        int f;
        char written[PACKET_MAX_SIZE];

        BUG_ON(bio_list_empty(&pkt->orig_bios));

        atomic_set(&pkt->io_wait, 0);
        atomic_set(&pkt->io_errors, 0);

        /*
         * Figure out which frames we need to read before we can write.
         */
        memset(written, 0, sizeof(written));
        spin_lock(&pkt->lock);
        bio_list_for_each(bio, &pkt->orig_bios) {
                int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
                        (CD_FRAMESIZE >> 9);
                int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
                pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
                BUG_ON(first_frame < 0);
                BUG_ON(first_frame + num_frames > pkt->frames);
                for (f = first_frame; f < first_frame + num_frames; f++)
                        written[f] = 1;
        }
        spin_unlock(&pkt->lock);

        if (pkt->cache_valid) {
                pkt_dbg(2, pd, "zone %llx cached\n",
                        (unsigned long long)pkt->sector);
                goto out_account;
        }

        /*
         * Schedule reads for missing parts of the packet.
         */
        for (f = 0; f < pkt->frames; f++) {
                int p, offset;

                if (written[f])
                        continue;

                bio = pkt->r_bios[f];
                bio_reset(bio, pd->bdev, REQ_OP_READ);
                bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
                bio->bi_end_io = pkt_end_io_read;
                bio->bi_private = pkt;

                p = (f * CD_FRAMESIZE) / PAGE_SIZE;
                offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
                pkt_dbg(2, pd, "Adding frame %d, page:%p offs:%d\n",
                        f, pkt->pages[p], offset);
                if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
                        BUG();

                atomic_inc(&pkt->io_wait);
                pkt_queue_bio(pd, bio);
                frames_read++;
        }

out_account:
        pkt_dbg(2, pd, "need %d frames for zone %llx\n",
                frames_read, (unsigned long long)pkt->sector);
        pd->stats.pkt_started++;
        pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
}

/*
 * Find a packet matching zone, or the least recently used packet if
 * there is no match.
 */
static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
{
        struct packet_data *pkt;

        list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
                if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
                        list_del_init(&pkt->list);
                        if (pkt->sector != zone)
                                pkt->cache_valid = 0;
                        return pkt;
                }
        }
        BUG();
        return NULL;
}

static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
        if (pkt->cache_valid) {
                list_add(&pkt->list, &pd->cdrw.pkt_free_list);
        } else {
                list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
        }
}

static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state)
{
#if PACKET_DEBUG > 1
        static const char *state_name[] = {
                "IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
        };
        enum packet_data_state old_state = pkt->state;
        pkt_dbg(2, pd, "pkt %2d : s=%6llx %s -> %s\n",
                pkt->id, (unsigned long long)pkt->sector,
                state_name[old_state], state_name[state]);
#endif
        pkt->state = state;
}

/*
 * Scan the work queue to see if we can start a new packet.
 * returns non-zero if any work was done.
 */
static int pkt_handle_queue(struct pktcdvd_device *pd)
{
        struct packet_data *pkt, *p;
        struct bio *bio = NULL;
        sector_t zone = 0; /* Suppress gcc warning */
        struct pkt_rb_node *node, *first_node;
        struct rb_node *n;

        atomic_set(&pd->scan_queue, 0);

        if (list_empty(&pd->cdrw.pkt_free_list)) {
                pkt_dbg(2, pd, "no pkt\n");
                return 0;
        }

        /*
         * Try to find a zone we are not already working on.
         */
        spin_lock(&pd->lock);
        first_node = pkt_rbtree_find(pd, pd->current_sector);
        if (!first_node) {
                n = rb_first(&pd->bio_queue);
                if (n)
                        first_node = rb_entry(n, struct pkt_rb_node, rb_node);
        }
        node = first_node;
        while (node) {
                bio = node->bio;
                zone = get_zone(bio->bi_iter.bi_sector, pd);
                list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
                        if (p->sector == zone) {
                                bio = NULL;
                                goto try_next_bio;
                        }
                }
                break;
try_next_bio:
                node = pkt_rbtree_next(node);
                if (!node) {
                        n = rb_first(&pd->bio_queue);
                        if (n)
                                node = rb_entry(n, struct pkt_rb_node, rb_node);
                }
                if (node == first_node)
                        node = NULL;
        }
        spin_unlock(&pd->lock);
        if (!bio) {
                pkt_dbg(2, pd, "no bio\n");
                return 0;
        }

        pkt = pkt_get_packet_data(pd, zone);

        pd->current_sector = zone + pd->settings.size;
        pkt->sector = zone;
        BUG_ON(pkt->frames != pd->settings.size >> 2);
        pkt->write_size = 0;

        /*
         * Scan work queue for bios in the same zone and link them
         * to this packet.
         */
        spin_lock(&pd->lock);
        pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
        while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
                bio = node->bio;
                pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long)
                        get_zone(bio->bi_iter.bi_sector, pd));
                if (get_zone(bio->bi_iter.bi_sector, pd) != zone)
                        break;
                pkt_rbtree_erase(pd, node);
                spin_lock(&pkt->lock);
                bio_list_add(&pkt->orig_bios, bio);
                pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
                spin_unlock(&pkt->lock);
        }
        /* check write congestion marks, and if bio_queue_size is
         * below, wake up any waiters
         */
        if (pd->congested &&
            pd->bio_queue_size <= pd->write_congestion_off) {
                pd->congested = false;
                wake_up_var(&pd->congested);
        }
        spin_unlock(&pd->lock);

        pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
        pkt_set_state(pkt, PACKET_WAITING_STATE);
        atomic_set(&pkt->run_sm, 1);

        spin_lock(&pd->cdrw.active_list_lock);
        list_add(&pkt->list, &pd->cdrw.pkt_active_list);
        spin_unlock(&pd->cdrw.active_list_lock);

        return 1;
}
/**
 * bio_list_copy_data - copy contents of data buffers from one chain of bios to
 * another
 * @dst: destination bio list
 * @src: source bio list
 *
 * Stops when it reaches the end of either the @src list or @dst list - that is,
 * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
 * bios).
 */
static void bio_list_copy_data(struct bio *dst, struct bio *src)
{
        struct bvec_iter src_iter = src->bi_iter;
        struct bvec_iter dst_iter = dst->bi_iter;

        while (1) {
                if (!src_iter.bi_size) {
                        src = src->bi_next;
                        if (!src)
                                break;

                        src_iter = src->bi_iter;
                }

                if (!dst_iter.bi_size) {
                        dst = dst->bi_next;
                        if (!dst)
                                break;

                        dst_iter = dst->bi_iter;
                }

                bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
        }
}

/*
 * Assemble a bio to write one packet and queue the bio for processing
 * by the underlying block device.
 */
static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
{
        int f;

        bio_reset(pkt->w_bio, pd->bdev, REQ_OP_WRITE);
        pkt->w_bio->bi_iter.bi_sector = pkt->sector;
        pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
        pkt->w_bio->bi_private = pkt;

        /* XXX: locking? */
        for (f = 0; f < pkt->frames; f++) {
                struct page *page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
                unsigned offset = (f * CD_FRAMESIZE) % PAGE_SIZE;

                if (!bio_add_page(pkt->w_bio, page, CD_FRAMESIZE, offset))
                        BUG();
        }
        pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt);

        /*
         * Fill-in bvec with data from orig_bios.
         */
        spin_lock(&pkt->lock);
        bio_list_copy_data(pkt->w_bio, pkt->orig_bios.head);

        pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
        spin_unlock(&pkt->lock);

        pkt_dbg(2, pd, "Writing %d frames for zone %llx\n",
                pkt->write_size, (unsigned long long)pkt->sector);

        if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames))
                pkt->cache_valid = 1;
        else
                pkt->cache_valid = 0;

        /* Start the write request */
        atomic_set(&pkt->io_wait, 1);
        pkt_queue_bio(pd, pkt->w_bio);
}

static void pkt_finish_packet(struct packet_data *pkt, blk_status_t status)
{
        struct bio *bio;

        if (status)
                pkt->cache_valid = 0;

        /* Finish all bios corresponding to this packet */
        while ((bio = bio_list_pop(&pkt->orig_bios))) {
                bio->bi_status = status;
                bio_endio(bio);
        }
}

static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
{
        pkt_dbg(2, pd, "pkt %d\n", pkt->id);

        for (;;) {
                switch (pkt->state) {
                case PACKET_WAITING_STATE:
                        if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
                                return;

                        pkt->sleep_time = 0;
                        pkt_gather_data(pd, pkt);
                        pkt_set_state(pkt, PACKET_READ_WAIT_STATE);
                        break;

                case PACKET_READ_WAIT_STATE:
                        if (atomic_read(&pkt->io_wait) > 0)
                                return;

                        if (atomic_read(&pkt->io_errors) > 0) {
                                pkt_set_state(pkt, PACKET_RECOVERY_STATE);
                        } else {
                                pkt_start_write(pd, pkt);
                        }
                        break;

                case PACKET_WRITE_WAIT_STATE:
                        if (atomic_read(&pkt->io_wait) > 0)
                                return;

                        if (!pkt->w_bio->bi_status) {
                                pkt_set_state(pkt, PACKET_FINISHED_STATE);
                        } else {
                                pkt_set_state(pkt, PACKET_RECOVERY_STATE);
                        }
                        break;

                case PACKET_RECOVERY_STATE:
                        pkt_dbg(2, pd, "No recovery possible\n");
                        pkt_set_state(pkt, PACKET_FINISHED_STATE);
                        break;

                case PACKET_FINISHED_STATE:
                        pkt_finish_packet(pkt, pkt->w_bio->bi_status);
                        return;

                default:
                        BUG();
                        break;
                }
        }
}

static void pkt_handle_packets(struct pktcdvd_device *pd)
{
        struct packet_data *pkt, *next;

        /*
         * Run state machine for active packets
         */
        list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
                if (atomic_read(&pkt->run_sm) > 0) {
                        atomic_set(&pkt->run_sm, 0);
                        pkt_run_state_machine(pd, pkt);
                }
        }

        /*
         * Move no longer active packets to the free list
         */
        spin_lock(&pd->cdrw.active_list_lock);
        list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
                if (pkt->state == PACKET_FINISHED_STATE) {
                        list_del(&pkt->list);
                        pkt_put_packet_data(pd, pkt);
                        pkt_set_state(pkt, PACKET_IDLE_STATE);
                        atomic_set(&pd->scan_queue, 1);
                }
        }
        spin_unlock(&pd->cdrw.active_list_lock);
}

static void pkt_count_states(struct pktcdvd_device *pd, int *states)
{
        struct packet_data *pkt;
        int i;

        for (i = 0; i < PACKET_NUM_STATES; i++)
                states[i] = 0;

        spin_lock(&pd->cdrw.active_list_lock);
        list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
                states[pkt->state]++;
        }
        spin_unlock(&pd->cdrw.active_list_lock);
}

/*
 * kcdrwd is woken up when writes have been queued for one of our
 * registered devices
 */
static int kcdrwd(void *foobar)
{
        struct pktcdvd_device *pd = foobar;
        struct packet_data *pkt;
        long min_sleep_time, residue;

        set_user_nice(current, MIN_NICE);
        set_freezable();

        for (;;) {
                DECLARE_WAITQUEUE(wait, current);

                /*
                 * Wait until there is something to do
                 */
                add_wait_queue(&pd->wqueue, &wait);
                for (;;) {
                        set_current_state(TASK_INTERRUPTIBLE);

                        /* Check if we need to run pkt_handle_queue */
                        if (atomic_read(&pd->scan_queue) > 0)
                                goto work_to_do;

                        /* Check if we need to run the state machine for some packet */
                        list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
                                if (atomic_read(&pkt->run_sm) > 0)
                                        goto work_to_do;
                        }

                        /* Check if we need to process the iosched queues */
                        if (atomic_read(&pd->iosched.attention) != 0)
                                goto work_to_do;

                        /* Otherwise, go to sleep */
                        if (PACKET_DEBUG > 1) {
                                int states[PACKET_NUM_STATES];
                                pkt_count_states(pd, states);
                                pkt_dbg(2, pd, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
                                        states[0], states[1], states[2],
                                        states[3], states[4], states[5]);
                        }

                        min_sleep_time = MAX_SCHEDULE_TIMEOUT;
                        list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
                                if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
                                        min_sleep_time = pkt->sleep_time;
                        }

                        pkt_dbg(2, pd, "sleeping\n");
                        residue = schedule_timeout(min_sleep_time);
                        pkt_dbg(2, pd, "wake up\n");

                        /* make swsusp happy with our thread */
                        try_to_freeze();

                        list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
                                if (!pkt->sleep_time)
                                        continue;
                                pkt->sleep_time -= min_sleep_time - residue;
                                if (pkt->sleep_time <= 0) {
                                        pkt->sleep_time = 0;
                                        atomic_inc(&pkt->run_sm);
                                }
                        }

                        if (kthread_should_stop())
                                break;
                }
work_to_do:
                set_current_state(TASK_RUNNING);
                remove_wait_queue(&pd->wqueue, &wait);

                if (kthread_should_stop())
                        break;

                /*
                 * if pkt_handle_queue returns true, we can queue
                 * another request.
                 */
                while (pkt_handle_queue(pd))
                        ;

                /*
                 * Handle packet state machine
                 */
                pkt_handle_packets(pd);

                /*
                 * Handle iosched queues
                 */
                pkt_iosched_process_queue(pd);
        }

        return 0;
}

static void pkt_print_settings(struct pktcdvd_device *pd)
{
        pkt_info(pd, "%s packets, %u blocks, Mode-%c disc\n",
                 pd->settings.fp ? "Fixed" : "Variable",
                 pd->settings.size >> 2,
                 pd->settings.block_mode == 8 ? '1' : '2');
}
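
/*
 * Example output (illustrative): "pktcdvd0: Fixed packets, 32 blocks,
 * Mode-2 disc" for a fixed-packet disc with 128-sector packets and a
 * block_mode other than 8.
 */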
1493
1494 static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
1495 {
1496         memset(cgc->cmd, 0, sizeof(cgc->cmd));
1497
1498         cgc->cmd[0] = GPCMD_MODE_SENSE_10;
1499         cgc->cmd[2] = page_code | (page_control << 6);
1500         cgc->cmd[7] = cgc->buflen >> 8;
1501         cgc->cmd[8] = cgc->buflen & 0xff;
1502         cgc->data_direction = CGC_DATA_READ;
1503         return pkt_generic_packet(pd, cgc);
1504 }
1505
1506 static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
1507 {
1508         memset(cgc->cmd, 0, sizeof(cgc->cmd));
1509         memset(cgc->buffer, 0, 2);
1510         cgc->cmd[0] = GPCMD_MODE_SELECT_10;
1511         cgc->cmd[1] = 0x10;             /* PF */
1512         cgc->cmd[7] = cgc->buflen >> 8;
1513         cgc->cmd[8] = cgc->buflen & 0xff;
1514         cgc->data_direction = CGC_DATA_WRITE;
1515         return pkt_generic_packet(pd, cgc);
1516 }
1517
1518 static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
1519 {
1520         struct packet_command cgc;
1521         int ret;
1522
1523         /* set up command and get the disc info */
1524         init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
1525         cgc.cmd[0] = GPCMD_READ_DISC_INFO;
1526         cgc.cmd[8] = cgc.buflen = 2;
1527         cgc.quiet = 1;
1528
1529         ret = pkt_generic_packet(pd, &cgc);
1530         if (ret)
1531                 return ret;
1532
1533         /* not all drives have the same disc_info length, so re-issue
1534          * the command with the length the drive tells us it can supply
1535          */
1536         cgc.buflen = be16_to_cpu(di->disc_information_length) +
1537                      sizeof(di->disc_information_length);
1538
1539         if (cgc.buflen > sizeof(disc_information))
1540                 cgc.buflen = sizeof(disc_information);
1541
1542         cgc.cmd[8] = cgc.buflen;
1543         return pkt_generic_packet(pd, &cgc);
1544 }
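/*
 * The two-step pattern above is the usual MMC allocation-length dance:
 * first ask for just the 2-byte disc_information_length, then re-issue
 * the command sized to what the drive can actually supply.  For example,
 * a drive reporting disc_information_length == 32 makes the second pass
 * request 32 + 2 = 34 bytes (clamped to the size of disc_information).
 */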
1545
1546 static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
1547 {
1548         struct packet_command cgc;
1549         int ret;
1550
1551         init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
1552         cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
1553         cgc.cmd[1] = type & 3;
1554         cgc.cmd[4] = (track & 0xff00) >> 8;
1555         cgc.cmd[5] = track & 0xff;
1556         cgc.cmd[8] = 8;
1557         cgc.quiet = 1;
1558
1559         ret = pkt_generic_packet(pd, &cgc);
1560         if (ret)
1561                 return ret;
1562
1563         cgc.buflen = be16_to_cpu(ti->track_information_length) +
1564                      sizeof(ti->track_information_length);
1565
1566         if (cgc.buflen > sizeof(track_information))
1567                 cgc.buflen = sizeof(track_information);
1568
1569         cgc.cmd[8] = cgc.buflen;
1570         return pkt_generic_packet(pd, &cgc);
1571 }
1572
1573 static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
1574                                                 long *last_written)
1575 {
1576         disc_information di;
1577         track_information ti;
1578         __u32 last_track;
1579         int ret;
1580
1581         ret = pkt_get_disc_info(pd, &di);
1582         if (ret)
1583                 return ret;
1584
1585         last_track = (di.last_track_msb << 8) | di.last_track_lsb;
1586         ret = pkt_get_track_info(pd, last_track, 1, &ti);
1587         if (ret)
1588                 return ret;
1589
1590         /* if this track is blank, try the previous. */
1591         if (ti.blank) {
1592                 last_track--;
1593                 ret = pkt_get_track_info(pd, last_track, 1, &ti);
1594                 if (ret)
1595                         return ret;
1596         }
1597
1598         /* if the last recorded address field is valid, return it. */
1599         if (ti.lra_v) {
1600                 *last_written = be32_to_cpu(ti.last_rec_address);
1601         } else {
1602                 /* make it up instead */
1603                 *last_written = be32_to_cpu(ti.track_start) +
1604                                 be32_to_cpu(ti.track_size);
1605                 if (ti.free_blocks)
1606                         *last_written -= (be32_to_cpu(ti.free_blocks) + 7);
1607         }
1608         return 0;
1609 }
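/*
 * The fallback above estimates the last written address as
 * track_start + track_size minus the unused tail.  The extra 7 blocks
 * subtracted along with free_blocks presumably account for the packet
 * link area (run-in/run-out/link blocks) that follows the last user
 * data; compare the link_size/link_loss value of 7 used elsewhere in
 * this file.
 */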
1610
1611 /*
1612  * write the mode select page based on pd->settings
1613  */
1614 static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
1615 {
1616         struct packet_command cgc;
1617         struct scsi_sense_hdr sshdr;
1618         write_param_page *wp;
1619         char buffer[128];
1620         int ret, size;
1621
1622         /* doesn't apply to DVD+RW or DVD-RAM */
1623         if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
1624                 return 0;
1625
1626         memset(buffer, 0, sizeof(buffer));
1627         init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
1628         cgc.sshdr = &sshdr;
1629         ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
1630         if (ret) {
1631                 pkt_dump_sense(pd, &cgc);
1632                 return ret;
1633         }
1634
1635         size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
1636         pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
1637         if (size > sizeof(buffer))
1638                 size = sizeof(buffer);
1639
1640         /*
1641          * now get it all
1642          */
1643         init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
1644         cgc.sshdr = &sshdr;
1645         ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
1646         if (ret) {
1647                 pkt_dump_sense(pd, &cgc);
1648                 return ret;
1649         }
1650
1651         /*
1652          * write page is offset header + block descriptor length
1653          */
1654         wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];
1655
1656         wp->fp = pd->settings.fp;
1657         wp->track_mode = pd->settings.track_mode;
1658         wp->write_type = pd->settings.write_type;
1659         wp->data_block_type = pd->settings.block_mode;
1660
1661         wp->multi_session = 0;
1662
1663 #ifdef PACKET_USE_LS
1664         wp->link_size = 7;
1665         wp->ls_v = 1;
1666 #endif
1667
1668         if (wp->data_block_type == PACKET_BLOCK_MODE1) {
1669                 wp->session_format = 0;
1670                 wp->subhdr2 = 0x20;
1671         } else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
1672                 wp->session_format = 0x20;
1673                 wp->subhdr2 = 8;
1674 #if 0
1675                 wp->mcn[0] = 0x80;
1676                 memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
1677 #endif
1678         } else {
1679                 /*
1680                  * paranoia
1681                  */
1682                 pkt_err(pd, "write mode wrong %d\n", wp->data_block_type);
1683                 return 1;
1684         }
1685         wp->packet_size = cpu_to_be32(pd->settings.size >> 2);
1686
1687         cgc.buflen = cgc.cmd[8] = size;
1688         ret = pkt_mode_select(pd, &cgc);
1689         if (ret) {
1690                 pkt_dump_sense(pd, &cgc);
1691                 return ret;
1692         }
1693
1694         pkt_print_settings(pd);
1695         return 0;
1696 }
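/*
 * A worked example of the pointer arithmetic above: MODE SENSE(10) data
 * starts with an 8-byte mode parameter header whose bytes 6..7 give the
 * block descriptor length (saved as pd->mode_offset), so the write
 * parameters page begins at
 *
 *	buffer + sizeof(struct mode_page_header) + pd->mode_offset
 *
 * e.g. with one 8-byte block descriptor it starts at offset 8 + 8 = 16.
 */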
1697
1698 /*
1699  * 1 -- we can write to this track, 0 -- we can't
1700  */
1701 static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
1702 {
1703         switch (pd->mmc3_profile) {
1704                 case 0x1a: /* DVD+RW */
1705                 case 0x12: /* DVD-RAM */
1706                         /* The track is always writable on DVD+RW/DVD-RAM */
1707                         return 1;
1708                 default:
1709                         break;
1710         }
1711
1712         if (!ti->packet || !ti->fp)
1713                 return 0;
1714
1715         /*
1716          * "good" settings as per Mt Fuji.
1717          */
1718         if (ti->rt == 0 && ti->blank == 0)
1719                 return 1;
1720
1721         if (ti->rt == 0 && ti->blank == 1)
1722                 return 1;
1723
1724         if (ti->rt == 1 && ti->blank == 0)
1725                 return 1;
1726
1727         pkt_err(pd, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
1728         return 0;
1729 }
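/*
 * The three checks above collapse to a small truth table: a fixed-packet
 * track is writable for every rt/blank combination except
 * rt == 1 && blank == 1, which is the "bad state" logged above.
 */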
1730
1731 /*
1732  * 1 -- we can write to this disc, 0 -- we can't
1733  */
1734 static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
1735 {
1736         switch (pd->mmc3_profile) {
1737                 case 0x0a: /* CD-RW */
1738                 case 0xffff: /* MMC3 not supported */
1739                         break;
1740                 case 0x1a: /* DVD+RW */
1741                 case 0x13: /* DVD-RW */
1742                 case 0x12: /* DVD-RAM */
1743                         return 1;
1744                 default:
1745                         pkt_dbg(2, pd, "Wrong disc profile (%x)\n",
1746                                 pd->mmc3_profile);
1747                         return 0;
1748         }
1749
1750         /*
1751          * for disc type 0xff we should probably reserve a new track,
1752          * though that is arguably better left to user applications.
1753          */
1754         if (di->disc_type == 0xff) {
1755                 pkt_notice(pd, "unknown disc - no track?\n");
1756                 return 0;
1757         }
1758
1759         if (di->disc_type != 0x20 && di->disc_type != 0) {
1760                 pkt_err(pd, "wrong disc type (%x)\n", di->disc_type);
1761                 return 0;
1762         }
1763
1764         if (di->erasable == 0) {
1765                 pkt_notice(pd, "disc not erasable\n");
1766                 return 0;
1767         }
1768
1769         if (di->border_status == PACKET_SESSION_RESERVED) {
1770                 pkt_err(pd, "can't write to last track (reserved)\n");
1771                 return 0;
1772         }
1773
1774         return 1;
1775 }
1776
1777 static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
1778 {
1779         struct packet_command cgc;
1780         unsigned char buf[12];
1781         disc_information di;
1782         track_information ti;
1783         int ret, track;
1784
1785         init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
1786         cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
1787         cgc.cmd[8] = 8;
1788         ret = pkt_generic_packet(pd, &cgc);
1789         pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7];
1790
1791         memset(&di, 0, sizeof(disc_information));
1792         memset(&ti, 0, sizeof(track_information));
1793
1794         ret = pkt_get_disc_info(pd, &di);
1795         if (ret) {
1796                 pkt_err(pd, "failed get_disc\n");
1797                 return ret;
1798         }
1799
1800         if (!pkt_writable_disc(pd, &di))
1801                 return -EROFS;
1802
1803         pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;
1804
1805         track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
1806         ret = pkt_get_track_info(pd, track, 1, &ti);
1807         if (ret) {
1808                 pkt_err(pd, "failed get_track\n");
1809                 return ret;
1810         }
1811
1812         if (!pkt_writable_track(pd, &ti)) {
1813                 pkt_err(pd, "can't write to this track\n");
1814                 return -EROFS;
1815         }
1816
1817         /*
1818          * we keep the packet size in 512-byte units; that makes the
1819          * request calculations easier to deal with.
1820          */
1821         pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
1822         if (pd->settings.size == 0) {
1823                 pkt_notice(pd, "detected zero packet size!\n");
1824                 return -ENXIO;
1825         }
1826         if (pd->settings.size > PACKET_MAX_SECTORS) {
1827                 pkt_err(pd, "packet size is too big\n");
1828                 return -EROFS;
1829         }
1830         pd->settings.fp = ti.fp;
1831         pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
1832
1833         if (ti.nwa_v) {
1834                 pd->nwa = be32_to_cpu(ti.next_writable);
1835                 set_bit(PACKET_NWA_VALID, &pd->flags);
1836         }
1837
1838         /*
1839          * in theory we could use the lra on -RW media as well and just
1840          * zero out blocks that haven't been written yet, but in practice
1841          * that is a no-go. we'll use it for -R, naturally.
1842          */
1843         if (ti.lra_v) {
1844                 pd->lra = be32_to_cpu(ti.last_rec_address);
1845                 set_bit(PACKET_LRA_VALID, &pd->flags);
1846         } else {
1847                 pd->lra = 0xffffffff;
1848                 set_bit(PACKET_LRA_VALID, &pd->flags);
1849         }
1850
1851         /*
1852          * fine for now
1853          */
1854         pd->settings.link_loss = 7;
1855         pd->settings.write_type = 0;    /* packet */
1856         pd->settings.track_mode = ti.track_mode;
1857
1858         /*
1859          * mode1 or mode2 disc
1860          */
1861         switch (ti.data_mode) {
1862                 case PACKET_MODE1:
1863                         pd->settings.block_mode = PACKET_BLOCK_MODE1;
1864                         break;
1865                 case PACKET_MODE2:
1866                         pd->settings.block_mode = PACKET_BLOCK_MODE2;
1867                         break;
1868                 default:
1869                         pkt_err(pd, "unknown data mode\n");
1870                         return -EROFS;
1871         }
1872         return 0;
1873 }
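/*
 * Unit check for the conversions above: the drive reports
 * fixed_packet_size in 2048-byte frames, while settings.size is kept
 * in 512-byte sectors.  A typical CD-RW packet of 32 frames therefore
 * becomes settings.size = 32 << 2 = 128 sectors, i.e. a 64kB zone.
 */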
1874
1875 /*
1876  * enable/disable write caching on drive
1877  */
1878 static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
1879                                                 int set)
1880 {
1881         struct packet_command cgc;
1882         struct scsi_sense_hdr sshdr;
1883         unsigned char buf[64];
1884         int ret;
1885
1886         init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
1887         cgc.sshdr = &sshdr;
1888         cgc.buflen = pd->mode_offset + 12;
1889
1890         /*
1891          * the caching mode page might not be present, so quiet this command
1892          */
1893         cgc.quiet = 1;
1894
1895         ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0);
1896         if (ret)
1897                 return ret;
1898
1899         buf[pd->mode_offset + 10] |= (!!set << 2);
1900
1901         cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
1902         ret = pkt_mode_select(pd, &cgc);
1903         if (ret) {
1904                 pkt_err(pd, "write caching control failed\n");
1905                 pkt_dump_sense(pd, &cgc);
1906         } else if (set)         /* !ret is implied by the else branch */
1907                 pkt_notice(pd, "enabled write caching\n");
1908         return ret;
1909 }
1910
1911 static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
1912 {
1913         struct packet_command cgc;
1914
1915         init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
1916         cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
1917         cgc.cmd[4] = lockflag ? 1 : 0;
1918         return pkt_generic_packet(pd, &cgc);
1919 }
1920
1921 /*
1922  * Returns drive maximum write speed
1923  */
1924 static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
1925                                                 unsigned *write_speed)
1926 {
1927         struct packet_command cgc;
1928         struct scsi_sense_hdr sshdr;
1929         unsigned char buf[256+18];
1930         unsigned char *cap_buf;
1931         int ret, offset;
1932
1933         cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
1934         init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
1935         cgc.sshdr = &sshdr;
1936
1937         ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
1938         if (ret) {
1939                 cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
1940                              sizeof(struct mode_page_header);
1941                 ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
1942                 if (ret) {
1943                         pkt_dump_sense(pd, &cgc);
1944                         return ret;
1945                 }
1946         }
1947
1948         offset = 20;                        /* Obsolete field, used by older drives */
1949         if (cap_buf[1] >= 28)
1950                 offset = 28;                /* Current write speed selected */
1951         if (cap_buf[1] >= 30) {
1952                 /* If the drive reports at least one "Logical Unit Write
1953                  * Speed Performance Descriptor Block", use the information
1954                  * in the first block, which contains the highest speed.
1955                  */
1956                 int num_spdb = (cap_buf[30] << 8) + cap_buf[31];
1957                 if (num_spdb > 0)
1958                         offset = 34;
1959         }
1960
1961         *write_speed = (cap_buf[offset] << 8) | cap_buf[offset + 1];
1962         return 0;
1963 }
1964
1965 /* These tables are from cdrecord - I don't have the Orange Book */
1966 /* standard speed CD-RW (1-4x) */
1967 static char clv_to_speed[16] = {
1968         /* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
1969            0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1970 };
1971 /* high speed CD-RW (up to 10x) */
1972 static char hs_clv_to_speed[16] = {
1973         /* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
1974            0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1975 };
1976 /* ultra high speed CD-RW */
1977 static char us_clv_to_speed[16] = {
1978         /* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
1979            0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0
1980 };
1981
1982 /*
1983  * reads the maximum media speed from ATIP
1984  */
1985 static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
1986                                                 unsigned *speed)
1987 {
1988         struct packet_command cgc;
1989         struct scsi_sense_hdr sshdr;
1990         unsigned char buf[64];
1991         unsigned int size, st, sp;
1992         int ret;
1993
1994         init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
1995         cgc.sshdr = &sshdr;
1996         cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
1997         cgc.cmd[1] = 2;
1998         cgc.cmd[2] = 4; /* READ ATIP */
1999         cgc.cmd[8] = 2;
2000         ret = pkt_generic_packet(pd, &cgc);
2001         if (ret) {
2002                 pkt_dump_sense(pd, &cgc);
2003                 return ret;
2004         }
2005         size = ((unsigned int) buf[0]<<8) + buf[1] + 2;
2006         if (size > sizeof(buf))
2007                 size = sizeof(buf);
2008
2009         init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
2010         cgc.sshdr = &sshdr;
2011         cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
2012         cgc.cmd[1] = 2;
2013         cgc.cmd[2] = 4;
2014         cgc.cmd[8] = size;
2015         ret = pkt_generic_packet(pd, &cgc);
2016         if (ret) {
2017                 pkt_dump_sense(pd, &cgc);
2018                 return ret;
2019         }
2020
2021         if (!(buf[6] & 0x40)) {
2022                 pkt_notice(pd, "disc type is not CD-RW\n");
2023                 return 1;
2024         }
2025         if (!(buf[6] & 0x4)) {
2026                 pkt_notice(pd, "A1 values on media are not valid, maybe not CDRW?\n");
2027                 return 1;
2028         }
2029
2030         st = (buf[6] >> 3) & 0x7; /* disc sub-type */
2031
2032         sp = buf[16] & 0xf; /* max speed from ATIP A1 field */
2033
2034         /* Info from cdrecord */
2035         switch (st) {
2036                 case 0: /* standard speed */
2037                         *speed = clv_to_speed[sp];
2038                         break;
2039                 case 1: /* high speed */
2040                         *speed = hs_clv_to_speed[sp];
2041                         break;
2042                 case 2: /* ultra high speed */
2043                         *speed = us_clv_to_speed[sp];
2044                         break;
2045                 default:
2046                         pkt_notice(pd, "unknown disc sub-type %d\n", st);
2047                         return 1;
2048         }
2049         if (*speed) {
2050                 pkt_info(pd, "maximum media speed: %d\n", *speed);
2051                 return 0;
2052         } else {
2053                 pkt_notice(pd, "unknown speed %d for sub-type %d\n", sp, st);
2054                 return 1;
2055         }
2056 }
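/*
 * Example of the lookup above: a high speed CD-RW (sub-type st == 1)
 * whose ATIP A1 speed field reads sp == 4 resolves to
 * hs_clv_to_speed[4] == 10, i.e. a 10x maximum media speed.
 */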
2057
2058 static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
2059 {
2060         struct packet_command cgc;
2061         struct scsi_sense_hdr sshdr;
2062         int ret;
2063
2064         pkt_dbg(2, pd, "Performing OPC\n");
2065
2066         init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
2067         cgc.sshdr = &sshdr;
2068         cgc.timeout = 60*HZ;
2069         cgc.cmd[0] = GPCMD_SEND_OPC;
2070         cgc.cmd[1] = 1;
2071         ret = pkt_generic_packet(pd, &cgc);
2072         if (ret)
2073                 pkt_dump_sense(pd, &cgc);
2074         return ret;
2075 }
2076
2077 static int pkt_open_write(struct pktcdvd_device *pd)
2078 {
2079         int ret;
2080         unsigned int write_speed, media_write_speed, read_speed;
2081
2082         ret = pkt_probe_settings(pd);
2083         if (ret) {
2084                 pkt_dbg(2, pd, "failed probe\n");
2085                 return ret;
2086         }
2087
2088         ret = pkt_set_write_settings(pd);
2089         if (ret) {
2090                 pkt_dbg(1, pd, "failed saving write settings\n");
2091                 return -EIO;
2092         }
2093
2094         pkt_write_caching(pd, USE_WCACHING);
2095
2096         ret = pkt_get_max_speed(pd, &write_speed);
2097         if (ret)
2098                 write_speed = 16 * 177;
2099         switch (pd->mmc3_profile) {
2100                 case 0x13: /* DVD-RW */
2101                 case 0x1a: /* DVD+RW */
2102                 case 0x12: /* DVD-RAM */
2103                         pkt_dbg(1, pd, "write speed %ukB/s\n", write_speed);
2104                         break;
2105                 default:
2106                         ret = pkt_media_speed(pd, &media_write_speed);
2107                         if (ret)
2108                                 media_write_speed = 16;
2109                         write_speed = min(write_speed, media_write_speed * 177);
2110                         pkt_dbg(1, pd, "write speed %ux\n", write_speed / 176);
2111                         break;
2112         }
2113         read_speed = write_speed;
2114
2115         ret = pkt_set_speed(pd, write_speed, read_speed);
2116         if (ret) {
2117                 pkt_dbg(1, pd, "couldn't set write speed\n");
2118                 return -EIO;
2119         }
2120         pd->write_speed = write_speed;
2121         pd->read_speed = read_speed;
2122
2123         ret = pkt_perform_opc(pd);
2124         if (ret) {
2125                 pkt_dbg(1, pd, "Optimum Power Calibration failed\n");
2126         }
2127
2128         return 0;
2129 }
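/*
 * The speed arithmetic above works in CD "kB/s" units, where 1x is
 * 176.4 kB/s (rounded to 177 here).  For example, 4x media in a faster
 * drive gives write_speed = min(write_speed, 4 * 177) = 708 kB/s,
 * logged as "write speed 4x".
 */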
2130
2131 /*
2132  * called at open time.
2133  */
2134 static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
2135 {
2136         int ret;
2137         long lba;
2138         struct request_queue *q;
2139         struct block_device *bdev;
2140
2141         /*
2142          * We need to re-open the cdrom device without O_NONBLOCK to be able
2143          * to read/write from/to it. It is already opened in O_NONBLOCK mode
2144          * so open should not fail.
2145          */
2146         bdev = blkdev_get_by_dev(pd->bdev->bd_dev, FMODE_READ | FMODE_EXCL, pd);
2147         if (IS_ERR(bdev)) {
2148                 ret = PTR_ERR(bdev);
2149                 goto out;
2150         }
2151
2152         ret = pkt_get_last_written(pd, &lba);
2153         if (ret) {
2154                 pkt_err(pd, "pkt_get_last_written failed\n");
2155                 goto out_putdev;
2156         }
2157
2158         set_capacity(pd->disk, lba << 2);
2159         set_capacity_and_notify(pd->bdev->bd_disk, lba << 2);
2160
2161         q = bdev_get_queue(pd->bdev);
2162         if (write) {
2163                 ret = pkt_open_write(pd);
2164                 if (ret)
2165                         goto out_putdev;
2166                 /*
2167                  * Some CDRW drives cannot handle writes larger than one packet,
2168                  * even if the size is a multiple of the packet size.
2169                  */
2170                 blk_queue_max_hw_sectors(q, pd->settings.size);
2171                 set_bit(PACKET_WRITABLE, &pd->flags);
2172         } else {
2173                 pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
2174                 clear_bit(PACKET_WRITABLE, &pd->flags);
2175         }
2176
2177         ret = pkt_set_segment_merging(pd, q);
2178         if (ret)
2179                 goto out_putdev;
2180
2181         if (write) {
2182                 if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
2183                         pkt_err(pd, "not enough memory for buffers\n");
2184                         ret = -ENOMEM;
2185                         goto out_putdev;
2186                 }
2187                 pkt_info(pd, "%lukB available on disc\n", lba << 1);
2188         }
2189
2190         return 0;
2191
2192 out_putdev:
2193         blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
2194 out:
2195         return ret;
2196 }
2197
2198 /*
2199  * called when the device is closed. makes sure that the drive flushes
2200  * its internal cache before we close.
2201  */
2202 static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
2203 {
2204         if (flush && pkt_flush_cache(pd))
2205                 pkt_dbg(1, pd, "not flushing cache\n");
2206
2207         pkt_lock_door(pd, 0);
2208
2209         pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
2210         blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
2211
2212         pkt_shrink_pktlist(pd);
2213 }
2214
2215 static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
2216 {
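	/*
	 * The bounds check plus array_index_nospec() keeps a mispredicted
	 * dev_minor from steering a speculative out-of-bounds read of
	 * pkt_devs[] (Spectre v1 hardening).
	 */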
2217         if (dev_minor >= MAX_WRITERS)
2218                 return NULL;
2219
2220         dev_minor = array_index_nospec(dev_minor, MAX_WRITERS);
2221         return pkt_devs[dev_minor];
2222 }
2223
2224 static int pkt_open(struct block_device *bdev, fmode_t mode)
2225 {
2226         struct pktcdvd_device *pd = NULL;
2227         int ret;
2228
2229         mutex_lock(&pktcdvd_mutex);
2230         mutex_lock(&ctl_mutex);
2231         pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
2232         if (!pd) {
2233                 ret = -ENODEV;
2234                 goto out;
2235         }
2236         BUG_ON(pd->refcnt < 0);
2237
2238         pd->refcnt++;
2239         if (pd->refcnt > 1) {
2240                 if ((mode & FMODE_WRITE) &&
2241                     !test_bit(PACKET_WRITABLE, &pd->flags)) {
2242                         ret = -EBUSY;
2243                         goto out_dec;
2244                 }
2245         } else {
2246                 ret = pkt_open_dev(pd, mode & FMODE_WRITE);
2247                 if (ret)
2248                         goto out_dec;
2249                 /*
2250                  * needed here as well, since ext2 (among others) may change
2251                  * the blocksize at mount time
2252                  */
2253                 set_blocksize(bdev, CD_FRAMESIZE);
2254         }
2255
2256         mutex_unlock(&ctl_mutex);
2257         mutex_unlock(&pktcdvd_mutex);
2258         return 0;
2259
2260 out_dec:
2261         pd->refcnt--;
2262 out:
2263         mutex_unlock(&ctl_mutex);
2264         mutex_unlock(&pktcdvd_mutex);
2265         return ret;
2266 }
2267
2268 static void pkt_close(struct gendisk *disk, fmode_t mode)
2269 {
2270         struct pktcdvd_device *pd = disk->private_data;
2271
2272         mutex_lock(&pktcdvd_mutex);
2273         mutex_lock(&ctl_mutex);
2274         pd->refcnt--;
2275         BUG_ON(pd->refcnt < 0);
2276         if (pd->refcnt == 0) {
2277                 int flush = test_bit(PACKET_WRITABLE, &pd->flags);
2278                 pkt_release_dev(pd, flush);
2279         }
2280         mutex_unlock(&ctl_mutex);
2281         mutex_unlock(&pktcdvd_mutex);
2282 }
2283
2284
2285 static void pkt_end_io_read_cloned(struct bio *bio)
2286 {
2287         struct packet_stacked_data *psd = bio->bi_private;
2288         struct pktcdvd_device *pd = psd->pd;
2289
2290         psd->bio->bi_status = bio->bi_status;
2291         bio_put(bio);
2292         bio_endio(psd->bio);
2293         mempool_free(psd, &psd_pool);
2294         pkt_bio_finished(pd);
2295 }
2296
2297 static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
2298 {
2299         struct bio *cloned_bio =
2300                 bio_alloc_clone(pd->bdev, bio, GFP_NOIO, &pkt_bio_set);
2301         struct packet_stacked_data *psd = mempool_alloc(&psd_pool, GFP_NOIO);
2302
2303         psd->pd = pd;
2304         psd->bio = bio;
2305         cloned_bio->bi_private = psd;
2306         cloned_bio->bi_end_io = pkt_end_io_read_cloned;
2307         pd->stats.secs_r += bio_sectors(bio);
2308         pkt_queue_bio(pd, cloned_bio);
2309 }
2310
2311 static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
2312 {
2313         struct pktcdvd_device *pd = q->queuedata;
2314         sector_t zone;
2315         struct packet_data *pkt;
2316         int was_empty, blocked_bio;
2317         struct pkt_rb_node *node;
2318
2319         zone = get_zone(bio->bi_iter.bi_sector, pd);
2320
2321         /*
2322          * If we find a matching packet in state WAITING or READ_WAIT, we can
2323          * just append this bio to that packet.
2324          */
2325         spin_lock(&pd->cdrw.active_list_lock);
2326         blocked_bio = 0;
2327         list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
2328                 if (pkt->sector == zone) {
2329                         spin_lock(&pkt->lock);
2330                         if ((pkt->state == PACKET_WAITING_STATE) ||
2331                             (pkt->state == PACKET_READ_WAIT_STATE)) {
2332                                 bio_list_add(&pkt->orig_bios, bio);
2333                                 pkt->write_size +=
2334                                         bio->bi_iter.bi_size / CD_FRAMESIZE;
2335                                 if ((pkt->write_size >= pkt->frames) &&
2336                                     (pkt->state == PACKET_WAITING_STATE)) {
2337                                         atomic_inc(&pkt->run_sm);
2338                                         wake_up(&pd->wqueue);
2339                                 }
2340                                 spin_unlock(&pkt->lock);
2341                                 spin_unlock(&pd->cdrw.active_list_lock);
2342                                 return;
2343                         } else {
2344                                 blocked_bio = 1;
2345                         }
2346                         spin_unlock(&pkt->lock);
2347                 }
2348         }
2349         spin_unlock(&pd->cdrw.active_list_lock);
2350
2351         /*
2352          * Test if there is enough room left in the bio work queue
2353          * (queue size >= congestion-on mark).
2354          * If not, wait until the queue size drops below the congestion-off mark.
2355          */
2356         spin_lock(&pd->lock);
2357         if (pd->write_congestion_on > 0
2358             && pd->bio_queue_size >= pd->write_congestion_on) {
2359                 struct wait_bit_queue_entry wqe;
2360
2361                 init_wait_var_entry(&wqe, &pd->congested, 0);
2362                 for (;;) {
2363                         prepare_to_wait_event(__var_waitqueue(&pd->congested),
2364                                               &wqe.wq_entry,
2365                                               TASK_UNINTERRUPTIBLE);
2366                         if (pd->bio_queue_size <= pd->write_congestion_off)
2367                                 break;
2368                         pd->congested = true;
2369                         spin_unlock(&pd->lock);
2370                         schedule();
2371                         spin_lock(&pd->lock);
2372                 }
2373         }
2374         spin_unlock(&pd->lock);
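	/*
	 * The matching wake-up happens on the writer-thread side: once the
	 * queue has been drained below the congestion-off mark, pd->congested
	 * is cleared and the waiters here are woken via
	 * wake_up_var(&pd->congested).
	 */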
2375
2376         /*
2377          * No matching packet found. Store the bio in the work queue.
2378          */
2379         node = mempool_alloc(&pd->rb_pool, GFP_NOIO);
2380         node->bio = bio;
2381         spin_lock(&pd->lock);
2382         BUG_ON(pd->bio_queue_size < 0);
2383         was_empty = (pd->bio_queue_size == 0);
2384         pkt_rbtree_insert(pd, node);
2385         spin_unlock(&pd->lock);
2386
2387         /*
2388          * Wake up the worker thread.
2389          */
2390         atomic_set(&pd->scan_queue, 1);
2391         if (was_empty) {
2392                 /* This wake_up is required for correct operation */
2393                 wake_up(&pd->wqueue);
2394         } else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
2395                 /*
2396                  * This wake up is not required for correct operation,
2397                  * but improves performance in some cases.
2398                  */
2399                 wake_up(&pd->wqueue);
2400         }
2401 }
2402
2403 static void pkt_submit_bio(struct bio *bio)
2404 {
2405         struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->queue->queuedata;
2406         struct bio *split;
2407
2408         blk_queue_split(&bio);
2409
2410         pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
2411                 (unsigned long long)bio->bi_iter.bi_sector,
2412                 (unsigned long long)bio_end_sector(bio));
2413
2414         /*
2415          * Clone READ bios so we can have our own bi_end_io callback.
2416          */
2417         if (bio_data_dir(bio) == READ) {
2418                 pkt_make_request_read(pd, bio);
2419                 return;
2420         }
2421
2422         if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
2423                 pkt_notice(pd, "WRITE for ro device (%llu)\n",
2424                            (unsigned long long)bio->bi_iter.bi_sector);
2425                 goto end_io;
2426         }
2427
2428         if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
2429                 pkt_err(pd, "wrong bio size\n");
2430                 goto end_io;
2431         }
2432
2433         do {
2434                 sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
2435                 sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);
2436
2437                 if (last_zone != zone) {
2438                         BUG_ON(last_zone != zone + pd->settings.size);
2439
2440                         split = bio_split(bio, last_zone -
2441                                           bio->bi_iter.bi_sector,
2442                                           GFP_NOIO, &pkt_bio_set);
2443                         bio_chain(split, bio);
2444                 } else {
2445                         split = bio;
2446                 }
2447
2448                 pkt_make_request_write(bio->bi_bdev->bd_disk->queue, split);
2449         } while (split != bio);
2450
2451         return;
2452 end_io:
2453         bio_io_error(bio);
2454 }
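/*
 * A worked example of the split loop above, assuming the common 64kB
 * packet (settings.size == 128 sectors) and a zero pd->offset:
 * get_zone() rounds a sector down to its packet boundary, so a write
 * covering sectors 120..135 straddles two zones and is split at sector
 * 128; each fragment is then queued as a write that lies entirely
 * within one packet.
 */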
2455
2456 static void pkt_init_queue(struct pktcdvd_device *pd)
2457 {
2458         struct request_queue *q = pd->disk->queue;
2459
2460         blk_queue_logical_block_size(q, CD_FRAMESIZE);
2461         blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
2462         q->queuedata = pd;
2463 }
2464
2465 static int pkt_seq_show(struct seq_file *m, void *p)
2466 {
2467         struct pktcdvd_device *pd = m->private;
2468         char *msg;
2469         char bdev_buf[BDEVNAME_SIZE];
2470         int states[PACKET_NUM_STATES];
2471
2472         seq_printf(m, "Writer %s mapped to %s:\n", pd->name,
2473                    bdevname(pd->bdev, bdev_buf));
2474
2475         seq_printf(m, "\nSettings:\n");
2476         seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
2477
2478         if (pd->settings.write_type == 0)
2479                 msg = "Packet";
2480         else
2481                 msg = "Unknown";
2482         seq_printf(m, "\twrite type:\t\t%s\n", msg);
2483
2484         seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
2485         seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);
2486
2487         seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);
2488
2489         if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
2490                 msg = "Mode 1";
2491         else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
2492                 msg = "Mode 2";
2493         else
2494                 msg = "Unknown";
2495         seq_printf(m, "\tblock mode:\t\t%s\n", msg);
2496
2497         seq_printf(m, "\nStatistics:\n");
2498         seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
2499         seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
2500         seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
2501         seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
2502         seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);
2503
2504         seq_printf(m, "\nMisc:\n");
2505         seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
2506         seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
2507         seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
2508         seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
2509         seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
2510         seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);
2511
2512         seq_printf(m, "\nQueue state:\n");
2513         seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
2514         seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
2515         seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector);
2516
2517         pkt_count_states(pd, states);
2518         seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
2519                    states[0], states[1], states[2], states[3], states[4], states[5]);
2520
2521         seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n",
2522                         pd->write_congestion_off,
2523                         pd->write_congestion_on);
2524         return 0;
2525 }
2526
2527 static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
2528 {
2529         int i;
2530         char b[BDEVNAME_SIZE];
2531         struct block_device *bdev;
2532         struct scsi_device *sdev;
2533
2534         if (pd->pkt_dev == dev) {
2535                 pkt_err(pd, "recursive setup not allowed\n");
2536                 return -EBUSY;
2537         }
2538         for (i = 0; i < MAX_WRITERS; i++) {
2539                 struct pktcdvd_device *pd2 = pkt_devs[i];
2540                 if (!pd2)
2541                         continue;
2542                 if (pd2->bdev->bd_dev == dev) {
2543                         pkt_err(pd, "%s already setup\n",
2544                                 bdevname(pd2->bdev, b));
2545                         return -EBUSY;
2546                 }
2547                 if (pd2->pkt_dev == dev) {
2548                         pkt_err(pd, "can't chain pktcdvd devices\n");
2549                         return -EBUSY;
2550                 }
2551         }
2552
2553         bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_NDELAY, NULL);
2554         if (IS_ERR(bdev))
2555                 return PTR_ERR(bdev);
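	/*
	 * pktcdvd only sits on top of SCSI/ATAPI CD devices, so
	 * scsi_device_from_queue() doubles as the type check here; the
	 * reference it takes is dropped again right below, since only
	 * the check itself is needed.
	 */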
2556         sdev = scsi_device_from_queue(bdev->bd_disk->queue);
2557         if (!sdev) {
2558                 blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
2559                 return -EINVAL;
2560         }
2561         put_device(&sdev->sdev_gendev);
2562
2563         /* This is safe, since we have a reference from open(). */
2564         __module_get(THIS_MODULE);
2565
2566         pd->bdev = bdev;
2567         set_blocksize(bdev, CD_FRAMESIZE);
2568
2569         pkt_init_queue(pd);
2570
2571         atomic_set(&pd->cdrw.pending_bios, 0);
2572         pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
2573         if (IS_ERR(pd->cdrw.thread)) {
2574                 pkt_err(pd, "can't start kernel thread\n");
2575                 goto out_mem;
2576         }
2577
2578         proc_create_single_data(pd->name, 0, pkt_proc, pkt_seq_show, pd);
2579         pkt_dbg(1, pd, "writer mapped to %s\n", bdevname(bdev, b));
2580         return 0;
2581
2582 out_mem:
2583         blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
2584         /* This is safe: open() is still holding a reference. */
2585         module_put(THIS_MODULE);
2586         return -ENOMEM;
2587 }
2588
2589 static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
2590 {
2591         struct pktcdvd_device *pd = bdev->bd_disk->private_data;
2592         int ret;
2593
2594         pkt_dbg(2, pd, "cmd %x, dev %d:%d\n",
2595                 cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
2596
2597         mutex_lock(&pktcdvd_mutex);
2598         switch (cmd) {
2599         case CDROMEJECT:
2600                 /*
2601                  * The door gets locked when the device is opened, so we
2602                  * have to unlock it or else the eject command fails.
2603                  */
2604                 if (pd->refcnt == 1)
2605                         pkt_lock_door(pd, 0);
2606                 fallthrough;
2607         /*
2608          * forward selected CDROM ioctls to the attached CD-ROM device, for UDF
2609          */
2610         case CDROMMULTISESSION:
2611         case CDROMREADTOCENTRY:
2612         case CDROM_LAST_WRITTEN:
2613         case CDROM_SEND_PACKET:
2614         case SCSI_IOCTL_SEND_COMMAND:
2615                 if (!bdev->bd_disk->fops->ioctl)
2616                         ret = -ENOTTY;
2617                 else
2618                         ret = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
2619                 break;
2620         default:
2621                 pkt_dbg(2, pd, "Unknown ioctl (%x)\n", cmd);
2622                 ret = -ENOTTY;
2623         }
2624         mutex_unlock(&pktcdvd_mutex);
2625
2626         return ret;
2627 }
2628
2629 static unsigned int pkt_check_events(struct gendisk *disk,
2630                                      unsigned int clearing)
2631 {
2632         struct pktcdvd_device *pd = disk->private_data;
2633         struct gendisk *attached_disk;
2634
2635         if (!pd)
2636                 return 0;
2637         if (!pd->bdev)
2638                 return 0;
2639         attached_disk = pd->bdev->bd_disk;
2640         if (!attached_disk || !attached_disk->fops->check_events)
2641                 return 0;
2642         return attached_disk->fops->check_events(attached_disk, clearing);
2643 }
2644
2645 static char *pkt_devnode(struct gendisk *disk, umode_t *mode)
2646 {
2647         return kasprintf(GFP_KERNEL, "pktcdvd/%s", disk->disk_name);
2648 }
2649
2650 static const struct block_device_operations pktcdvd_ops = {
2651         .owner =                THIS_MODULE,
2652         .submit_bio =           pkt_submit_bio,
2653         .open =                 pkt_open,
2654         .release =              pkt_close,
2655         .ioctl =                pkt_ioctl,
2656         .compat_ioctl =         blkdev_compat_ptr_ioctl,
2657         .check_events =         pkt_check_events,
2658         .devnode =              pkt_devnode,
2659 };
2660
2661 /*
2662  * Set up mapping from pktcdvd device to CD-ROM device.
2663  */
2664 static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
2665 {
2666         int idx;
2667         int ret = -ENOMEM;
2668         struct pktcdvd_device *pd;
2669         struct gendisk *disk;
2670
2671         mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
2672
2673         for (idx = 0; idx < MAX_WRITERS; idx++)
2674                 if (!pkt_devs[idx])
2675                         break;
2676         if (idx == MAX_WRITERS) {
2677                 pr_err("max %d writers supported\n", MAX_WRITERS);
2678                 ret = -EBUSY;
2679                 goto out_mutex;
2680         }
2681
2682         pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
2683         if (!pd)
2684                 goto out_mutex;
2685
2686         ret = mempool_init_kmalloc_pool(&pd->rb_pool, PKT_RB_POOL_SIZE,
2687                                         sizeof(struct pkt_rb_node));
2688         if (ret)
2689                 goto out_mem;
2690
2691         INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
2692         INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
2693         spin_lock_init(&pd->cdrw.active_list_lock);
2694
2695         spin_lock_init(&pd->lock);
2696         spin_lock_init(&pd->iosched.lock);
2697         bio_list_init(&pd->iosched.read_queue);
2698         bio_list_init(&pd->iosched.write_queue);
2699         sprintf(pd->name, DRIVER_NAME"%d", idx);
2700         init_waitqueue_head(&pd->wqueue);
2701         pd->bio_queue = RB_ROOT;
2702
2703         pd->write_congestion_on  = write_congestion_on;
2704         pd->write_congestion_off = write_congestion_off;
2705
2706         ret = -ENOMEM;
2707         disk = blk_alloc_disk(NUMA_NO_NODE);
2708         if (!disk)
2709                 goto out_mem;
2710         pd->disk = disk;
2711         disk->major = pktdev_major;
2712         disk->first_minor = idx;
2713         disk->minors = 1;
2714         disk->fops = &pktcdvd_ops;
2715         disk->flags = GENHD_FL_REMOVABLE | GENHD_FL_NO_PART;
2716         strcpy(disk->disk_name, pd->name);
2717         disk->private_data = pd;
2718
2719         pd->pkt_dev = MKDEV(pktdev_major, idx);
2720         ret = pkt_new_dev(pd, dev);
2721         if (ret)
2722                 goto out_mem2;
2723
2724         /* inherit events of the host device */
2725         disk->events = pd->bdev->bd_disk->events;
2726
2727         ret = add_disk(disk);
2728         if (ret)
2729                 goto out_mem2;
2730
2731         pkt_sysfs_dev_new(pd);
2732         pkt_debugfs_dev_new(pd);
2733
2734         pkt_devs[idx] = pd;
2735         if (pkt_dev)
2736                 *pkt_dev = pd->pkt_dev;
2737
2738         mutex_unlock(&ctl_mutex);
2739         return 0;
2740
2741 out_mem2:
2742         blk_cleanup_disk(disk);
2743 out_mem:
2744         mempool_exit(&pd->rb_pool);
2745         kfree(pd);
2746 out_mutex:
2747         mutex_unlock(&ctl_mutex);
2748         pr_err("setup of pktcdvd device failed\n");
2749         return ret;
2750 }
2751
2752 /*
2753  * Tear down mapping from pktcdvd device to CD-ROM device.
2754  */
2755 static int pkt_remove_dev(dev_t pkt_dev)
2756 {
2757         struct pktcdvd_device *pd;
2758         int idx;
2759         int ret = 0;
2760
2761         mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
2762
2763         for (idx = 0; idx < MAX_WRITERS; idx++) {
2764                 pd = pkt_devs[idx];
2765                 if (pd && (pd->pkt_dev == pkt_dev))
2766                         break;
2767         }
2768         if (idx == MAX_WRITERS) {
2769                 pr_debug("dev not setup\n");
2770                 ret = -ENXIO;
2771                 goto out;
2772         }
2773
2774         if (pd->refcnt > 0) {
2775                 ret = -EBUSY;
2776                 goto out;
2777         }
2778         if (!IS_ERR(pd->cdrw.thread))
2779                 kthread_stop(pd->cdrw.thread);
2780
2781         pkt_devs[idx] = NULL;
2782
2783         pkt_debugfs_dev_remove(pd);
2784         pkt_sysfs_dev_remove(pd);
2785
2786         blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);
2787
2788         remove_proc_entry(pd->name, pkt_proc);
2789         pkt_dbg(1, pd, "writer unmapped\n");
2790
2791         del_gendisk(pd->disk);
2792         blk_cleanup_disk(pd->disk);
2793
2794         mempool_exit(&pd->rb_pool);
2795         kfree(pd);
2796
2797         /* This is safe: open() is still holding a reference. */
2798         module_put(THIS_MODULE);
2799
2800 out:
2801         mutex_unlock(&ctl_mutex);
2802         return ret;
2803 }
2804
2805 static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
2806 {
2807         struct pktcdvd_device *pd;
2808
2809         mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
2810
2811         pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
2812         if (pd) {
2813                 ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
2814                 ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
2815         } else {
2816                 ctrl_cmd->dev = 0;
2817                 ctrl_cmd->pkt_dev = 0;
2818         }
2819         ctrl_cmd->num_devices = MAX_WRITERS;
2820
2821         mutex_unlock(&ctl_mutex);
2822 }
2823
2824 static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2825 {
2826         void __user *argp = (void __user *)arg;
2827         struct pkt_ctrl_command ctrl_cmd;
2828         int ret = 0;
2829         dev_t pkt_dev = 0;
2830
2831         if (cmd != PACKET_CTRL_CMD)
2832                 return -ENOTTY;
2833
2834         if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command)))
2835                 return -EFAULT;
2836
2837         switch (ctrl_cmd.command) {
2838         case PKT_CTRL_CMD_SETUP:
2839                 if (!capable(CAP_SYS_ADMIN))
2840                         return -EPERM;
2841                 ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
2842                 ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
2843                 break;
2844         case PKT_CTRL_CMD_TEARDOWN:
2845                 if (!capable(CAP_SYS_ADMIN))
2846                         return -EPERM;
2847                 ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
2848                 break;
2849         case PKT_CTRL_CMD_STATUS:
2850                 pkt_get_status(&ctrl_cmd);
2851                 break;
2852         default:
2853                 return -ENOTTY;
2854         }
2855
2856         if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command)))
2857                 return -EFAULT;
2858         return ret;
2859 }
2860
2861 #ifdef CONFIG_COMPAT
2862 static long pkt_ctl_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2863 {
2864         return pkt_ctl_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
2865 }
2866 #endif
2867
2868 static const struct file_operations pkt_ctl_fops = {
2869         .open           = nonseekable_open,
2870         .unlocked_ioctl = pkt_ctl_ioctl,
2871 #ifdef CONFIG_COMPAT
2872         .compat_ioctl   = pkt_ctl_compat_ioctl,
2873 #endif
2874         .owner          = THIS_MODULE,
2875         .llseek         = no_llseek,
2876 };
2877
2878 static struct miscdevice pkt_misc = {
2879         .minor          = MISC_DYNAMIC_MINOR,
2880         .name           = DRIVER_NAME,
2881         .nodename       = "pktcdvd/control",
2882         .fops           = &pkt_ctl_fops
2883 };
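/*
 * A minimal sketch of how userspace drives this control node, in the
 * spirit of the pktsetup(8) tool (assumes <linux/pktcdvd.h>, and that
 * dev already holds the new_encode_dev() encoding of the CD drive's
 * device number):
 *
 *	struct pkt_ctrl_command c = {
 *		.command = PKT_CTRL_CMD_SETUP,
 *		.dev     = dev,
 *	};
 *	int fd = open("/dev/pktcdvd/control", O_RDONLY);
 *	ioctl(fd, PACKET_CTRL_CMD, &c);
 *	// on success, c.pkt_dev holds the new pktcdvd device number
 */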
2884
2885 static int __init pkt_init(void)
2886 {
2887         int ret;
2888
2889         mutex_init(&ctl_mutex);
2890
2891         ret = mempool_init_kmalloc_pool(&psd_pool, PSD_POOL_SIZE,
2892                                     sizeof(struct packet_stacked_data));
2893         if (ret)
2894                 return ret;
2895         ret = bioset_init(&pkt_bio_set, BIO_POOL_SIZE, 0, 0);
2896         if (ret) {
2897                 mempool_exit(&psd_pool);
2898                 return ret;
2899         }
2900
2901         ret = register_blkdev(pktdev_major, DRIVER_NAME);
2902         if (ret < 0) {
2903                 pr_err("unable to register block device\n");
2904                 goto out2;
2905         }
2906         if (!pktdev_major)
2907                 pktdev_major = ret;
2908
2909         ret = pkt_sysfs_init();
2910         if (ret)
2911                 goto out;
2912
2913         pkt_debugfs_init();
2914
2915         ret = misc_register(&pkt_misc);
2916         if (ret) {
2917                 pr_err("unable to register misc device\n");
2918                 goto out_misc;
2919         }
2920
2921         pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL);
2922
2923         return 0;
2924
2925 out_misc:
2926         pkt_debugfs_cleanup();
2927         pkt_sysfs_cleanup();
2928 out:
2929         unregister_blkdev(pktdev_major, DRIVER_NAME);
2930 out2:
2931         mempool_exit(&psd_pool);
2932         bioset_exit(&pkt_bio_set);
2933         return ret;
2934 }
2935
2936 static void __exit pkt_exit(void)
2937 {
2938         remove_proc_entry("driver/"DRIVER_NAME, NULL);
2939         misc_deregister(&pkt_misc);
2940
2941         pkt_debugfs_cleanup();
2942         pkt_sysfs_cleanup();
2943
2944         unregister_blkdev(pktdev_major, DRIVER_NAME);
2945         mempool_exit(&psd_pool);
2946         bioset_exit(&pkt_bio_set);
2947 }
2948
2949 MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
2950 MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
2951 MODULE_LICENSE("GPL");
2952
2953 module_init(pkt_init);
2954 module_exit(pkt_exit);