// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Interface to Linux block layer for MTD 'translation layers'.
 *
 * Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>

#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>

#include "mtdcore.h"

/* All registered translation-layer types. */
static LIST_HEAD(blktrans_majors);
/* Serializes device reference get/put against the final release. */
static DEFINE_MUTEX(blktrans_ref_mutex);
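
/* Final kref release: tear down the gendisk and tag set, free the device. */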
static void blktrans_dev_release(struct kref *kref)
{
	struct mtd_blktrans_dev *dev =
		container_of(kref, struct mtd_blktrans_dev, ref);

	blk_cleanup_disk(dev->disk);
	blk_mq_free_tag_set(dev->tag_set);
	kfree(dev->tag_set);
	list_del(&dev->list);
	kfree(dev);
}
static struct mtd_blktrans_dev *blktrans_dev_get(struct gendisk *disk)
{
	struct mtd_blktrans_dev *dev;

	mutex_lock(&blktrans_ref_mutex);
	dev = disk->private_data;

	if (!dev)
		goto unlock;
	kref_get(&dev->ref);
unlock:
	mutex_unlock(&blktrans_ref_mutex);
	return dev;
}
static void blktrans_dev_put(struct mtd_blktrans_dev *dev)
{
	mutex_lock(&blktrans_ref_mutex);
	kref_put(&dev->ref, blktrans_dev_release);
	mutex_unlock(&blktrans_ref_mutex);
}
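
/*
 * Handle one segment of a request: bounds-check it, then translate it
 * into tr->blksize-sized calls to the translation layer's flush,
 * discard, readsect or writesect hooks.
 */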
static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
					struct mtd_blktrans_dev *dev,
					struct request *req)
{
	unsigned long block, nsect;
	char *buf;

	block = blk_rq_pos(req) << 9 >> tr->blkshift;
	nsect = blk_rq_cur_bytes(req) >> tr->blkshift;

	if (req_op(req) == REQ_OP_FLUSH) {
		if (tr->flush(dev))
			return BLK_STS_IOERR;
		return BLK_STS_OK;
	}

	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
	    get_capacity(req->rq_disk))
		return BLK_STS_IOERR;

	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		if (tr->discard(dev, block, nsect))
			return BLK_STS_IOERR;
		return BLK_STS_OK;
	case REQ_OP_READ:
		buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
		for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
			if (tr->readsect(dev, block, buf)) {
				kunmap(bio_page(req->bio));
				return BLK_STS_IOERR;
			}
		}
		kunmap(bio_page(req->bio));
		rq_flush_dcache_pages(req);
		return BLK_STS_OK;
	case REQ_OP_WRITE:
		if (!tr->writesect)
			return BLK_STS_IOERR;

		rq_flush_dcache_pages(req);
		buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
		for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
			if (tr->writesect(dev, block, buf)) {
				kunmap(bio_page(req->bio));
				return BLK_STS_IOERR;
			}
		}
		kunmap(bio_page(req->bio));
		return BLK_STS_OK;
	default:
		return BLK_STS_IOERR;
	}
}
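
/*
 * Translation layers poll this from their ->background() callback to
 * learn whether new I/O has arrived and the background work should
 * yield.
 */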
int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
{
	return dev->bg_stop;
}
EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);
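
/* Pop the next queued request, if any, and mark it started. */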
static struct request *mtd_next_request(struct mtd_blktrans_dev *dev)
{
	struct request *rq;

	rq = list_first_entry_or_null(&dev->rq_list, struct request, queuelist);
	if (rq) {
		list_del_init(&rq->queuelist);
		blk_mq_start_request(rq);
		return rq;
	}

	return NULL;
}
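
/*
 * Drain dev->rq_list, dropping the queue spinlock around each request
 * and around the translation layer's optional ->background() work.
 * Called with dev->queue_lock held.
 */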
static void mtd_blktrans_work(struct mtd_blktrans_dev *dev)
	__releases(&dev->queue_lock)
	__acquires(&dev->queue_lock)
{
	struct mtd_blktrans_ops *tr = dev->tr;
	struct request *req = NULL;
	int background_done = 0;

	while (1) {
		blk_status_t res;

		dev->bg_stop = false;
		if (!req && !(req = mtd_next_request(dev))) {
			if (tr->background && !background_done) {
				spin_unlock_irq(&dev->queue_lock);
				mutex_lock(&dev->lock);
				tr->background(dev);
				mutex_unlock(&dev->lock);
				spin_lock_irq(&dev->queue_lock);
				/*
				 * Do background processing just once per idle
				 * period.
				 */
				background_done = !dev->bg_stop;
				continue;
			}
			break;
		}

		spin_unlock_irq(&dev->queue_lock);

		mutex_lock(&dev->lock);
		res = do_blktrans_request(dev->tr, dev, req);
		mutex_unlock(&dev->lock);

		if (!blk_update_request(req, res, blk_rq_cur_bytes(req))) {
			__blk_mq_end_request(req, res);
			req = NULL;
		}

		background_done = 0;
		spin_lock_irq(&dev->queue_lock);
	}
}
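
/*
 * blk-mq ->queue_rq() hook: park the request on our private list and
 * process it synchronously (the tag set is marked BLK_MQ_F_BLOCKING).
 */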
static blk_status_t mtd_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct mtd_blktrans_dev *dev;

	dev = hctx->queue->queuedata;
	if (!dev) {
		/* The device went away while the request was in flight. */
		blk_mq_start_request(bd->rq);
		return BLK_STS_IOERR;
	}

	spin_lock_irq(&dev->queue_lock);
	list_add_tail(&bd->rq->queuelist, &dev->rq_list);
	mtd_blktrans_work(dev);
	spin_unlock_irq(&dev->queue_lock);

	return BLK_STS_OK;
}
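
/*
 * Block device ->open(): on first open, pin the device, the translation
 * layer module and the underlying MTD device.
 */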
static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
	int ret = 0;

	if (!dev)
		return -ERESTARTSYS; /* FIXME: busy loop! -arnd*/

	mutex_lock(&mtd_table_mutex);
	mutex_lock(&dev->lock);

	if (dev->open)
		goto unlock;

	kref_get(&dev->ref);
	__module_get(dev->tr->owner);

	if (!dev->mtd)
		goto unlock;

	if (dev->tr->open) {
		ret = dev->tr->open(dev);
		if (ret)
			goto error_put;
	}

	ret = __get_mtd_device(dev->mtd);
	if (ret)
		goto error_release;
	dev->file_mode = mode;

unlock:
	dev->open++;
	mutex_unlock(&dev->lock);
	mutex_unlock(&mtd_table_mutex);
	blktrans_dev_put(dev);
	return ret;

error_release:
	if (dev->tr->release)
		dev->tr->release(dev);
error_put:
	module_put(dev->tr->owner);
	kref_put(&dev->ref, blktrans_dev_release);
	mutex_unlock(&dev->lock);
	mutex_unlock(&mtd_table_mutex);
	blktrans_dev_put(dev);
	return ret;
}
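
/*
 * Block device ->release(): drop the references taken in blktrans_open()
 * once the last opener is gone.
 */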
static void blktrans_release(struct gendisk *disk, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);

	if (!dev)
		return;

	mutex_lock(&mtd_table_mutex);
	mutex_lock(&dev->lock);

	if (--dev->open)
		goto unlock;

	kref_put(&dev->ref, blktrans_dev_release);
	module_put(dev->tr->owner);

	if (dev->mtd) {
		if (dev->tr->release)
			dev->tr->release(dev);
		__put_mtd_device(dev->mtd);
	}
unlock:
	mutex_unlock(&dev->lock);
	mutex_unlock(&mtd_table_mutex);
	blktrans_dev_put(dev);
}
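
/* Block device ->getgeo(): forward to the translation layer, if it has one. */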
static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
	int ret = -ENXIO;

	if (!dev)
		return ret;

	mutex_lock(&dev->lock);

	if (!dev->mtd)
		goto unlock;

	ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : -ENOTTY;
unlock:
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;
}
static const struct block_device_operations mtd_block_ops = {
	.owner		= THIS_MODULE,
	.open		= blktrans_open,
	.release	= blktrans_release,
	.getgeo		= blktrans_getgeo,
};

static const struct blk_mq_ops mtd_mq_ops = {
	.queue_rq	= mtd_queue_rq,
};
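
/*
 * add_mtd_blktrans_dev() - pick a device number, create the blk-mq tag
 * set, queue and gendisk for one translation-layer device, and publish
 * it. Called from the layer's ->add_mtd() with mtd_table_mutex held.
 */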
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
	struct mtd_blktrans_ops *tr = new->tr;
	struct mtd_blktrans_dev *d;
	int last_devnum = -1;
	struct gendisk *gd;
	int ret;

	/* The caller must already hold mtd_table_mutex. */
	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	mutex_lock(&blktrans_ref_mutex);
	list_for_each_entry(d, &tr->devs, list) {
		if (new->devnum == -1) {
			/* Use first free number */
			if (d->devnum != last_devnum+1) {
				/* Found a free devnum. Plug it in here */
				new->devnum = last_devnum+1;
				list_add_tail(&new->list, &d->list);
				goto added;
			}
		} else if (d->devnum == new->devnum) {
			/* Required number taken */
			mutex_unlock(&blktrans_ref_mutex);
			return -EBUSY;
		} else if (d->devnum > new->devnum) {
			/* Required number was free */
			list_add_tail(&new->list, &d->list);
			goto added;
		}
		last_devnum = d->devnum;
	}

	ret = -EBUSY;
	if (new->devnum == -1)
		new->devnum = last_devnum+1;

	/* Check that the device and any partitions will get valid
	 * minor numbers and that the disk naming code below can cope
	 * with this number. */
	if (new->devnum > (MINORMASK >> tr->part_bits) ||
	    (tr->part_bits && new->devnum >= 27 * 26)) {
		mutex_unlock(&blktrans_ref_mutex);
		return ret;
	}

	list_add_tail(&new->list, &tr->devs);
 added:
	mutex_unlock(&blktrans_ref_mutex);

	mutex_init(&new->lock);
	kref_init(&new->ref);
	if (!tr->writesect)
		new->readonly = 1;

	ret = -ENOMEM;
	new->tag_set = kzalloc(sizeof(*new->tag_set), GFP_KERNEL);
	if (!new->tag_set)
		goto out_list_del;

	ret = blk_mq_alloc_sq_tag_set(new->tag_set, &mtd_mq_ops, 2,
			BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
	if (ret)
		goto out_kfree_tag_set;

	/* Create gendisk */
	gd = blk_mq_alloc_disk(new->tag_set, new);
	if (IS_ERR(gd)) {
		ret = PTR_ERR(gd);
		goto out_free_tag_set;
	}

	new->disk = gd;
	new->rq = new->disk->queue;
	gd->private_data = new;
	gd->major = tr->major;
	gd->first_minor = (new->devnum) << tr->part_bits;
	gd->minors = 1 << tr->part_bits;
	gd->fops = &mtd_block_ops;

	if (tr->part_bits) {
		if (new->devnum < 26)
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c", tr->name, 'a' + new->devnum);
		else
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c%c", tr->name,
				 'a' - 1 + new->devnum / 26,
				 'a' + new->devnum % 26);
	} else {
		snprintf(gd->disk_name, sizeof(gd->disk_name),
			 "%s%d", tr->name, new->devnum);
	}

	set_capacity(gd, ((u64)new->size * tr->blksize) >> 9);

	/* Create the request queue */
	spin_lock_init(&new->queue_lock);
	INIT_LIST_HEAD(&new->rq_list);

	if (tr->flush)
		blk_queue_write_cache(new->rq, true, false);

	blk_queue_logical_block_size(new->rq, tr->blksize);

	blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq);

	if (tr->discard) {
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, new->rq);
		blk_queue_max_discard_sectors(new->rq, UINT_MAX);
		new->rq->limits.discard_granularity = tr->blksize;
	}

	if (new->readonly)
		set_disk_ro(gd, 1);

	device_add_disk(&new->mtd->dev, gd, NULL);

	if (new->disk_attributes) {
		ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
					 new->disk_attributes);
		WARN_ON(ret);
	}
	return 0;

out_free_tag_set:
	blk_mq_free_tag_set(new->tag_set);
out_kfree_tag_set:
	kfree(new->tag_set);
out_list_del:
	list_del(&new->list);
	return ret;
}
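
/*
 * del_mtd_blktrans_dev() - undo add_mtd_blktrans_dev(): remove the
 * sysfs group and gendisk, fail any requests still in flight, and drop
 * the initial reference. Called with mtd_table_mutex held.
 */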
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
	unsigned long flags;

	/* The caller must already hold mtd_table_mutex. */
	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	if (old->disk_attributes)
		sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
				   old->disk_attributes);

	/* Stop new requests from arriving */
	del_gendisk(old->disk);

	/* Kill current requests */
	spin_lock_irqsave(&old->queue_lock, flags);
	old->rq->queuedata = NULL;
	spin_unlock_irqrestore(&old->queue_lock, flags);

	/* freeze+quiesce queue to ensure all requests are flushed */
	blk_mq_freeze_queue(old->rq);
	blk_mq_quiesce_queue(old->rq);
	blk_mq_unquiesce_queue(old->rq);
	blk_mq_unfreeze_queue(old->rq);

	/* If the device is currently open, tell the trans driver to close it,
	   then put the mtd device, and don't touch it again. */
	mutex_lock(&old->lock);
	if (old->open) {
		if (old->tr->release)
			old->tr->release(old);
		__put_mtd_device(old->mtd);
	}

	old->mtd = NULL;

	mutex_unlock(&old->lock);
	blktrans_dev_put(old);
	return 0;
}
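
/*
 * MTD notifier hooks: create or destroy translation-layer block devices
 * as the underlying MTD devices come and go.
 */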
static void blktrans_notify_remove(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;
	struct mtd_blktrans_dev *dev, *next;

	list_for_each_entry(tr, &blktrans_majors, list)
		list_for_each_entry_safe(dev, next, &tr->devs, list)
			if (dev->mtd == mtd)
				tr->remove_dev(dev);
}

static void blktrans_notify_add(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;

	if (mtd->type == MTD_ABSENT)
		return;

	list_for_each_entry(tr, &blktrans_majors, list)
		tr->add_mtd(tr, mtd);
}

static struct mtd_notifier blktrans_notifier = {
	.add = blktrans_notify_add,
	.remove = blktrans_notify_remove,
};
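
/*
 * register_mtd_blktrans() - register a translation-layer type: grab the
 * block major, hook the MTD add/remove notifier on first use, and offer
 * every MTD device already present to the new layer's ->add_mtd().
 *
 * A minimal usage sketch (the "myflash" names are hypothetical and only
 * illustrate the shape of struct mtd_blktrans_ops; ->add_mtd() would
 * allocate a struct mtd_blktrans_dev and call add_mtd_blktrans_dev()):
 *
 *	static struct mtd_blktrans_ops myflash_tr = {
 *		.name		= "myflash",
 *		.major		= 0,
 *		.part_bits	= 0,
 *		.blksize	= 512,
 *		.readsect	= myflash_readsect,
 *		.writesect	= myflash_writesect,
 *		.add_mtd	= myflash_add_mtd,
 *		.remove_dev	= myflash_remove_dev,
 *		.owner		= THIS_MODULE,
 *	};
 *
 * register_mtd_blktrans(&myflash_tr) then runs from module_init(), and
 * deregister_mtd_blktrans(&myflash_tr) from module_exit(). A major of 0
 * requests a dynamic major; the value register_blkdev() hands back is
 * stored into tr->major below.
 */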
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_info *mtd;
	int ret;

	/* Register the notifier if/when the first device type is
	   registered, to prevent the link/init ordering from messing
	   us up. */
	if (!blktrans_notifier.list.next)
		register_mtd_user(&blktrans_notifier);

	mutex_lock(&mtd_table_mutex);

	ret = register_blkdev(tr->major, tr->name);
	if (ret < 0) {
		printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
		       tr->name, tr->major, ret);
		mutex_unlock(&mtd_table_mutex);
		return ret;
	}

	if (ret)
		tr->major = ret;

	tr->blkshift = ffs(tr->blksize) - 1;

	INIT_LIST_HEAD(&tr->devs);
	list_add(&tr->list, &blktrans_majors);

	mtd_for_each_device(mtd)
		if (mtd->type != MTD_ABSENT)
			tr->add_mtd(tr, mtd);

	mutex_unlock(&mtd_table_mutex);
	return 0;
}
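
/*
 * deregister_mtd_blktrans() - the inverse of register_mtd_blktrans():
 * tear down every device this layer created, then release the major.
 */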
int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_blktrans_dev *dev, *next;

	mutex_lock(&mtd_table_mutex);

	/* Remove it from the list of active majors */
	list_del(&tr->list);

	list_for_each_entry_safe(dev, next, &tr->devs, list)
		tr->remove_dev(dev);

	unregister_blkdev(tr->major, tr->name);
	mutex_unlock(&mtd_table_mutex);

	BUG_ON(!list_empty(&tr->devs));
	return 0;
}
static void __exit mtd_blktrans_exit(void)
{
	/* No race here -- if someone's currently in register_mtd_blktrans
	   we're screwed anyway. */
	if (blktrans_notifier.list.next)
		unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);
EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");