switch (ioprio_class) {
default:
pr_err("bdi %s: bfq: bad prio class %d\n",
- bdi_dev_name(bfqq->bfqd->queue->backing_dev_info),
- ioprio_class);
+ bdi_dev_name(queue_to_disk(bfqq->bfqd->queue)->bdi),
+ ioprio_class);
fallthrough;
case IOPRIO_CLASS_NONE:
/*
* No prio set, inherit CPU scheduling settings.
*/
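The error path above can run before the disk's bdi device is registered, so the name lookup must tolerate a half-initialized bdi. A minimal sketch of the fallback behavior this relies on; field names are assumed from the backing-dev API, the real helper lives in mm/backing-dev.c:

/* Sketch, not the kernel implementation: bdi_dev_name() is assumed
 * to fall back to a fixed string when no device is attached yet. */
static const char bdi_unknown_name[] = "(unknown)";

static inline const char *bdi_dev_name_sketch(const struct backing_dev_info *bdi)
{
	if (!bdi || !bdi->dev)
		return bdi_unknown_name;
	return bdi->dev_name;	/* assumed cached copy of dev_name(bdi->dev) */
}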
const char *blkg_dev_name(struct blkcg_gq *blkg)
{
- /* some drivers (floppy) instantiate a queue w/o disk registered */
- if (blkg->q->backing_dev_info->dev)
- return bdi_dev_name(blkg->q->backing_dev_info);
- return NULL;
+ if (!queue_has_disk(blkg->q) || !queue_to_disk(blkg->q)->bdi->dev)
+ return NULL;
+ return bdi_dev_name(queue_to_disk(blkg->q)->bdi);
}
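blkg_dev_name() above is the first caller of the two helpers this patch uses throughout. Their definitions are not part of these hunks; a plausible sketch, assuming struct request_queue gained a gendisk back-pointer when the bdi moved:

/* Assumed helpers; the hunks below rely on them but do not define them. */
static inline bool queue_has_disk(struct request_queue *q)
{
	return q->disk != NULL;
}

static inline struct gendisk *queue_to_disk(struct request_queue *q)
{
	return q->disk;
}

With these, a queue created without a disk simply skips the bdi updates instead of dereferencing a queue-owned bdi that no longer exists.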
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
if (ret)
goto fail_id;
- q->backing_dev_info = bdi_alloc(node_id);
- if (!q->backing_dev_info)
- goto fail_split;
-
q->stats = blk_alloc_queue_stats();
if (!q->stats)
- goto fail_stats;
+ goto fail_split;
q->node = node_id;
if (percpu_ref_init(&q->q_usage_counter,
blk_queue_usage_counter_release,
PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
- goto fail_bdi;
+ goto fail_stats;
if (blkcg_init_queue(q))
goto fail_ref;
fail_ref:
percpu_ref_exit(&q->q_usage_counter);
-fail_bdi:
- blk_free_queue_stats(q->stats);
fail_stats:
- bdi_put(q->backing_dev_info);
+ blk_free_queue_stats(q->stats);
fail_split:
bioset_exit(&q->bio_split);
fail_id:
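Dropping the bdi allocation shortens the unwind chain: each failure label undoes exactly the allocations made before its goto, in reverse order, so removing one step retargets the later gotos one label up. The same pattern in self-contained form (plain userspace C, hypothetical names):

#include <stdlib.h>

struct q { void *split; void *stats; };

static struct q *q_alloc(void)
{
	struct q *q = calloc(1, sizeof(*q));

	if (!q)
		return NULL;
	q->split = malloc(32);
	if (!q->split)
		goto fail_q;
	q->stats = malloc(32);	/* bdi step removed: gotos shift up */
	if (!q->stats)
		goto fail_split;
	return q;

fail_split:
	free(q->split);
fail_q:
	free(q);
	return NULL;
}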
__blk_mq_dec_active_requests(hctx);
if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
- laptop_io_completion(q->backing_dev_info);
+ laptop_io_completion(queue_to_disk(q)->bdi);
rq_qos_done(q, rq);
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/pagemap.h>
+#include <linux/backing-dev-defs.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
limits->logical_block_size >> SECTOR_SHIFT);
limits->max_sectors = max_sectors;
- q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
+ if (!queue_has_disk(q))
+ return;
+ queue_to_disk(q)->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
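The shift converts 512-byte sectors to pages: with 4 KiB pages, PAGE_SHIFT - 9 = 3, i.e. eight sectors per page. A checkable sketch of the conversion (page size assumed):

#include <stdio.h>

#define SECTOR_SHIFT	9
#define PAGE_SHIFT	12	/* assumes 4 KiB pages */

int main(void)
{
	unsigned int max_sectors = 2560;	/* 1280 KiB of I/O */

	/* 2560 sectors >> 3 = 320 pages */
	printf("io_pages = %u\n", max_sectors >> (PAGE_SHIFT - SECTOR_SHIFT));
	return 0;
}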
* For read-ahead of large files to be effective, we need to read ahead
* at least twice the optimal I/O size.
*/
- q->backing_dev_info->ra_pages =
+ disk->bdi->ra_pages =
max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
- q->backing_dev_info->io_pages =
- queue_max_sectors(q) >> (PAGE_SHIFT - 9);
+ disk->bdi->io_pages = queue_max_sectors(q) >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL_GPL(disk_update_readahead);
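The read-ahead window is the larger of twice the optimal I/O size and the VM default. With 4 KiB pages and the usual 128 KiB default, a 1 MiB io_opt yields 512 pages (2 MiB); worked through in a standalone sketch (constants assumed):

#include <stdio.h>

#define PAGE_SIZE		4096UL	/* assumed */
#define VM_READAHEAD_PAGES	(128UL * 1024 / PAGE_SIZE)	/* 32 pages */

static unsigned long ra_pages(unsigned long io_opt_bytes)
{
	unsigned long want = io_opt_bytes * 2 / PAGE_SIZE;

	return want > VM_READAHEAD_PAGES ? want : VM_READAHEAD_PAGES;
}

int main(void)
{
	printf("%lu\n", ra_pages(1024 * 1024));	/* 512 pages = 2 MiB */
	printf("%lu\n", ra_pages(0));		/* falls back to 32 pages */
	return 0;
}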
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
blk_limits_io_opt(&q->limits, opt);
- q->backing_dev_info->ra_pages =
+ if (!queue_has_disk(q))
+ return;
+ queue_to_disk(q)->bdi->ra_pages =
max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
}
EXPORT_SYMBOL(blk_queue_io_opt);
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
- unsigned long ra_kb = q->backing_dev_info->ra_pages <<
- (PAGE_SHIFT - 10);
+ unsigned long ra_kb;
+
+ if (!queue_has_disk(q))
+ return -EINVAL;
+ ra_kb = queue_to_disk(q)->bdi->ra_pages << (PAGE_SHIFT - 10);
return queue_var_show(ra_kb, page);
}
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
unsigned long ra_kb;
- ssize_t ret = queue_var_store(&ra_kb, page, count);
+ ssize_t ret;
+
+ if (!queue_has_disk(q))
+ return -EINVAL;
+ ret = queue_var_store(&ra_kb, page, count);
if (ret < 0)
return ret;
-
- q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
-
+ queue_to_disk(q)->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
return ret;
}
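read_ahead_kb round-trips through two shifts: the store converts kilobytes to pages, the show converts back. With 4 KiB pages PAGE_SHIFT - 10 = 2, so stored values are effectively rounded down to a multiple of 4 KiB. A sketch of the round trip (page size assumed):

#include <stdio.h>

#define PAGE_SHIFT	12	/* assumed 4 KiB pages */

int main(void)
{
	unsigned long ra_kb = 130;	/* e.g. echoed into read_ahead_kb */
	unsigned long pages = ra_kb >> (PAGE_SHIFT - 10);	/* 32 pages */

	printf("stored: %lu pages, shown: %lu kB\n",
	       pages, pages << (PAGE_SHIFT - 10));	/* reads back as 128 */
	return 0;
}

Usage is unchanged, e.g. echo 256 > /sys/block/<disk>/queue/read_ahead_kb; a queue without a disk now rejects both the show and the store with -EINVAL, since there is no bdi to read or update.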
spin_lock_irq(&q->queue_lock);
q->limits.max_sectors = max_sectors_kb << 1;
- q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
+ if (queue_has_disk(q))
+ queue_to_disk(q)->bdi->io_pages =
+ max_sectors_kb >> (PAGE_SHIFT - 10);
spin_unlock_irq(&q->queue_lock);
return ret;
* e.g. blkcg_print_blkgs() to crash.
*/
blkcg_exit_queue(q);
-
- /*
- * Since the cgroup code may dereference the @q->backing_dev_info
- * pointer, only decrease its reference count after having removed the
- * association with the block cgroup controller.
- */
- bdi_put(q->backing_dev_info);
}
static bool wb_recent_wait(struct rq_wb *rwb)
{
- struct bdi_writeback *wb = &rwb->rqos.q->backing_dev_info->wb;
+ struct bdi_writeback *wb = &queue_to_disk(rwb->rqos.q)->bdi->wb;
return time_before(jiffies, wb->dirty_sleep + HZ);
}
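wb_recent_wait() asks whether a dirty-page sleep happened within the last second, now reaching the writeback state through the disk's bdi. The time_before() it uses compares jiffies wrap-safely via signed subtraction; a minimal userspace rendition (type widths assumed):

#include <stdio.h>

/* Wrap-safe comparison, same idea as the kernel's time_before(). */
#define time_before(a, b)	((long)((a) - (b)) < 0)

int main(void)
{
	unsigned long now = 5;	/* counter just wrapped past zero */
	unsigned long deadline = (unsigned long)-16 + 100;	/* wraps to 84 */

	printf("%d\n", time_before(now, deadline));	/* 1: still before */
	return 0;
}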
static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
{
- struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
+ struct backing_dev_info *bdi = queue_to_disk(rwb->rqos.q)->bdi;
struct rq_depth *rqd = &rwb->rq_depth;
u64 thislat;
static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
{
- struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
+ struct backing_dev_info *bdi = queue_to_disk(rwb->rqos.q)->bdi;
struct rq_depth *rqd = &rwb->rq_depth;
trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
status = latency_exceeded(rwb, cb->stat);
- trace_wbt_timer(rwb->rqos.q->backing_dev_info, status, rqd->scale_step,
- inflight);
+ trace_wbt_timer(queue_to_disk(rwb->rqos.q)->bdi, status,
+ rqd->scale_step, inflight);
/*
* If we exceeded the latency target, step down. If we did not,
dev_set_uevent_suppress(ddev, 0);
disk_uevent(disk, KOBJ_ADD);
- if (disk->queue->backing_dev_info->dev) {
- err = sysfs_create_link(&ddev->kobj,
- &disk->queue->backing_dev_info->dev->kobj,
- "bdi");
+ if (disk->bdi->dev) {
+ err = sysfs_create_link(&ddev->kobj, &disk->bdi->dev->kobj,
+ "bdi");
WARN_ON(err);
}
}
disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
disk->flags |= GENHD_FL_NO_PART_SCAN;
} else {
- struct backing_dev_info *bdi = disk->queue->backing_dev_info;
struct device *dev = disk_to_dev(disk);
/* Register BDI before referencing it from bdev */
dev->devt = MKDEV(disk->major, disk->first_minor);
- ret = bdi_register(bdi, "%u:%u",
+ ret = bdi_register(disk->bdi, "%u:%u",
disk->major, disk->first_minor);
WARN_ON(ret);
- bdi_set_owner(bdi, dev);
+ bdi_set_owner(disk->bdi, dev);
bdev_add(disk->part0, dev->devt);
}
register_disk(parent, disk, groups);
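Registering the bdi under the disk's major:minor keeps its /sys/class/bdi/<major>:<minor> name stable regardless of which structure owns the bdi. The dev_t packing the name derives from, with macros assumed to match the kernel's MKDEV/MAJOR/MINOR:

#include <stdio.h>

#define MINORBITS	20
#define MKDEV(ma, mi)	(((ma) << MINORBITS) | (mi))
#define MAJOR(dev)	((unsigned int)((dev) >> MINORBITS))
#define MINOR(dev)	((unsigned int)((dev) & ((1U << MINORBITS) - 1)))

int main(void)
{
	unsigned int devt = MKDEV(8, 0);	/* sda: major 8, minor 0 */

	/* the bdi is registered as "8:0" under /sys/class/bdi/ */
	printf("%u:%u\n", MAJOR(devt), MINOR(devt));
	return 0;
}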
* Unregister bdi before releasing device numbers (as they can
* get reused and we'd get clashes in sysfs).
*/
- bdi_unregister(disk->queue->backing_dev_info);
+ bdi_unregister(disk->bdi);
}
blk_unregister_queue(disk);
might_sleep();
+ bdi_put(disk->bdi);
if (MAJOR(dev->devt) == BLOCK_EXT_MAJOR)
blk_free_ext_minor(MINOR(dev->devt));
disk_release_events(disk);
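With the bdi hanging off the gendisk, its lifetime follows the disk: one reference from bdi_alloc() at disk allocation, dropped here in disk_release(), plus one per opened block device (see the bdi_get() hunks further down). A toy refcount model of that ownership (userspace sketch, hypothetical names):

#include <stdlib.h>

struct bdi { int refs; };

static struct bdi *bdi_alloc(void)
{
	struct bdi *b = calloc(1, sizeof(*b));

	if (b)
		b->refs = 1;	/* reference owned by the disk */
	return b;
}

static struct bdi *bdi_get(struct bdi *b)
{
	b->refs++;		/* e.g. one per opened bdev */
	return b;
}

static void bdi_put(struct bdi *b)
{
	if (--b->refs == 0)
		free(b);	/* last opener or disk_release() frees it */
}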
if (!disk)
return NULL;
+ disk->bdi = bdi_alloc(node_id);
+ if (!disk->bdi)
+ goto out_free_disk;
+
disk->part0 = bdev_alloc(disk, 0);
if (!disk->part0)
- goto out_free_disk;
+ goto out_free_bdi;
disk->node_id = node_id;
mutex_init(&disk->open_mutex);
out_destroy_part_tbl:
xa_destroy(&disk->part_tbl);
iput(disk->part0->bd_inode);
+out_free_bdi:
+ bdi_put(disk->bdi);
out_free_disk:
kfree(disk);
return NULL;
static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t sector,
enum drbd_read_balancing rbm)
{
- struct backing_dev_info *bdi;
int stripe_shift;
switch (rbm) {
case RB_CONGESTED_REMOTE:
- bdi = device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
- return bdi_read_congested(bdi);
+ return bdi_read_congested(
+ device->ldev->backing_bdev->bd_disk->bdi);
case RB_LEAST_PENDING:
return atomic_read(&device->local_cnt) >
atomic_read(&device->ap_pending_cnt) + atomic_read(&device->rs_pending_cnt);
wakeup = (pd->write_congestion_on > 0
&& pd->bio_queue_size <= pd->write_congestion_off);
spin_unlock(&pd->lock);
- if (wakeup) {
- clear_bdi_congested(pd->disk->queue->backing_dev_info,
- BLK_RW_ASYNC);
- }
+ if (wakeup)
+ clear_bdi_congested(pd->disk->bdi, BLK_RW_ASYNC);
pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
pkt_set_state(pkt, PACKET_WAITING_STATE);
spin_lock(&pd->lock);
if (pd->write_congestion_on > 0
&& pd->bio_queue_size >= pd->write_congestion_on) {
- set_bdi_congested(q->backing_dev_info, BLK_RW_ASYNC);
+ set_bdi_congested(bio->bi_bdev->bd_disk->bdi, BLK_RW_ASYNC);
do {
spin_unlock(&pd->lock);
congestion_wait(BLK_RW_ASYNC, HZ);
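pktcdvd throttles writers with two watermarks: congestion switches on once write_congestion_on bios are queued and only clears after the queue drains below write_congestion_off, so the state cannot flap around a single threshold. The hysteresis in isolation (self-contained sketch):

#include <stdbool.h>
#include <stdio.h>

struct pd { int queue_size, on, off; bool congested; };

static void update(struct pd *pd)
{
	if (!pd->congested && pd->queue_size >= pd->on)
		pd->congested = true;	/* set_bdi_congested() */
	else if (pd->congested && pd->queue_size <= pd->off)
		pd->congested = false;	/* clear_bdi_congested() */
}

int main(void)
{
	struct pd pd = { .on = 100, .off = 50 };

	for (pd.queue_size = 0; pd.queue_size <= 120; pd.queue_size += 30)
		update(&pd);
	printf("%d\n", pd.congested);	/* 1: past the on watermark */
	return 0;
}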
if (!bdev->bd_openers) {
set_init_blocksize(bdev);
if (bdev->bd_bdi == &noop_backing_dev_info)
- bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info);
+ bdev->bd_bdi = bdi_get(disk->bdi);
}
if (test_bit(GD_NEED_PART_SCAN, &disk->state))
bdev_disk_changed(disk, false);
disk->open_partitions++;
set_init_blocksize(part);
if (part->bd_bdi == &noop_backing_dev_info)
- part->bd_bdi = bdi_get(disk->queue->backing_dev_info);
+ part->bd_bdi = bdi_get(disk->bdi);
done:
part->bd_openers++;
return 0;
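The open paths cache a bdi reference in the block device, using noop_backing_dev_info as a "not yet set" sentinel rather than NULL so readers never have to check for a missing bdi. The sentinel pattern reduced to its core (sketch, hypothetical types):

#include <stdio.h>

struct bdi { const char *name; };

static struct bdi noop_bdi = { "noop" };	/* always-valid placeholder */

struct bdev { struct bdi *bd_bdi; };

static void bdev_open(struct bdev *bdev, struct bdi *disk_bdi)
{
	/* first opener swaps the sentinel for the disk's real bdi */
	if (bdev->bd_bdi == &noop_bdi)
		bdev->bd_bdi = disk_bdi;	/* kernel also takes a ref here */
}

int main(void)
{
	struct bdi sda = { "8:0" };
	struct bdev bdev = { .bd_bdi = &noop_bdi };

	bdev_open(&bdev, &sda);
	printf("%s\n", bdev.bd_bdi->name);	/* "8:0" */
	return 0;
}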
#include <linux/blkdev.h>
#include <linux/sched/signal.h>
+#include <linux/backing-dev-defs.h>
#include "fat.h"
struct fatent_operations {
#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
-#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/pfn.h>
struct blk_mq_hw_ctx **queue_hw_ctx;
unsigned int nr_hw_queues;
- struct backing_dev_info *backing_dev_info;
-
/*
* The queue owner gets to use this for whatever they like.
* ll_rw_blk doesn't touch it.
struct mutex open_mutex; /* open/close mutex */
unsigned open_partitions; /* number of open partitions */
+ struct backing_dev_info *bdi;
struct kobject *slave_dir;
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
struct list_head slave_bdevs;
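After the struct changes, every lookup funnels through the gendisk: the request_queue no longer carries a bdi at all. The one remaining access pattern, as a kernel-context sketch built on the queue_has_disk()/queue_to_disk() helpers assumed earlier:

/* Sketch: queue-side code must detour via the disk and guard against
 * queues that never got one; disk-side code uses disk->bdi directly. */
static unsigned long queue_ra_pages(struct request_queue *q)
{
	if (!queue_has_disk(q))
		return 0;	/* a disk-less queue has no bdi */
	return queue_to_disk(q)->bdi->ra_pages;
}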