lim->alignment_offset = 0;
lim->io_opt = 0;
lim->misaligned = 0;
- lim->zoned = BLK_ZONED_NONE;
+ lim->zoned = false;
lim->zone_write_granularity = 0;
lim->dma_alignment = 511;
}
}
EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
-static bool disk_has_partitions(struct gendisk *disk)
-{
- unsigned long idx;
- struct block_device *part;
- bool ret = false;
-
- rcu_read_lock();
- xa_for_each(&disk->part_tbl, idx, part) {
- if (bdev_is_partition(part)) {
- ret = true;
- break;
- }
- }
- rcu_read_unlock();
-
- return ret;
-}
-
/**
- * disk_set_zoned - configure the zoned model for a disk
+ * disk_set_zoned - configure a disk as zoned or not
 * @disk: the gendisk of the queue to configure
- * @model: the zoned model to set
- *
- * Set the zoned model of @disk to @model.
+ * @zoned: whether or not the disk is zoned
*
- * When @model is BLK_ZONED_HM (host managed), this should be called only
- * if zoned block device support is enabled (CONFIG_BLK_DEV_ZONED option).
- * If @model specifies BLK_ZONED_HA (host aware), the effective model used
- * depends on CONFIG_BLK_DEV_ZONED settings and on the existence of partitions
- * on the disk.
+ * When @zoned is %true, this should be called only if zoned block device
+ * support is enabled (CONFIG_BLK_DEV_ZONED option).
*/
-void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
+void disk_set_zoned(struct gendisk *disk, bool zoned)
{
struct request_queue *q = disk->queue;
- unsigned int old_model = q->limits.zoned;
- switch (model) {
- case BLK_ZONED_HM:
- /*
- * Host managed devices are supported only if
- * CONFIG_BLK_DEV_ZONED is enabled.
- */
+ if (zoned) {
WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));
- break;
- case BLK_ZONED_HA:
- /*
- * Host aware devices can be treated either as regular block
- * devices (similar to drive managed devices) or as zoned block
- * devices to take advantage of the zone command set, similarly
- * to host managed devices. We try the latter if there are no
- * partitions and zoned block device support is enabled, else
- * we do nothing special as far as the block layer is concerned.
- */
- if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) ||
- disk_has_partitions(disk))
- model = BLK_ZONED_NONE;
- break;
- case BLK_ZONED_NONE:
- default:
- if (WARN_ON_ONCE(model != BLK_ZONED_NONE))
- model = BLK_ZONED_NONE;
- break;
- }
- q->limits.zoned = model;
- if (model != BLK_ZONED_NONE) {
/*
* Set the zone write granularity to the device logical block
* size by default. The driver can change this value if needed.
*/
+ q->limits.zoned = true;
blk_queue_zone_write_granularity(q,
queue_logical_block_size(q));
- } else if (old_model != BLK_ZONED_NONE) {
+ } else if (q->limits.zoned) {
+ q->limits.zoned = false;
disk_clear_zone_settings(disk);
}
}
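With the zoned model enum gone, flagging a disk as zoned reduces to one boolean call plus the usual zone-geometry setup. A minimal sketch of the pattern the null_blk, ublk, virtio-blk and NVMe hunks below all follow (my_setup_zoned() and its parameters are illustrative names, not part of this patch):

#include <linux/blkdev.h>

/* Sketch only: mirrors the driver hunks in this patch. */
static void my_setup_zoned(struct gendisk *disk, unsigned int zone_sectors)
{
	struct request_queue *q = disk->queue;

	disk_set_zoned(disk, true);	/* was disk_set_zoned(disk, BLK_ZONED_HM) */
	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
	blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
	blk_queue_chunk_sectors(q, zone_sectors);
}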
static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
- switch (blk_queue_zoned_model(q)) {
- case BLK_ZONED_HA:
- return sprintf(page, "host-aware\n");
- case BLK_ZONED_HM:
+ if (blk_queue_is_zoned(q))
return sprintf(page, "host-managed\n");
- default:
- return sprintf(page, "none\n");
- }
+ return sprintf(page, "none\n");
}
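The user-visible effect of the queue_zoned_show() change is that /sys/block/<dev>/queue/zoned now only ever reads "host-managed" or "none"; a host-aware drive that used to report "host-aware" shows up as "none" unless the driver enabled zoned mode. A small userspace sketch for checking this (the device name sda is just an example):

#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f = fopen("/sys/block/sda/queue/zoned", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);	/* "host-managed" or "none" after this patch */
	fclose(f);
	return 0;
}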
static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
- * Partitions are not supported on zoned block devices that are used as
- * such.
+ * Partitions are not supported on zoned block devices.
*/
- switch (disk->queue->limits.zoned) {
- case BLK_ZONED_HM:
+ if (bdev_is_zoned(disk->part0)) {
pr_warn("%s: partitions not supported on host managed zoned block device\n",
disk->disk_name);
return ERR_PTR(-ENXIO);
- case BLK_ZONED_HA:
- pr_info("%s: disabling host aware zoned block device support due to partitions\n",
- disk->disk_name);
- disk_set_zoned(disk, BLK_ZONED_NONE);
- break;
- case BLK_ZONED_NONE:
- break;
}
if (xa_load(&disk->part_tbl, partno))
/*
* Partitions are not supported on host managed zoned block devices.
*/
- if (disk->queue->limits.zoned == BLK_ZONED_HM) {
+ if (bdev_is_zoned(disk->part0)) {
pr_warn("%s: ignoring partition table on host managed zoned block device\n",
disk->disk_name);
ret = 0;
struct nullb_device *dev = nullb->dev;
struct request_queue *q = nullb->q;
- disk_set_zoned(nullb->disk, BLK_ZONED_HM);
+ disk_set_zoned(nullb->disk, true);
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
blk_queue_chunk_sectors(q, dev->zone_size_sects);
{
const struct ublk_param_zoned *p = &ub->params.zoned;
- disk_set_zoned(ub->ub_disk, BLK_ZONED_HM);
+ disk_set_zoned(ub->ub_disk, true);
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ub->ub_disk->queue);
blk_queue_required_elevator_features(ub->ub_disk->queue,
ELEVATOR_F_ZBD_SEQ_WRITE);
dev_dbg(&vdev->dev, "probing host-managed zoned device\n");
- disk_set_zoned(vblk->disk, BLK_ZONED_HM);
+ disk_set_zoned(vblk->disk, true);
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
virtio_cread(vdev, struct virtio_blk_config,
*/
if (!(job->flags & BIT(DM_KCOPYD_WRITE_SEQ))) {
for (i = 0; i < job->num_dests; i++) {
- if (bdev_zoned_model(dests[i].bdev) == BLK_ZONED_HM) {
+ if (bdev_is_zoned(dests[i].bdev)) {
job->flags |= BIT(DM_KCOPYD_WRITE_SEQ);
break;
}
return true;
}
-static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
+static int device_not_zoned(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
{
- struct request_queue *q = bdev_get_queue(dev->bdev);
- enum blk_zoned_model *zoned_model = data;
+ bool *zoned = data;
- return blk_queue_zoned_model(q) != *zoned_model;
+ return bdev_is_zoned(dev->bdev) != *zoned;
}
static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
- struct request_queue *q = bdev_get_queue(dev->bdev);
-
- return blk_queue_zoned_model(q) != BLK_ZONED_NONE;
+ return bdev_is_zoned(dev->bdev);
}
/*
* Check the device zoned model based on the target feature flag. If the target
* has the DM_TARGET_ZONED_HM feature flag set, host-managed zoned devices are
* only supported. But if the target has the DM_TARGET_MIXED_ZONED_MODEL
* feature set, the devices can have any
* zoned model with all zoned devices having the same zone size.
*/
-static bool dm_table_supports_zoned_model(struct dm_table *t,
- enum blk_zoned_model zoned_model)
+static bool dm_table_supports_zoned(struct dm_table *t, bool zoned)
{
for (unsigned int i = 0; i < t->num_targets; i++) {
struct dm_target *ti = dm_table_get_target(t, i);
if (dm_target_supports_zoned_hm(ti->type)) {
if (!ti->type->iterate_devices ||
- ti->type->iterate_devices(ti, device_not_zoned_model,
- &zoned_model))
+ ti->type->iterate_devices(ti, device_not_zoned,
+ &zoned))
return false;
} else if (!dm_target_supports_mixed_zoned_model(ti->type)) {
- if (zoned_model == BLK_ZONED_HM)
+ if (zoned)
return false;
}
}
* zone sectors, if the destination device is a zoned block device, it shall
* have the specified zone_sectors.
*/
-static int validate_hardware_zoned_model(struct dm_table *t,
- enum blk_zoned_model zoned_model,
- unsigned int zone_sectors)
+static int validate_hardware_zoned(struct dm_table *t, bool zoned,
+ unsigned int zone_sectors)
{
- if (zoned_model == BLK_ZONED_NONE)
+ if (!zoned)
return 0;
- if (!dm_table_supports_zoned_model(t, zoned_model)) {
+ if (!dm_table_supports_zoned(t, zoned)) {
DMERR("%s: zoned model is not consistent across all devices",
dm_device_name(t->md));
return -EINVAL;
struct queue_limits *limits)
{
struct queue_limits ti_limits;
- enum blk_zoned_model zoned_model = BLK_ZONED_NONE;
unsigned int zone_sectors = 0;
+ bool zoned = false;
blk_set_stacking_limits(limits);
ti->type->iterate_devices(ti, dm_set_device_limits,
&ti_limits);
- if (zoned_model == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) {
+ if (!zoned && ti_limits.zoned) {
/*
* After stacking all limits, validate all devices
* in table support this zoned model and zone sectors.
*/
- zoned_model = ti_limits.zoned;
+ zoned = ti_limits.zoned;
zone_sectors = ti_limits.chunk_sectors;
}
* Verify that the zoned model and zone sectors, as determined before
* any .io_hints override, are the same across all devices in the table.
* - this is especially relevant if .io_hints is emulating a disk-managed
- * zoned model (aka BLK_ZONED_NONE) on host-managed zoned block devices.
+ * zoned model on host-managed zoned block devices.
* BUT...
*/
- if (limits->zoned != BLK_ZONED_NONE) {
+ if (limits->zoned) {
/*
* ...IF the above limits stacking determined a zoned model
* validate that all of the table's devices conform to it.
*/
- zoned_model = limits->zoned;
+ zoned = limits->zoned;
zone_sectors = limits->chunk_sectors;
}
- if (validate_hardware_zoned_model(t, zoned_model, zone_sectors))
+ if (validate_hardware_zoned(t, zoned, zone_sectors))
return -EINVAL;
return validate_hardware_logical_block_alignment(t, limits);
{
struct dmz_dev *dev = &zmd->dev[num];
- if (bdev_zoned_model(dev->bdev) == BLK_ZONED_NONE)
+ if (!bdev_is_zoned(dev->bdev))
dmz_dev_info(dev, "Regular block device");
else
- dmz_dev_info(dev, "Host-%s zoned block device",
- bdev_zoned_model(dev->bdev) == BLK_ZONED_HA ?
- "aware" : "managed");
+ dmz_dev_info(dev, "Host-managed zoned block device");
+
if (zmd->sb_version > 1) {
sector_t sector_offset =
dev->zone_offset << zmd->zone_nr_sectors_shift;
}
bdev = ddev->bdev;
- if (bdev_zoned_model(bdev) == BLK_ZONED_NONE) {
+ if (!bdev_is_zoned(bdev)) {
if (nr_devs == 1) {
ti->error = "Invalid regular device";
goto err;
limits->max_sectors = chunk_sectors;
/* We are exposing a drive-managed zoned block device */
- limits->zoned = BLK_ZONED_NONE;
+ limits->zoned = false;
}
/*
goto free_data;
}
- disk_set_zoned(ns->disk, BLK_ZONED_HM);
+ disk_set_zoned(ns->disk, true);
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
disk_set_max_open_zones(ns->disk, le32_to_cpu(id->mor) + 1);
disk_set_max_active_zones(ns->disk, le32_to_cpu(id->mar) + 1);
bool used;
/* For ZBC devices */
- enum blk_zoned_model zmodel;
+ bool zoned;
unsigned int zcap;
unsigned int zsize;
unsigned int zsize_shift;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
static bool sdebug_allow_restart;
-/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
-static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
+static enum {
+ BLK_ZONED_NONE = 0,
+ BLK_ZONED_HA = 1,
+ BLK_ZONED_HM = 2,
+} sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;
enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
arr[1] = 1; /* non rotating medium (e.g. solid state) */
arr[2] = 0;
arr[3] = 5; /* less than 1.8" */
- if (devip->zmodel == BLK_ZONED_HA)
- arr[4] = 1 << 4; /* zoned field = 01b */
return 0x3c;
}
if (! arr)
return DID_REQUEUE << 16;
is_disk = (sdebug_ptype == TYPE_DISK);
- is_zbc = (devip->zmodel != BLK_ZONED_NONE);
+ is_zbc = devip->zoned;
is_disk_zbc = (is_disk || is_zbc);
have_wlun = scsi_is_wlun(scp->device->lun);
if (have_wlun)
* Since the scsi_debug READ CAPACITY implementation always reports the
* total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
*/
- if (devip->zmodel == BLK_ZONED_HM)
+ if (devip->zoned)
arr[12] |= 1 << 4;
arr[15] = sdebug_lowest_aligned & 0xff;
msense_6 = (MODE_SENSE == cmd[0]);
llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
is_disk = (sdebug_ptype == TYPE_DISK);
- is_zbc = (devip->zmodel != BLK_ZONED_NONE);
+ is_zbc = devip->zoned;
if ((is_disk || is_zbc) && !dbd)
bd_len = llbaa ? 16 : 8;
else
struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
if (!write) {
- if (devip->zmodel == BLK_ZONED_HA)
- return 0;
/* For host-managed, reads cannot cross zone type boundaries */
if (zsp->z_type != zsp_end->z_type) {
mk_sense_buffer(scp, ILLEGAL_REQUEST,
if (devip->zcap < devip->zsize)
devip->nr_zones += devip->nr_seq_zones;
- if (devip->zmodel == BLK_ZONED_HM) {
+ if (devip->zoned) {
/* zbc_max_open_zones can be 0, meaning "not reported" */
if (sdeb_zbc_max_open >= devip->nr_zones - 1)
devip->max_open = (devip->nr_zones - 1) / 2;
zsp->z_size =
min_t(u64, devip->zsize, capacity - zstart);
} else if ((zstart & (devip->zsize - 1)) == 0) {
- if (devip->zmodel == BLK_ZONED_HM)
+ if (devip->zoned)
zsp->z_type = ZBC_ZTYPE_SWR;
else
zsp->z_type = ZBC_ZTYPE_SWP;
}
devip->sdbg_host = sdbg_host;
if (sdeb_zbc_in_use) {
- devip->zmodel = sdeb_zbc_model;
+ devip->zoned = sdeb_zbc_model == BLK_ZONED_HM;
if (sdebug_device_create_zones(devip)) {
kfree(devip);
return NULL;
}
} else {
- devip->zmodel = BLK_ZONED_NONE;
+ devip->zoned = false;
}
devip->create_ts = ktime_get_boottime();
atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
struct request_queue *q = sdkp->disk->queue;
struct scsi_vpd *vpd;
u16 rot;
- u8 zoned;
rcu_read_lock();
vpd = rcu_dereference(sdkp->device->vpd_pgb1);
}
rot = get_unaligned_be16(&vpd->data[4]);
- zoned = (vpd->data[8] >> 4) & 3;
+ sdkp->zoned = (vpd->data[8] >> 4) & 3;
rcu_read_unlock();
if (rot == 1) {
if (sdkp->device->type == TYPE_ZBC) {
/*
- * Host-managed: Per ZBC and ZAC specifications, writes in
- * sequential write required zones of host-managed devices must
- * be aligned to the device physical block size.
+ * Host-managed.
+ */
+ disk_set_zoned(sdkp->disk, true);
+
+ /*
+ * Per ZBC and ZAC specifications, writes in sequential write
+ * required zones of host-managed devices must be aligned to
+ * the device physical block size.
*/
- disk_set_zoned(sdkp->disk, BLK_ZONED_HM);
blk_queue_zone_write_granularity(q, sdkp->physical_block_size);
} else {
- sdkp->zoned = zoned;
- if (sdkp->zoned == 1) {
- /* Host-aware */
- disk_set_zoned(sdkp->disk, BLK_ZONED_HA);
- } else {
- /* Regular disk or drive managed disk */
- disk_set_zoned(sdkp->disk, BLK_ZONED_NONE);
- }
+ /*
+ * Anything else. This includes host-aware devices that we treat
+ * as conventional.
+ */
+ disk_set_zoned(sdkp->disk, false);
}
if (!sdkp->first_scan)
return;
- if (blk_queue_is_zoned(q)) {
- sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
- q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware");
- } else {
- if (sdkp->zoned == 1)
- sd_printk(KERN_NOTICE, sdkp,
- "Host-aware SMR disk used as regular disk\n");
- else if (sdkp->zoned == 2)
- sd_printk(KERN_NOTICE, sdkp,
- "Drive-managed SMR disk\n");
- }
+ if (blk_queue_is_zoned(q))
+ sd_printk(KERN_NOTICE, sdkp, "Host-managed zoned block device\n");
+ else if (sdkp->zoned == 1)
+ sd_printk(KERN_NOTICE, sdkp, "Host-aware SMR disk used as regular disk\n");
+ else if (sdkp->zoned == 2)
+ sd_printk(KERN_NOTICE, sdkp, "Drive-managed SMR disk\n");
}
/**
/*
* For all zoned disks, initialize zone append emulation data if not
- * already done. This is necessary also for host-aware disks used as
- * regular disks due to the presence of partitions as these partitions
- * may be deleted and the disk zoned model changed back from
- * BLK_ZONED_NONE to BLK_ZONED_HA.
+ * already done.
*/
if (sd_is_zoned(sdkp) && !sdkp->zone_wp_update_buf) {
ret = sd_zbc_init_disk(sdkp);
sdkp->device->use_10_for_rw = 0;
sdkp->device->use_16_for_sync = 1;
- if (!blk_queue_is_zoned(q)) {
- /*
- * This can happen for a host aware disk with partitions.
- * The block device zone model was already cleared by
- * disk_set_zoned(). Only free the scsi disk zone
- * information and exit early.
- */
- sd_zbc_free_zone_info(sdkp);
- return 0;
- }
-
/* Check zoned block device characteristics (unconstrained reads) */
ret = sd_zbc_check_zoned_characteristics(sdkp, buf);
if (ret)
kvfree(zones);
- switch (bdev_zoned_model(bdev)) {
- case BLK_ZONED_HM:
+ if (bdev_is_zoned(bdev)) {
model = "host-managed zoned";
emulated = "";
- break;
- case BLK_ZONED_HA:
- model = "host-aware zoned";
- emulated = "";
- break;
- case BLK_ZONED_NONE:
+ } else {
model = "regular";
emulated = "emulated ";
- break;
- default:
- /* Just in case */
- btrfs_err_in_rcu(fs_info, "zoned: unsupported model %d on %s",
- bdev_zoned_model(bdev),
- rcu_str_deref(device->name));
- ret = -EOPNOTSUPP;
- goto out_free_zone_info;
}
btrfs_info_in_rcu(fs_info,
out:
kvfree(zones);
-out_free_zone_info:
btrfs_destroy_dev_zone_info(device);
-
return ret;
}
struct btrfs_device *device;
list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
- if (device->bdev &&
- bdev_zoned_model(device->bdev) == BLK_ZONED_HM) {
+ if (device->bdev && bdev_is_zoned(device->bdev)) {
btrfs_err(fs_info,
"zoned: mode not enabled but zoned device found: %pg",
device->bdev);
}
/* Do not allow host-managed zoned devices */
- return bdev_zoned_model(bdev) != BLK_ZONED_HM;
+ return !bdev_is_zoned(bdev);
}
static inline bool btrfs_check_super_location(struct btrfs_device *device, u64 pos)
}
blkaddr -= FDEV(devi).start_blk;
}
- return bdev_zoned_model(FDEV(devi).bdev) == BLK_ZONED_HM &&
+ return bdev_is_zoned(FDEV(devi).bdev) &&
f2fs_blkz_is_seq(sbi, devi, blkaddr) &&
(blkaddr % sbi->blocks_per_blkz == sbi->blocks_per_blkz - 1);
}
sbi->aligned_blksize = false;
#ifdef CONFIG_BLK_DEV_ZONED
- if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
- !f2fs_sb_has_blkzoned(sbi)) {
- f2fs_err(sbi, "Zoned block device feature not enabled");
- return -EINVAL;
- }
- if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
+ if (bdev_is_zoned(FDEV(i).bdev)) {
+ if (!f2fs_sb_has_blkzoned(sbi)) {
+ f2fs_err(sbi, "Zoned block device feature not enabled");
+ return -EINVAL;
+ }
if (init_blkz_info(sbi, i)) {
f2fs_err(sbi, "Failed to initialize F2FS blkzone information");
return -EINVAL;
}
if (max_devices == 1)
break;
- f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
+ f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: Host-managed)",
i, FDEV(i).path,
FDEV(i).total_segments,
- FDEV(i).start_blk, FDEV(i).end_blk,
- bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
- "Host-aware" : "Host-managed");
+ FDEV(i).start_blk, FDEV(i).end_blk);
continue;
}
#endif
return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}
-/*
- * Zoned block device models (zoned limit).
- *
- * Note: This needs to be ordered from the least to the most severe
- * restrictions for the inheritance in blk_stack_limits() to work.
- */
-enum blk_zoned_model {
- BLK_ZONED_NONE = 0, /* Regular block device */
- BLK_ZONED_HA, /* Host-aware zoned block device */
- BLK_ZONED_HM, /* Host-managed zoned block device */
-};
-
/*
* BLK_BOUNCE_NONE: never bounce (default)
* BLK_BOUNCE_HIGH: bounce all highmem pages
unsigned char misaligned;
unsigned char discard_misaligned;
unsigned char raid_partial_stripes_expensive;
- enum blk_zoned_model zoned;
+ bool zoned;
/*
* Drivers that set dma_alignment to less than 511 must be prepared to
typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
void *data);
-void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model);
+void disk_set_zoned(struct gendisk *disk, bool zoned);
#define BLK_ALL_ZONES ((unsigned int)-1)
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
}
#endif
-static inline enum blk_zoned_model
-blk_queue_zoned_model(struct request_queue *q)
-{
- if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
- return q->limits.zoned;
- return BLK_ZONED_NONE;
-}
-
static inline bool blk_queue_is_zoned(struct request_queue *q)
{
- switch (blk_queue_zoned_model(q)) {
- case BLK_ZONED_HA:
- case BLK_ZONED_HM:
- return true;
- default:
- return false;
- }
+ return IS_ENABLED(CONFIG_BLK_DEV_ZONED) && q->limits.zoned;
}
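Since blk_queue_is_zoned() now folds to a compile-time false when CONFIG_BLK_DEV_ZONED is disabled, zoned-only paths in callers are compiled out instead of being guarded by a runtime model check. A hedged sketch of a consumer (myfs_* is an invented name, modeled on the f2fs hunk above):

#include <linux/blkdev.h>

/* Sketch: modeled on the f2fs validation above; myfs_* is hypothetical. */
static int myfs_validate_bdev(struct block_device *bdev, bool fs_has_zoned_feature)
{
	/* bdev_is_zoned() is constant false with CONFIG_BLK_DEV_ZONED=n. */
	if (bdev_is_zoned(bdev) && !fs_has_zoned_feature)
		return -EINVAL;	/* zoned device, but the fs feature is off */
	return 0;
}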
#ifdef CONFIG_BLK_DEV_ZONED
return test_bit(QUEUE_FLAG_NOWAIT, &bdev_get_queue(bdev)->queue_flags);
}
-static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
-{
- return blk_queue_zoned_model(bdev_get_queue(bdev));
-}
-
static inline bool bdev_is_zoned(struct block_device *bdev)
{
return blk_queue_is_zoned(bdev_get_queue(bdev));