// SPDX-License-Identifier: GPL-2.0
#include <linux/vmalloc.h>
#include "null_blk.h"

#define CREATE_TRACE_POINTS
#include "null_blk_trace.h"

/* zone_size in MBs to sectors. */
#define ZONE_SIZE_SHIFT		11

static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
{
	return sect >> ilog2(dev->zone_size_sects);
}

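/*
 * Set up the emulated zone layout: validate the zone size and capacity
 * parameters, allocate the zone array, and initialize the conventional
 * zones followed by the sequential write required zones.
 */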
int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
{
	sector_t dev_size = (sector_t)dev->size * 1024 * 1024;
	sector_t sector = 0;
	unsigned int i;

	if (!is_power_of_2(dev->zone_size)) {
		pr_err("zone_size must be power-of-two\n");
		return -EINVAL;
	}
	if (dev->zone_size > dev->size) {
		pr_err("Zone size larger than device capacity\n");
		return -EINVAL;
	}

	if (!dev->zone_capacity)
		dev->zone_capacity = dev->zone_size;

	if (dev->zone_capacity > dev->zone_size) {
		pr_err("null_blk: zone capacity (%lu MB) larger than zone size (%lu MB)\n",
		       dev->zone_capacity, dev->zone_size);
		return -EINVAL;
	}

	dev->zone_size_sects = dev->zone_size << ZONE_SIZE_SHIFT;
	dev->nr_zones = dev_size >>
			(SECTOR_SHIFT + ilog2(dev->zone_size_sects));
	dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct blk_zone),
				    GFP_KERNEL | __GFP_ZERO);
	if (!dev->zones)
		return -ENOMEM;
	if (dev->zone_nr_conv >= dev->nr_zones) {
		dev->zone_nr_conv = dev->nr_zones - 1;
		pr_info("changed the number of conventional zones to %u",
			dev->zone_nr_conv);
	}
	for (i = 0; i < dev->zone_nr_conv; i++) {
		struct blk_zone *zone = &dev->zones[i];

		zone->start = sector;
		zone->len = dev->zone_size_sects;
		zone->capacity = zone->len;
		zone->wp = zone->start + zone->len;
		zone->type = BLK_ZONE_TYPE_CONVENTIONAL;
		zone->cond = BLK_ZONE_COND_NOT_WP;

		sector += dev->zone_size_sects;
	}
	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		struct blk_zone *zone = &dev->zones[i];

		zone->start = zone->wp = sector;
		zone->len = dev->zone_size_sects;
		zone->capacity = dev->zone_capacity << ZONE_SIZE_SHIFT;
		zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
		zone->cond = BLK_ZONE_COND_EMPTY;

		sector += dev->zone_size_sects;
	}
	q->limits.zoned = BLK_ZONED_HM;
	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
	blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);

	return 0;
}

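/*
 * Complete zoned setup once the disk is available: blk-mq queues get
 * their zone information revalidated by the block layer, while
 * BIO-based queues set the chunk sectors and zone count directly.
 */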
int null_register_zoned_dev(struct nullb *nullb)
{
	struct nullb_device *dev = nullb->dev;
	struct request_queue *q = nullb->q;

	if (queue_is_mq(q)) {
		int ret = blk_revalidate_disk_zones(nullb->disk, NULL);

		if (ret)
			return ret;
	} else {
		blk_queue_chunk_sectors(q, dev->zone_size_sects);
		q->nr_zones = blkdev_nr_zones(nullb->disk);
	}

	blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);

	return 0;
}

void null_free_zoned_dev(struct nullb_device *dev)
{
	kvfree(dev->zones);
}

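/* Report zones to the block layer, one zone at a time through @cb. */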
int null_report_zones(struct gendisk *disk, sector_t sector,
		      unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct nullb *nullb = disk->private_data;
	struct nullb_device *dev = nullb->dev;
	unsigned int first_zone, i;
	struct blk_zone zone;
	int error;

	first_zone = null_zone_no(dev, sector);
	if (first_zone >= dev->nr_zones)
		return 0;

	nr_zones = min(nr_zones, dev->nr_zones - first_zone);
	trace_nullb_report_zones(nullb, nr_zones);
	for (i = 0; i < nr_zones; i++) {
		/*
		 * Stacked DM target drivers will remap the zone information by
		 * modifying the zone information passed to the report callback.
		 * So use a local copy to avoid corruption of the device zone
		 * array.
		 */
		memcpy(&zone, &dev->zones[first_zone + i],
		       sizeof(struct blk_zone));
		error = cb(&zone, i, data);
		if (error)
			return error;
	}

	return nr_zones;
}

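/*
 * Reads of a sequential zone must not cross the zone write pointer:
 * return the length that is actually readable starting at @sector.
 */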
size_t null_zone_valid_read_len(struct nullb *nullb,
				sector_t sector, unsigned int len)
{
	struct nullb_device *dev = nullb->dev;
	struct blk_zone *zone = &dev->zones[null_zone_no(dev, sector)];
	unsigned int nr_sectors = len >> SECTOR_SHIFT;

	/* Read must be below the write pointer position */
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
	    sector + nr_sectors <= zone->wp)
		return len;

	if (sector > zone->wp)
		return 0;

	return (zone->wp - sector) << SECTOR_SHIFT;
}

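/*
 * Process a regular or zone append write. Writes to conventional zones
 * pass straight through; writes to sequential zones must land at the
 * write pointer, which is advanced when the write succeeds.
 */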
static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
				    unsigned int nr_sectors, bool append)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zno = null_zone_no(dev, sector);
	struct blk_zone *zone = &dev->zones[zno];
	blk_status_t ret;

	trace_nullb_zone_op(cmd, zno, zone->cond);

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);

	switch (zone->cond) {
	case BLK_ZONE_COND_FULL:
		/* Cannot write to a full zone */
		return BLK_STS_IOERR;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_IMP_OPEN:
	case BLK_ZONE_COND_EXP_OPEN:
	case BLK_ZONE_COND_CLOSED:
		/*
		 * Regular writes must be at the write pointer position.
		 * Zone append writes are automatically issued at the write
		 * pointer and the position returned using the request or BIO
		 * sector.
		 */
		if (append) {
			sector = zone->wp;
			if (cmd->bio)
				cmd->bio->bi_iter.bi_sector = sector;
			else
				cmd->rq->__sector = sector;
		} else if (sector != zone->wp) {
			return BLK_STS_IOERR;
		}
		if (zone->wp + nr_sectors > zone->start + zone->capacity)
			return BLK_STS_IOERR;

		if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
			zone->cond = BLK_ZONE_COND_IMP_OPEN;

		ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
		if (ret != BLK_STS_OK)
			return ret;

		zone->wp += nr_sectors;
		if (zone->wp == zone->start + zone->capacity)
			zone->cond = BLK_ZONE_COND_FULL;
		return BLK_STS_OK;
	default:
		/* Invalid zone condition */
		return BLK_STS_IOERR;
	}
}

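/*
 * Zone management operations: update the target zone condition and
 * write pointer for reset, open, close and finish requests.
 */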
static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
				   sector_t sector)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zone_no = null_zone_no(dev, sector);
	struct blk_zone *zone = &dev->zones[zone_no];
	unsigned int i;

	switch (op) {
	case REQ_OP_ZONE_RESET_ALL:
		for (i = 0; i < dev->nr_zones; i++) {
			if (zone[i].type == BLK_ZONE_TYPE_CONVENTIONAL)
				continue;
			zone[i].cond = BLK_ZONE_COND_EMPTY;
			zone[i].wp = zone[i].start;
		}
		break;
	case REQ_OP_ZONE_RESET:
		if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
			return BLK_STS_IOERR;

		zone->cond = BLK_ZONE_COND_EMPTY;
		zone->wp = zone->start;
		break;
	case REQ_OP_ZONE_OPEN:
		if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
			return BLK_STS_IOERR;
		if (zone->cond == BLK_ZONE_COND_FULL)
			return BLK_STS_IOERR;

		zone->cond = BLK_ZONE_COND_EXP_OPEN;
		break;
	case REQ_OP_ZONE_CLOSE:
		if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
			return BLK_STS_IOERR;
		if (zone->cond == BLK_ZONE_COND_FULL)
			return BLK_STS_IOERR;

		if (zone->wp == zone->start)
			zone->cond = BLK_ZONE_COND_EMPTY;
		else
			zone->cond = BLK_ZONE_COND_CLOSED;
		break;
	case REQ_OP_ZONE_FINISH:
		if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
			return BLK_STS_IOERR;

		zone->cond = BLK_ZONE_COND_FULL;
		zone->wp = zone->start + zone->len;
		break;
	default:
		return BLK_STS_NOTSUPP;
	}

	trace_nullb_zone_op(cmd, zone_no, zone->cond);
	return BLK_STS_OK;
}

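/*
 * Main entry point for zoned devices: route writes and zone management
 * operations to the zone-aware handlers and everything else (reads,
 * flushes, discards) to the regular command path.
 */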
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op,
				    sector_t sector, sector_t nr_sectors)
{
	switch (op) {
	case REQ_OP_WRITE:
		return null_zone_write(cmd, sector, nr_sectors, false);
	case REQ_OP_ZONE_APPEND:
		return null_zone_write(cmd, sector, nr_sectors, true);
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return null_zone_mgmt(cmd, op, sector);
	default:
		return null_process_cmd(cmd, op, sector, nr_sectors);
	}
}