// SPDX-License-Identifier: GPL-2.0
#include <linux/vmalloc.h>
#include "null_blk.h"

#define CREATE_TRACE_POINTS
#include "null_blk_trace.h"
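
/*
 * A zoned null_blk instance is typically created through the module
 * parameters handled by the main driver, for example:
 *
 *	modprobe null_blk zoned=1 gb=1 zone_size=64
 *
 * The invocation above is illustrative; zone_size is given in MB and is
 * converted to sectors below.
 */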

/* zone_size in MBs to sectors. */
#define ZONE_SIZE_SHIFT		11
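
/*
 * Example: zone_size = 64 (MB) gives 64 << ZONE_SIZE_SHIFT = 131072
 * sectors, since 64 * 1024 * 1024 / 512 = 131072.
 */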

static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
{
	return sect >> ilog2(dev->zone_size_sects);
}
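
/*
 * Validate the zoned device parameters and build the in-memory zone array:
 * dev->zone_nr_conv conventional zones first, followed by sequential write
 * required zones, each dev->zone_size_sects sectors long.
 */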
int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
{
	sector_t dev_size = (sector_t)dev->size * 1024 * 1024;
	sector_t sector = 0;
	unsigned int i;

	if (!is_power_of_2(dev->zone_size)) {
		pr_err("zone_size must be power-of-two\n");
		return -EINVAL;
	}
	if (dev->zone_size > dev->size) {
		pr_err("Zone size larger than device capacity\n");
		return -EINVAL;
	}

	if (!dev->zone_capacity)
		dev->zone_capacity = dev->zone_size;

	if (dev->zone_capacity > dev->zone_size) {
		pr_err("null_blk: zone capacity (%lu MB) larger than zone size (%lu MB)\n",
		       dev->zone_capacity, dev->zone_size);
		return -EINVAL;
	}

	dev->zone_size_sects = dev->zone_size << ZONE_SIZE_SHIFT;
	dev->nr_zones = dev_size >>
				(SECTOR_SHIFT + ilog2(dev->zone_size_sects));
	dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct blk_zone),
				    GFP_KERNEL | __GFP_ZERO);
	if (!dev->zones)
		return -ENOMEM;

	if (dev->zone_nr_conv >= dev->nr_zones) {
		dev->zone_nr_conv = dev->nr_zones - 1;
		pr_info("changed the number of conventional zones to %u",
			dev->zone_nr_conv);
	}

	/* Max active zones has to be < number of seq zones in order to be enforceable */
	if (dev->zone_max_active >= dev->nr_zones - dev->zone_nr_conv) {
		dev->zone_max_active = 0;
		pr_info("zone_max_active limit disabled, limit >= zone count\n");
	}

	/* Max open zones has to be <= max active zones */
	if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
		dev->zone_max_open = dev->zone_max_active;
		pr_info("changed the maximum number of open zones to %u\n",
			dev->zone_max_open);
	} else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
		dev->zone_max_open = 0;
		pr_info("zone_max_open limit disabled, limit >= zone count\n");
	}

	for (i = 0; i < dev->zone_nr_conv; i++) {
		struct blk_zone *zone = &dev->zones[i];

		zone->start = sector;
		zone->len = dev->zone_size_sects;
		zone->capacity = zone->len;
		zone->wp = zone->start + zone->len;
		zone->type = BLK_ZONE_TYPE_CONVENTIONAL;
		zone->cond = BLK_ZONE_COND_NOT_WP;

		sector += dev->zone_size_sects;
	}

	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		struct blk_zone *zone = &dev->zones[i];

		zone->start = zone->wp = sector;
		zone->len = dev->zone_size_sects;
		zone->capacity = dev->zone_capacity << ZONE_SIZE_SHIFT;
		zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
		zone->cond = BLK_ZONE_COND_EMPTY;

		sector += dev->zone_size_sects;
	}

	q->limits.zoned = BLK_ZONED_HM;
	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
	blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);

	return 0;
}
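
/*
 * Set up the zone-related queue limits when the nullb device is registered:
 * blk-mq devices revalidate their zones, bio-based devices set the chunk
 * size and zone count directly. Both paths advertise the zone append, max
 * open and max active limits.
 */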
int null_register_zoned_dev(struct nullb *nullb)
{
	struct nullb_device *dev = nullb->dev;
	struct request_queue *q = nullb->q;

	if (queue_is_mq(q)) {
		int ret = blk_revalidate_disk_zones(nullb->disk, NULL);

		if (ret)
			return ret;
	} else {
		blk_queue_chunk_sectors(q, dev->zone_size_sects);
		q->nr_zones = blkdev_nr_zones(nullb->disk);
	}

	blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
	blk_queue_max_open_zones(q, dev->zone_max_open);
	blk_queue_max_active_zones(q, dev->zone_max_active);

	return 0;
}

void null_free_zoned_dev(struct nullb_device *dev)
{
	kvfree(dev->zones);
}
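
/*
 * The report_zones method of the nullb disk: copy up to nr_zones zone
 * descriptors, starting at the zone containing sector, to the report
 * callback cb.
 */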
int null_report_zones(struct gendisk *disk, sector_t sector,
		      unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct nullb *nullb = disk->private_data;
	struct nullb_device *dev = nullb->dev;
	unsigned int first_zone, i;
	struct blk_zone zone;
	int error;

	first_zone = null_zone_no(dev, sector);
	if (first_zone >= dev->nr_zones)
		return 0;

	nr_zones = min(nr_zones, dev->nr_zones - first_zone);
	trace_nullb_report_zones(nullb, nr_zones);

	for (i = 0; i < nr_zones; i++) {
		/*
		 * Stacked DM target drivers will remap the zone information by
		 * modifying the zone information passed to the report callback.
		 * So use a local copy to avoid corruption of the device zone
		 * array.
		 */
		memcpy(&zone, &dev->zones[first_zone + i],
		       sizeof(struct blk_zone));
		error = cb(&zone, i, data);
		if (error)
			return error;
	}

	return nr_zones;
}
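
/*
 * Return the number of bytes of a read that fall below the zone write
 * pointer and are thus backed by valid data. Conventional zone reads are
 * always fully valid.
 */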
size_t null_zone_valid_read_len(struct nullb *nullb,
				sector_t sector, unsigned int len)
{
	struct nullb_device *dev = nullb->dev;
	struct blk_zone *zone = &dev->zones[null_zone_no(dev, sector)];
	unsigned int nr_sectors = len >> SECTOR_SHIFT;

	/* Read must be below the write pointer position */
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
	    sector + nr_sectors <= zone->wp)
		return len;

	if (sector > zone->wp)
		return 0;

	return (zone->wp - sector) << SECTOR_SHIFT;
}
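
/*
 * ZBC/ZAC CLOSE ZONE: transition an open zone to closed, or back to empty
 * if nothing was written to it, updating the open/closed zone accounting.
 */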
static blk_status_t null_close_zone(struct nullb_device *dev, struct blk_zone *zone)
{
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	switch (zone->cond) {
	case BLK_ZONE_COND_CLOSED:
		/* close operation on closed is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_FULL:
	default:
		return BLK_STS_IOERR;
	}

	if (zone->wp == zone->start) {
		zone->cond = BLK_ZONE_COND_EMPTY;
	} else {
		zone->cond = BLK_ZONE_COND_CLOSED;
		dev->nr_zones_closed++;
	}

	return BLK_STS_OK;
}
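
/*
 * Close the first implicitly open zone found, freeing up an open zone
 * resource for another zone to use.
 */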
static void null_close_first_imp_zone(struct nullb_device *dev)
{
	unsigned int i;

	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		if (dev->zones[i].cond == BLK_ZONE_COND_IMP_OPEN) {
			null_close_zone(dev, &dev->zones[i]);
			return;
		}
	}
}
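
/*
 * A zone is active when it is implicitly open, explicitly open or closed.
 * Check that activating one more zone stays within dev->zone_max_active.
 */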
static blk_status_t null_check_active(struct nullb_device *dev)
{
	if (!dev->zone_max_active)
		return BLK_STS_OK;

	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open +
			dev->nr_zones_closed < dev->zone_max_active)
		return BLK_STS_OK;

	return BLK_STS_ZONE_ACTIVE_RESOURCE;
}
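
/*
 * Check that one more zone can be opened within dev->zone_max_open,
 * implicitly closing an open zone if that would free up an open slot.
 */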
static blk_status_t null_check_open(struct nullb_device *dev)
{
	if (!dev->zone_max_open)
		return BLK_STS_OK;

	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open < dev->zone_max_open)
		return BLK_STS_OK;

	if (dev->nr_zones_imp_open) {
		if (null_check_active(dev) == BLK_STS_OK) {
			null_close_first_imp_zone(dev);
			return BLK_STS_OK;
		}
	}

	return BLK_STS_ZONE_OPEN_RESOURCE;
}

/*
 * This function matches the manage open zone resources function in the ZBC
 * standard, with the addition of max active zones support (added in the ZNS
 * standard).
 *
 * The function determines if a zone can transition to implicit open or
 * explicit open, while maintaining the max open zone (and max active zone)
 * limit(s). It may close an implicit open zone in order to make additional
 * zone resources available.
 *
 * ZBC states that an implicit open zone shall be closed only if there is not
 * room within the open limit. However, with the addition of an active limit,
 * it is not certain that closing an implicit open zone will allow a new zone
 * to be opened, since we might already be at the active limit capacity.
 */
static blk_status_t null_check_zone_resources(struct nullb_device *dev,
					      struct blk_zone *zone)
{
	blk_status_t ret;

	switch (zone->cond) {
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_active(dev);
		if (ret != BLK_STS_OK)
			return ret;
		fallthrough;
	case BLK_ZONE_COND_CLOSED:
		return null_check_open(dev);
	default:
		/* Should never be called for other states */
		WARN_ON(1);
		return BLK_STS_IOERR;
	}
}
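
/*
 * Handle a regular write or a zone append to a zone: check the zone state
 * and resource limits, emulate the append by writing at the current write
 * pointer, then advance the write pointer and update the zone condition.
 */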
static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
				    unsigned int nr_sectors, bool append)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zno = null_zone_no(dev, sector);
	struct blk_zone *zone = &dev->zones[zno];
	blk_status_t ret;

	trace_nullb_zone_op(cmd, zno, zone->cond);

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);

	switch (zone->cond) {
	case BLK_ZONE_COND_FULL:
		/* Cannot write to a full zone */
		return BLK_STS_IOERR;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_CLOSED:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			return ret;
		break;
	case BLK_ZONE_COND_IMP_OPEN:
	case BLK_ZONE_COND_EXP_OPEN:
		break;
	default:
		/* Invalid zone condition */
		return BLK_STS_IOERR;
	}

	/*
	 * Regular writes must be at the write pointer position.
	 * Zone append writes are automatically issued at the write
	 * pointer and the position returned using the request or BIO
	 * sector.
	 */
	if (append) {
		sector = zone->wp;
		if (cmd->bio)
			cmd->bio->bi_iter.bi_sector = sector;
		else
			cmd->rq->__sector = sector;
	} else if (sector != zone->wp) {
		return BLK_STS_IOERR;
	}

	if (zone->wp + nr_sectors > zone->start + zone->capacity)
		return BLK_STS_IOERR;

	if (zone->cond == BLK_ZONE_COND_CLOSED) {
		dev->nr_zones_closed--;
		dev->nr_zones_imp_open++;
	} else if (zone->cond == BLK_ZONE_COND_EMPTY) {
		dev->nr_zones_imp_open++;
	}
	if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
		zone->cond = BLK_ZONE_COND_IMP_OPEN;

	ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
	if (ret != BLK_STS_OK)
		return ret;

	zone->wp += nr_sectors;
	if (zone->wp == zone->start + zone->capacity) {
		if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
			dev->nr_zones_exp_open--;
		else if (zone->cond == BLK_ZONE_COND_IMP_OPEN)
			dev->nr_zones_imp_open--;
		zone->cond = BLK_ZONE_COND_FULL;
	}
	return BLK_STS_OK;
}
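
/*
 * ZBC/ZAC OPEN ZONE: explicitly open a zone, checking the open and active
 * zone resource limits first.
 */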
static blk_status_t null_open_zone(struct nullb_device *dev, struct blk_zone *zone)
{
	blk_status_t ret;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	switch (zone->cond) {
	case BLK_ZONE_COND_EXP_OPEN:
		/* open operation on exp open is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			return ret;
		break;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			return ret;
		dev->nr_zones_closed--;
		break;
	case BLK_ZONE_COND_FULL:
	default:
		return BLK_STS_IOERR;
	}

	zone->cond = BLK_ZONE_COND_EXP_OPEN;
	dev->nr_zones_exp_open++;

	return BLK_STS_OK;
}
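
/*
 * ZBC/ZAC FINISH ZONE: transition a zone to full, moving the write pointer
 * to the end of the zone.
 */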
static blk_status_t null_finish_zone(struct nullb_device *dev, struct blk_zone *zone)
{
	blk_status_t ret;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	switch (zone->cond) {
	case BLK_ZONE_COND_FULL:
		/* finish operation on full is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			return ret;
		break;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			return ret;
		dev->nr_zones_closed--;
		break;
	default:
		return BLK_STS_IOERR;
	}

	zone->cond = BLK_ZONE_COND_FULL;
	zone->wp = zone->start + zone->len;

	return BLK_STS_OK;
}
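
/*
 * ZBC/ZAC RESET WRITE POINTER: return a zone to the empty state, rewinding
 * the write pointer to the start of the zone.
 */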
static blk_status_t null_reset_zone(struct nullb_device *dev, struct blk_zone *zone)
{
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	switch (zone->cond) {
	case BLK_ZONE_COND_EMPTY:
		/* reset operation on empty is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		dev->nr_zones_closed--;
		break;
	case BLK_ZONE_COND_FULL:
		break;
	default:
		return BLK_STS_IOERR;
	}

	zone->cond = BLK_ZONE_COND_EMPTY;
	zone->wp = zone->start;

	return BLK_STS_OK;
}
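
/*
 * Dispatch a zone management operation (reset, reset all, open, close,
 * finish) to the matching zone state handler.
 */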
static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
				   sector_t sector)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zone_no = null_zone_no(dev, sector);
	struct blk_zone *zone = &dev->zones[zone_no];
	blk_status_t ret = BLK_STS_OK;
	size_t i;

	switch (op) {
	case REQ_OP_ZONE_RESET_ALL:
		for (i = dev->zone_nr_conv; i < dev->nr_zones; i++)
			null_reset_zone(dev, &dev->zones[i]);
		break;
	case REQ_OP_ZONE_RESET:
		ret = null_reset_zone(dev, zone);
		break;
	case REQ_OP_ZONE_OPEN:
		ret = null_open_zone(dev, zone);
		break;
	case REQ_OP_ZONE_CLOSE:
		ret = null_close_zone(dev, zone);
		break;
	case REQ_OP_ZONE_FINISH:
		ret = null_finish_zone(dev, zone);
		break;
	default:
		return BLK_STS_NOTSUPP;
	}

	if (ret == BLK_STS_OK)
		trace_nullb_zone_op(cmd, zone_no, zone->cond);

	return ret;
}
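
/*
 * Entry point for zoned command processing: writes and zone appends go
 * through the write pointer checks, zone management operations are
 * dispatched, and everything else (e.g. reads) is processed as on a
 * regular null_blk device.
 */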
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op,
				    sector_t sector, sector_t nr_sectors)
{
	switch (op) {
	case REQ_OP_WRITE:
		return null_zone_write(cmd, sector, nr_sectors, false);
	case REQ_OP_ZONE_APPEND:
		return null_zone_write(cmd, sector, nr_sectors, true);
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return null_zone_mgmt(cmd, op, sector);
	default:
		return null_process_cmd(cmd, op, sector, nr_sectors);
	}
}