// SPDX-License-Identifier: GPL-2.0
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include "null_blk.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
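/*
 * Zone geometry is configured in MB, but the block layer works in 512 B
 * sectors. For example, a 256 MB zone is (256 * SZ_1M) >> SECTOR_SHIFT =
 * 524288 sectors.
 */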
static inline sector_t mb_to_sects(unsigned long mb)
{
	return ((sector_t)mb * SZ_1M) >> SECTOR_SHIFT;
}
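/*
 * Zone sizes are restricted to powers of two (enforced in
 * null_init_zoned_dev()), so the ilog2() shift below is an exact division:
 * with 256 MB zones (524288 sectors, ilog2 == 19), sector 1048576 maps to
 * zone number 2.
 */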
static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
{
	return sect >> ilog2(dev->zone_size_sects);
}
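/*
 * dev->zone_res_lock protects the nr_zones_imp_open/exp_open/closed
 * accounting. It only needs to be taken when open and/or active zone limits
 * are configured, which is what dev->need_zone_res_mgmt records.
 */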
static inline void null_lock_zone_res(struct nullb_device *dev)
{
	if (dev->need_zone_res_mgmt)
		spin_lock_irq(&dev->zone_res_lock);
}

static inline void null_unlock_zone_res(struct nullb_device *dev)
{
	if (dev->need_zone_res_mgmt)
		spin_unlock_irq(&dev->zone_res_lock);
}
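/*
 * Per-zone locking: a memory-backed device may sleep while processing a
 * command (allocating and copying backing pages), so its zones are
 * serialized with a mutex; without memory backing a cheaper spinlock is
 * sufficient.
 */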
static inline void null_init_zone_lock(struct nullb_device *dev,
				       struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_lock_init(&zone->spinlock);
	else
		mutex_init(&zone->mutex);
}

static inline void null_lock_zone(struct nullb_device *dev,
				  struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_lock_irq(&zone->spinlock);
	else
		mutex_lock(&zone->mutex);
}

static inline void null_unlock_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_unlock_irq(&zone->spinlock);
	else
		mutex_unlock(&zone->mutex);
}
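/*
 * A zoned null_blk instance is typically created via module parameters,
 * for example (assuming the standard null_blk parameters, all sizes in MB):
 *
 *   modprobe null_blk zoned=1 zone_size=256 zone_nr_conv=4 \
 *           zone_max_open=8 zone_max_active=16
 *
 * or through the equivalent configfs attributes.
 */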
int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
{
	sector_t dev_capacity_sects, zone_capacity_sects;
	struct nullb_zone *zone;
	sector_t sector = 0;
	unsigned int i;

	if (!is_power_of_2(dev->zone_size)) {
		pr_err("zone_size must be power-of-two\n");
		return -EINVAL;
	}
	if (dev->zone_size > dev->size) {
		pr_err("Zone size larger than device capacity\n");
		return -EINVAL;
	}

	if (!dev->zone_capacity)
		dev->zone_capacity = dev->zone_size;

	if (dev->zone_capacity > dev->zone_size) {
		pr_err("null_blk: zone capacity (%lu MB) larger than zone size (%lu MB)\n",
		       dev->zone_capacity, dev->zone_size);
		return -EINVAL;
	}

	zone_capacity_sects = mb_to_sects(dev->zone_capacity);
	dev_capacity_sects = mb_to_sects(dev->size);
	dev->zone_size_sects = mb_to_sects(dev->zone_size);
	dev->nr_zones = round_up(dev_capacity_sects, dev->zone_size_sects)
		>> ilog2(dev->zone_size_sects);

	dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct nullb_zone),
				    GFP_KERNEL | __GFP_ZERO);
	if (!dev->zones)
		return -ENOMEM;
	spin_lock_init(&dev->zone_res_lock);

	if (dev->zone_nr_conv >= dev->nr_zones) {
		dev->zone_nr_conv = dev->nr_zones - 1;
		pr_info("changed the number of conventional zones to %u\n",
			dev->zone_nr_conv);
	}

	/* Max active zones has to be < number of seq zones in order to be enforceable */
	if (dev->zone_max_active >= dev->nr_zones - dev->zone_nr_conv) {
		dev->zone_max_active = 0;
		pr_info("zone_max_active limit disabled, limit >= zone count\n");
	}

	/* Max open zones has to be <= max active zones */
	if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
		dev->zone_max_open = dev->zone_max_active;
		pr_info("changed the maximum number of open zones to %u\n",
			dev->zone_max_open);
	} else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
		dev->zone_max_open = 0;
		pr_info("zone_max_open limit disabled, limit >= zone count\n");
	}

	dev->need_zone_res_mgmt = dev->zone_max_active || dev->zone_max_open;
	dev->imp_close_zone_no = dev->zone_nr_conv;
	for (i = 0; i < dev->zone_nr_conv; i++) {
		zone = &dev->zones[i];

		null_init_zone_lock(dev, zone);
		zone->start = sector;
		zone->len = dev->zone_size_sects;
		zone->capacity = zone->len;
		/* Conventional zones have no write pointer */
		zone->wp = zone->start + zone->len;
		zone->type = BLK_ZONE_TYPE_CONVENTIONAL;
		zone->cond = BLK_ZONE_COND_NOT_WP;

		sector += dev->zone_size_sects;
	}

	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		zone = &dev->zones[i];

		null_init_zone_lock(dev, zone);
		zone->start = zone->wp = sector;
		if (zone->start + dev->zone_size_sects > dev_capacity_sects)
			/* Smaller last zone */
			zone->len = dev_capacity_sects - zone->start;
		else
			zone->len = dev->zone_size_sects;
		zone->capacity =
			min_t(sector_t, zone->len, zone_capacity_sects);
		zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
		zone->cond = BLK_ZONE_COND_EMPTY;

		sector += dev->zone_size_sects;
	}

	return 0;
}
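/*
 * Apply the zoned model and queue limits to the device's request queue.
 * Once registered, the device presents itself as host-managed; the zone
 * layout built in null_init_zoned_dev() can then be inspected from user
 * space, e.g. with the util-linux "blkzone report /dev/nullb0" command.
 */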
int null_register_zoned_dev(struct nullb *nullb)
{
	struct nullb_device *dev = nullb->dev;
	struct request_queue *q = nullb->q;

	blk_queue_set_zoned(nullb->disk, BLK_ZONED_HM);
	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
	blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);

	if (queue_is_mq(q)) {
		int ret = blk_revalidate_disk_zones(nullb->disk, NULL);

		if (ret)
			return ret;
	} else {
		blk_queue_chunk_sectors(q, dev->zone_size_sects);
		q->nr_zones = blkdev_nr_zones(nullb->disk);
	}

	blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
	blk_queue_max_open_zones(q, dev->zone_max_open);
	blk_queue_max_active_zones(q, dev->zone_max_active);

	return 0;
}
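/* Free the zone array allocated in null_init_zoned_dev(). */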
void null_free_zoned_dev(struct nullb_device *dev)
{
	kvfree(dev->zones);
}
int null_report_zones(struct gendisk *disk, sector_t sector,
		      unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct nullb *nullb = disk->private_data;
	struct nullb_device *dev = nullb->dev;
	unsigned int first_zone, i;
	struct nullb_zone *zone;
	struct blk_zone blkz;
	int error;

	first_zone = null_zone_no(dev, sector);
	if (first_zone >= dev->nr_zones)
		return 0;

	nr_zones = min(nr_zones, dev->nr_zones - first_zone);
	trace_nullb_report_zones(nullb, nr_zones);

	memset(&blkz, 0, sizeof(struct blk_zone));
	zone = &dev->zones[first_zone];
	for (i = 0; i < nr_zones; i++, zone++) {
		/*
		 * Stacked DM target drivers will remap the zone information by
		 * modifying the zone information passed to the report callback.
		 * So use a local copy to avoid corruption of the device zone
		 * array.
		 */
		null_lock_zone(dev, zone);
		blkz.start = zone->start;
		blkz.len = zone->len;
		blkz.wp = zone->wp;
		blkz.type = zone->type;
		blkz.cond = zone->cond;
		blkz.capacity = zone->capacity;
		null_unlock_zone(dev, zone);

		error = cb(&blkz, i, data);
		if (error)
			return error;
	}

	return nr_zones;
}
/*
 * This is called in the case of memory backing from null_process_cmd()
 * with the target zone already locked.
 */
size_t null_zone_valid_read_len(struct nullb *nullb,
				sector_t sector, unsigned int len)
{
	struct nullb_device *dev = nullb->dev;
	struct nullb_zone *zone = &dev->zones[null_zone_no(dev, sector)];
	unsigned int nr_sectors = len >> SECTOR_SHIFT;

	/* Read must be below the write pointer position */
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
	    sector + nr_sectors <= zone->wp)
		return len;

	if (sector > zone->wp)
		return 0;

	return (zone->wp - sector) << SECTOR_SHIFT;
}
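/*
 * Close a zone and update the open/active resource accounting. Must be
 * called with zone_res_lock held (hence the __ prefix); shared by explicit
 * close requests and the implicit-close path below.
 */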
static blk_status_t __null_close_zone(struct nullb_device *dev,
				      struct nullb_zone *zone)
{
	switch (zone->cond) {
	case BLK_ZONE_COND_CLOSED:
		/* close operation on closed is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_FULL:
	default:
		return BLK_STS_IOERR;
	}

	if (zone->wp == zone->start) {
		zone->cond = BLK_ZONE_COND_EMPTY;
	} else {
		zone->cond = BLK_ZONE_COND_CLOSED;
		dev->nr_zones_closed++;
	}

	return BLK_STS_OK;
}
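/*
 * Pick an implicitly open zone to close in order to free up an open zone
 * resource. The scan round-robins over the sequential zones, resuming at
 * imp_close_zone_no, so the same zone is not always the victim.
 */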
static void null_close_imp_open_zone(struct nullb_device *dev)
{
	struct nullb_zone *zone;
	unsigned int zno, i;

	zno = dev->imp_close_zone_no;
	if (zno >= dev->nr_zones)
		zno = dev->zone_nr_conv;

	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		zone = &dev->zones[zno];
		zno++;
		if (zno >= dev->nr_zones)
			zno = dev->zone_nr_conv;

		if (zone->cond == BLK_ZONE_COND_IMP_OPEN) {
			__null_close_zone(dev, zone);
			dev->imp_close_zone_no = zno;
			return;
		}
	}
}
static blk_status_t null_check_active(struct nullb_device *dev)
{
	if (!dev->zone_max_active)
		return BLK_STS_OK;

	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open +
			dev->nr_zones_closed < dev->zone_max_active)
		return BLK_STS_OK;

	return BLK_STS_ZONE_ACTIVE_RESOURCE;
}
static blk_status_t null_check_open(struct nullb_device *dev)
{
	if (!dev->zone_max_open)
		return BLK_STS_OK;

	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open < dev->zone_max_open)
		return BLK_STS_OK;

	if (dev->nr_zones_imp_open) {
		if (null_check_active(dev) == BLK_STS_OK) {
			null_close_imp_open_zone(dev);
			return BLK_STS_OK;
		}
	}

	return BLK_STS_ZONE_OPEN_RESOURCE;
}
/*
 * This function matches the manage open zone resources function in the ZBC standard,
 * with the addition of max active zones support (added in the ZNS standard).
 *
 * The function determines if a zone can transition to implicit open or explicit open,
 * while maintaining the max open zone (and max active zone) limit(s). It may close an
 * implicit open zone in order to make additional zone resources available.
 *
 * ZBC states that an implicit open zone shall be closed only if there is not
 * room within the open limit. However, with the addition of an active limit,
 * it is not certain that closing an implicit open zone will allow a new zone
 * to be opened, since we might already be at the active limit capacity.
 */
static blk_status_t null_check_zone_resources(struct nullb_device *dev,
					      struct nullb_zone *zone)
{
	blk_status_t ret;

	switch (zone->cond) {
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_active(dev);
		if (ret != BLK_STS_OK)
			return ret;
		fallthrough;
	case BLK_ZONE_COND_CLOSED:
		return null_check_open(dev);
	default:
		/* Should never be called for other states */
		WARN_ON(1);
		return BLK_STS_IOERR;
	}
}
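/*
 * Handle a regular write (append == false), which must land exactly at the
 * write pointer, or a zone append (append == true), which is issued at the
 * current write pointer with the resulting position returned through the
 * request or BIO sector.
 */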
static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
				    unsigned int nr_sectors, bool append)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zno = null_zone_no(dev, sector);
	struct nullb_zone *zone = &dev->zones[zno];
	blk_status_t ret;

	trace_nullb_zone_op(cmd, zno, zone->cond);

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) {
		if (append)
			return BLK_STS_IOERR;
		return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
	}

	null_lock_zone(dev, zone);

	if (zone->cond == BLK_ZONE_COND_FULL) {
		/* Cannot write to a full zone */
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	/*
	 * Regular writes must be at the write pointer position.
	 * Zone append writes are automatically issued at the write
	 * pointer and the position returned using the request or BIO
	 * sector.
	 */
	if (append) {
		sector = zone->wp;
		if (cmd->bio)
			cmd->bio->bi_iter.bi_sector = sector;
		else
			cmd->rq->__sector = sector;
	} else if (sector != zone->wp) {
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	if (zone->wp + nr_sectors > zone->start + zone->capacity) {
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	if (zone->cond == BLK_ZONE_COND_CLOSED ||
	    zone->cond == BLK_ZONE_COND_EMPTY) {
		null_lock_zone_res(dev);

		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK) {
			null_unlock_zone_res(dev);
			goto unlock;
		}
		if (zone->cond == BLK_ZONE_COND_CLOSED) {
			dev->nr_zones_closed--;
			dev->nr_zones_imp_open++;
		} else if (zone->cond == BLK_ZONE_COND_EMPTY) {
			dev->nr_zones_imp_open++;
		}

		if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
			zone->cond = BLK_ZONE_COND_IMP_OPEN;

		null_unlock_zone_res(dev);
	}

	ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
	if (ret != BLK_STS_OK)
		goto unlock;

	zone->wp += nr_sectors;
	if (zone->wp == zone->start + zone->capacity) {
		null_lock_zone_res(dev);
		if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
			dev->nr_zones_exp_open--;
		else if (zone->cond == BLK_ZONE_COND_IMP_OPEN)
			dev->nr_zones_imp_open--;
		zone->cond = BLK_ZONE_COND_FULL;
		null_unlock_zone_res(dev);
	}

	ret = BLK_STS_OK;

unlock:
	null_unlock_zone(dev, zone);

	return ret;
}
static blk_status_t null_open_zone(struct nullb_device *dev,
				   struct nullb_zone *zone)
{
	blk_status_t ret = BLK_STS_OK;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);

	switch (zone->cond) {
	case BLK_ZONE_COND_EXP_OPEN:
		/* open operation on exp open is not an error */
		goto unlock;
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		break;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		dev->nr_zones_closed--;
		break;
	case BLK_ZONE_COND_FULL:
	default:
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	zone->cond = BLK_ZONE_COND_EXP_OPEN;
	dev->nr_zones_exp_open++;

unlock:
	null_unlock_zone_res(dev);

	return ret;
}
static blk_status_t null_close_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	blk_status_t ret;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);
	ret = __null_close_zone(dev, zone);
	null_unlock_zone_res(dev);

	return ret;
}
static blk_status_t null_finish_zone(struct nullb_device *dev,
				     struct nullb_zone *zone)
{
	blk_status_t ret = BLK_STS_OK;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);

	switch (zone->cond) {
	case BLK_ZONE_COND_FULL:
		/* finish operation on full is not an error */
		goto unlock;
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		break;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		dev->nr_zones_closed--;
		break;
	default:
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	zone->cond = BLK_ZONE_COND_FULL;
	zone->wp = zone->start + zone->len;

unlock:
	null_unlock_zone_res(dev);

	return ret;
}
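/*
 * Reset a zone: return it to the empty condition with the write pointer
 * rewound to the zone start. For memory-backed devices the zone's backing
 * data is discarded as well.
 */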
static blk_status_t null_reset_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);

	switch (zone->cond) {
	case BLK_ZONE_COND_EMPTY:
		/* reset operation on empty is not an error */
		null_unlock_zone_res(dev);
		return BLK_STS_OK;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		dev->nr_zones_closed--;
		break;
	case BLK_ZONE_COND_FULL:
		break;
	default:
		null_unlock_zone_res(dev);
		return BLK_STS_IOERR;
	}

	zone->cond = BLK_ZONE_COND_EMPTY;
	zone->wp = zone->start;

	null_unlock_zone_res(dev);

	if (dev->memory_backed)
		return null_handle_discard(dev, zone->start, zone->len);

	return BLK_STS_OK;
}
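/*
 * Dispatch zone management operations. REQ_OP_ZONE_RESET_ALL is handled
 * here by iterating over the sequential zones only, since conventional
 * zones have no write pointer state to reset.
 */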
static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
				   sector_t sector)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zone_no;
	struct nullb_zone *zone;
	blk_status_t ret;
	size_t i;

	if (op == REQ_OP_ZONE_RESET_ALL) {
		for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
			zone = &dev->zones[i];
			null_lock_zone(dev, zone);
			if (zone->cond != BLK_ZONE_COND_EMPTY) {
				null_reset_zone(dev, zone);
				trace_nullb_zone_op(cmd, i, zone->cond);
			}
			null_unlock_zone(dev, zone);
		}
		return BLK_STS_OK;
	}

	zone_no = null_zone_no(dev, sector);
	zone = &dev->zones[zone_no];

	null_lock_zone(dev, zone);

	switch (op) {
	case REQ_OP_ZONE_RESET:
		ret = null_reset_zone(dev, zone);
		break;
	case REQ_OP_ZONE_OPEN:
		ret = null_open_zone(dev, zone);
		break;
	case REQ_OP_ZONE_CLOSE:
		ret = null_close_zone(dev, zone);
		break;
	case REQ_OP_ZONE_FINISH:
		ret = null_finish_zone(dev, zone);
		break;
	default:
		ret = BLK_STS_NOTSUPP;
		break;
	}

	if (ret == BLK_STS_OK)
		trace_nullb_zone_op(cmd, zone_no, zone->cond);

	null_unlock_zone(dev, zone);

	return ret;
}
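/*
 * Entry point for command processing on a zoned device: writes and zone
 * management operations are intercepted here, while all other commands
 * (e.g. reads) go through the regular null_process_cmd() path under the
 * target zone lock.
 */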
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op,
				    sector_t sector, sector_t nr_sectors)
{
	struct nullb_device *dev;
	struct nullb_zone *zone;
	blk_status_t sts;

	switch (op) {
	case REQ_OP_WRITE:
		return null_zone_write(cmd, sector, nr_sectors, false);
	case REQ_OP_ZONE_APPEND:
		return null_zone_write(cmd, sector, nr_sectors, true);
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return null_zone_mgmt(cmd, op, sector);
	default:
		dev = cmd->nq->dev;
		zone = &dev->zones[null_zone_no(dev, sector)];

		null_lock_zone(dev, zone);
		sts = null_process_cmd(cmd, op, sector, nr_sectors);
		null_unlock_zone(dev, zone);
		return sts;
	}
}