// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe ZNS-ZBD command implementation.
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/nvme.h>
#include <linux/blkdev.h>
#include "nvmet.h"

/*
 * We set the Memory Page Size Minimum (MPSMIN) for the target controller to 0,
 * to which nvme_enable_ctrl() adds 12, giving a page_shift value of 2^12 = 4k.
 * When calculating ZASL, use a shift of 12.
 */
#define NVMET_MPSMIN_SHIFT	12

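/*
 * For example, with a 4k minimum page and a device limit of 256 zone append
 * sectors (128k), nvmet_zasl() below computes ilog2(256 >> 3) = 5, i.e. a
 * maximum zone append size of 2^5 * 4k = 128k.
 */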
static inline u8 nvmet_zasl(unsigned int zone_append_sects)
{
	/*
	 * Zone Append Size Limit (zasl) is expressed as a power of 2 value
	 * with the minimum memory page size (i.e. 12) as unit.
	 */
	return ilog2(zone_append_sects >> (NVMET_MPSMIN_SHIFT - 9));
}

static int validate_conv_zones_cb(struct blk_zone *z,
				  unsigned int i, void *data)
{
	if (z->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return -EPROTONOSUPPORT;
	return 0;
}

bool nvmet_bdev_zns_enable(struct nvmet_ns *ns)
{
	u8 zasl = nvmet_zasl(bdev_max_zone_append_sectors(ns->bdev));
	struct gendisk *bd_disk = ns->bdev->bd_disk;
	int ret;

	if (ns->subsys->zasl) {
		if (ns->subsys->zasl > zasl)
			return false;
	}
	ns->subsys->zasl = zasl;

	/*
	 * Generic zoned block devices may have a smaller last zone which is
	 * not supported by ZNS. Exclude zoned drives that have such smaller
	 * last zone.
	 */
	if (get_capacity(bd_disk) & (bdev_zone_sectors(ns->bdev) - 1))
		return false;
	/*
	 * ZNS does not define a conventional zone type. If the underlying
	 * device has a bitmap set indicating the existence of conventional
	 * zones, reject the device. Otherwise, use report zones to detect if
	 * the device has conventional zones.
	 */
	if (ns->bdev->bd_disk->queue->conv_zones_bitmap)
		return false;

	ret = blkdev_report_zones(ns->bdev, 0, blkdev_nr_zones(bd_disk),
				  validate_conv_zones_cb, NULL);
	if (ret <= 0)
		return false;

	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));

	return true;
}

void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req)
{
	u8 zasl = req->sq->ctrl->subsys->zasl;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl_zns *id;
	u16 status;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	if (ctrl->ops->get_mdts)
		id->zasl = min_t(u8, ctrl->ops->get_mdts(ctrl), zasl);
	else
		id->zasl = zasl;

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
{
	struct nvme_id_ns_zns *id_zns;
	u64 zsze;
	u16 status;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id_zns = kzalloc(sizeof(*id_zns), GFP_KERNEL);
	if (!id_zns) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	status = nvmet_req_find_ns(req);
	if (status)
		goto done;

	if (!bdev_is_zoned(req->ns->bdev)) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		goto done;
	}

	if (nvmet_ns_revalidate(req->ns)) {
		mutex_lock(&req->ns->subsys->lock);
		nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
		mutex_unlock(&req->ns->subsys->lock);
	}
	zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >>
					req->ns->blksize_shift;
	id_zns->lbafe[0].zsze = cpu_to_le64(zsze);
	id_zns->mor = cpu_to_le32(bdev_max_open_zones(req->ns->bdev));
	id_zns->mar = cpu_to_le32(bdev_max_active_zones(req->ns->bdev));

done:
	status = nvmet_copy_to_sgl(req, 0, id_zns, sizeof(*id_zns));
	kfree(id_zns);
out:
	nvmet_req_complete(req, status);
}

static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req)
{
	sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
	u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;

	if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, slba);
		return NVME_SC_LBA_RANGE | NVME_SC_DNR;
	}

	if (out_bufsize < sizeof(struct nvme_zone_report)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, numd);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	if (req->cmd->zmr.zra != NVME_ZRA_ZONE_REPORT) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zra);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	switch (req->cmd->zmr.pr) {
	case 0:
	case 1:
		break;
	default:
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, pr);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	switch (req->cmd->zmr.zrasf) {
	case NVME_ZRASF_ZONE_REPORT_ALL:
	case NVME_ZRASF_ZONE_STATE_EMPTY:
	case NVME_ZRASF_ZONE_STATE_IMP_OPEN:
	case NVME_ZRASF_ZONE_STATE_EXP_OPEN:
	case NVME_ZRASF_ZONE_STATE_CLOSED:
	case NVME_ZRASF_ZONE_STATE_FULL:
	case NVME_ZRASF_ZONE_STATE_READONLY:
	case NVME_ZRASF_ZONE_STATE_OFFLINE:
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_zone_mgmt_recv_cmd, zrasf);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	return NVME_SC_SUCCESS;
}

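/*
 * Context passed to the report zones callback: the request being served, the
 * SGL offset at which the next zone descriptor is written, how many
 * descriptors fit in the host buffer, how many zones have matched so far,
 * and the zone state filter (zrasf) to apply.
 */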
struct nvmet_report_zone_data {
	struct nvmet_req *req;
	u64 out_buf_offset;
	u32 out_nr_zones;
	u32 nr_zones;
	u8 zrasf;
};

static int nvmet_bdev_report_zone_cb(struct blk_zone *z, unsigned i, void *d)
{
	static const unsigned int nvme_zrasf_to_blk_zcond[] = {
		[NVME_ZRASF_ZONE_STATE_EMPTY]	 = BLK_ZONE_COND_EMPTY,
		[NVME_ZRASF_ZONE_STATE_IMP_OPEN] = BLK_ZONE_COND_IMP_OPEN,
		[NVME_ZRASF_ZONE_STATE_EXP_OPEN] = BLK_ZONE_COND_EXP_OPEN,
		[NVME_ZRASF_ZONE_STATE_CLOSED]	 = BLK_ZONE_COND_CLOSED,
		[NVME_ZRASF_ZONE_STATE_READONLY] = BLK_ZONE_COND_READONLY,
		[NVME_ZRASF_ZONE_STATE_FULL]	 = BLK_ZONE_COND_FULL,
		[NVME_ZRASF_ZONE_STATE_OFFLINE]	 = BLK_ZONE_COND_OFFLINE,
	};
	struct nvmet_report_zone_data *rz = d;

	if (rz->zrasf != NVME_ZRASF_ZONE_REPORT_ALL &&
	    z->cond != nvme_zrasf_to_blk_zcond[rz->zrasf])
		return 0;

	if (rz->nr_zones < rz->out_nr_zones) {
		struct nvme_zone_descriptor zdesc = { };
		u16 status;

		zdesc.zcap = nvmet_sect_to_lba(rz->req->ns, z->capacity);
		zdesc.zslba = nvmet_sect_to_lba(rz->req->ns, z->start);
		zdesc.wp = nvmet_sect_to_lba(rz->req->ns, z->wp);
		zdesc.za = z->reset ? 1 << 2 : 0;
		zdesc.zs = z->cond << 4;
		zdesc.zt = z->type;

		status = nvmet_copy_to_sgl(rz->req, rz->out_buf_offset, &zdesc,
					   sizeof(zdesc));
		if (status)
			return -EINVAL;

		rz->out_buf_offset += sizeof(zdesc);
	}

	rz->nr_zones++;

	return 0;
}

static unsigned long nvmet_req_nr_zones_from_slba(struct nvmet_req *req)
{
	unsigned int sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);

	return blkdev_nr_zones(req->ns->bdev->bd_disk) -
		(sect >> ilog2(bdev_zone_sectors(req->ns->bdev)));
}

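/*
 * The Zone Management Receive payload is a 64-byte nvme_zone_report header
 * followed by 64-byte zone descriptors, so the number of descriptors that
 * fit in the host buffer is (bufsize - header size) / descriptor size.
 */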
static unsigned long get_nr_zones_from_buf(struct nvmet_req *req, u32 bufsize)
{
	if (bufsize <= sizeof(struct nvme_zone_report))
		return 0;

	return (bufsize - sizeof(struct nvme_zone_report)) /
		sizeof(struct nvme_zone_descriptor);
}

static void nvmet_bdev_zone_zmgmt_recv_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
	sector_t start_sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
	unsigned long req_slba_nr_zones = nvmet_req_nr_zones_from_slba(req);
	u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;
	__le64 nr_zones;
	u16 status;
	int ret;
	struct nvmet_report_zone_data rz_data = {
		.out_nr_zones = get_nr_zones_from_buf(req, out_bufsize),
		/* leave room for the report zone header */
		.out_buf_offset = sizeof(struct nvme_zone_report),
		.zrasf = req->cmd->zmr.zrasf,
		.nr_zones = 0,
		.req = req,
	};

	status = nvmet_bdev_validate_zone_mgmt_recv(req);
	if (status)
		goto out;

	if (!req_slba_nr_zones) {
		status = NVME_SC_SUCCESS;
		goto out;
	}

	ret = blkdev_report_zones(req->ns->bdev, start_sect, req_slba_nr_zones,
				  nvmet_bdev_report_zone_cb, &rz_data);
	if (ret < 0) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/*
	 * When the partial bit is set, nr_zones must indicate the number of
	 * zone descriptors actually transferred.
	 */
	if (req->cmd->zmr.pr)
		rz_data.nr_zones = min(rz_data.nr_zones, rz_data.out_nr_zones);

	nr_zones = cpu_to_le64(rz_data.nr_zones);
	status = nvmet_copy_to_sgl(req, 0, &nr_zones, sizeof(nr_zones));

out:
	nvmet_req_complete(req, status);
}

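/*
 * blkdev_report_zones() may sleep, so Zone Management Receive is executed
 * from workqueue context rather than directly in the I/O submission path.
 */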
void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req)
{
	INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zone_zmgmt_recv_work);
	queue_work(zbd_wq, &req->z.zmgmt_work);
}

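/*
 * Map an NVMe Zone Send Action (ZSA) to the corresponding block layer zone
 * management operation.
 */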
static inline enum req_opf zsa_req_op(u8 zsa)
{
	switch (zsa) {
	case NVME_ZONE_OPEN:
		return REQ_OP_ZONE_OPEN;
	case NVME_ZONE_CLOSE:
		return REQ_OP_ZONE_CLOSE;
	case NVME_ZONE_FINISH:
		return REQ_OP_ZONE_FINISH;
	case NVME_ZONE_RESET:
		return REQ_OP_ZONE_RESET;
	default:
		return REQ_OP_LAST;
	}
}

static u16 blkdev_zone_mgmt_errno_to_nvme_status(int ret)
{
	switch (ret) {
	case 0:
		return NVME_SC_SUCCESS;
	case -EINVAL:
	case -EIO:
		return NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
	default:
		return NVME_SC_INTERNAL;
	}
}

struct nvmet_zone_mgmt_send_all_data {
	unsigned long *zbitmap;
	struct nvmet_req *req;
};

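/*
 * Report zones callback used when emulating a "select all" zone management
 * send: mark in the bitmap every zone whose current condition allows the
 * requested transition (closed zones for Open All, open zones for Close All,
 * open or closed zones for Finish All). Zones in any other condition are
 * left untouched.
 */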
static int zmgmt_send_scan_cb(struct blk_zone *z, unsigned i, void *d)
{
	struct nvmet_zone_mgmt_send_all_data *data = d;

	switch (zsa_req_op(data->req->cmd->zms.zsa)) {
	case REQ_OP_ZONE_OPEN:
		switch (z->cond) {
		case BLK_ZONE_COND_CLOSED:
			break;
		default:
			return 0;
		}
		break;
	case REQ_OP_ZONE_CLOSE:
		switch (z->cond) {
		case BLK_ZONE_COND_IMP_OPEN:
		case BLK_ZONE_COND_EXP_OPEN:
			break;
		default:
			return 0;
		}
		break;
	case REQ_OP_ZONE_FINISH:
		switch (z->cond) {
		case BLK_ZONE_COND_IMP_OPEN:
		case BLK_ZONE_COND_EXP_OPEN:
		case BLK_ZONE_COND_CLOSED:
			break;
		default:
			return 0;
		}
		break;
	default:
		return -EINVAL;
	}

	set_bit(i, data->zbitmap);

	return 0;
}

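/*
 * Emulate Zone Management Send with the "select all" bit set for actions the
 * block layer cannot apply device-wide: report all zones once to build the
 * eligibility bitmap, then chain one zone management bio per marked zone and
 * wait for the chain to complete.
 */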
static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
{
	struct block_device *bdev = req->ns->bdev;
	unsigned int nr_zones = blkdev_nr_zones(bdev->bd_disk);
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = NULL;
	sector_t sector = 0;
	int ret;
	struct nvmet_zone_mgmt_send_all_data d = {
		.req = req,
	};

	d.zbitmap = kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(*(d.zbitmap)),
				 GFP_NOIO, q->node);
	if (!d.zbitmap) {
		ret = -ENOMEM;
		goto out;
	}

	/* Scan and build bitmap of the eligible zones */
	ret = blkdev_report_zones(bdev, 0, nr_zones, zmgmt_send_scan_cb, &d);
	if (ret != nr_zones) {
		if (ret > 0)
			ret = -EIO;
		goto out;
	}

	/* We scanned all the zones */
	ret = 0;

	while (sector < get_capacity(bdev->bd_disk)) {
		if (test_bit(blk_queue_zone_no(q, sector), d.zbitmap)) {
			bio = blk_next_bio(bio, bdev, 0,
				zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC,
				GFP_KERNEL);
			bio->bi_iter.bi_sector = sector;
			/* This may take a while, so be nice to others */
			cond_resched();
		}
		sector += blk_queue_zone_sectors(q);
	}

	if (bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}

out:
	kfree(d.zbitmap);

	return blkdev_zone_mgmt_errno_to_nvme_status(ret);
}

static u16 nvmet_bdev_execute_zmgmt_send_all(struct nvmet_req *req)
{
	int ret;

	switch (zsa_req_op(req->cmd->zms.zsa)) {
	case REQ_OP_ZONE_RESET:
		ret = blkdev_zone_mgmt(req->ns->bdev, REQ_OP_ZONE_RESET, 0,
				       get_capacity(req->ns->bdev->bd_disk),
				       GFP_KERNEL);
		if (ret < 0)
			return blkdev_zone_mgmt_errno_to_nvme_status(ret);
		break;
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return nvmet_bdev_zone_mgmt_emulate_all(req);
	default:
		/* this is needed to quiet a compiler warning */
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	return NVME_SC_SUCCESS;
}

static void nvmet_bdev_zmgmt_send_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
	sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zms.slba);
	enum req_opf op = zsa_req_op(req->cmd->zms.zsa);
	struct block_device *bdev = req->ns->bdev;
	sector_t zone_sectors = bdev_zone_sectors(bdev);
	u16 status = NVME_SC_SUCCESS;
	int ret;

	if (op == REQ_OP_LAST) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
		status = NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
		goto out;
	}

	/* when the select all bit is set, the slba field is ignored */
	if (req->cmd->zms.select_all) {
		status = nvmet_bdev_execute_zmgmt_send_all(req);
		goto out;
	}

	if (sect >= get_capacity(bdev->bd_disk)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
		goto out;
	}

	if (sect & (zone_sectors - 1)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	ret = blkdev_zone_mgmt(bdev, op, sect, zone_sectors, GFP_KERNEL);
	if (ret < 0)
		status = blkdev_zone_mgmt_errno_to_nvme_status(ret);

out:
	nvmet_req_complete(req, status);
}

void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req)
{
	INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zmgmt_send_work);
	queue_work(zbd_wq, &req->z.zmgmt_work);
}

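/*
 * Zone Append completion: on success the block layer places the sector that
 * was actually written in bio->bi_iter.bi_sector; report it back to the host
 * as an LBA in the completion queue entry.
 */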
static void nvmet_bdev_zone_append_bio_done(struct bio *bio)
{
	struct nvmet_req *req = bio->bi_private;

	if (bio->bi_status == BLK_STS_OK) {
		req->cqe->result.u64 =
			nvmet_sect_to_lba(req->ns, bio->bi_iter.bi_sector);
	}

	nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
	nvmet_req_bio_put(req, bio);
}

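/*
 * Build a REQ_OP_ZONE_APPEND bio from the request SGL. Pages are added with
 * bio_add_zone_append_page() so the bio never exceeds the device's zone
 * append limit; a short add is treated as an internal error.
 */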
void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
{
	sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
	const unsigned int op = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
	u16 status = NVME_SC_SUCCESS;
	unsigned int total_len = 0;
	struct scatterlist *sg;
	struct bio *bio;
	int sg_cnt;

	/* Request is completed on len mismatch in nvmet_check_transfer_len() */
	if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
		return;

	if (!req->sg_cnt) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
		goto out;
	}

	if (sect & (bdev_zone_sectors(req->ns->bdev) - 1)) {
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	if (nvmet_use_inline_bvec(req)) {
		bio = &req->z.inline_bio;
		bio_init(bio, req->ns->bdev, req->inline_bvec,
			 ARRAY_SIZE(req->inline_bvec), op);
	} else {
		bio = bio_alloc(req->ns->bdev, req->sg_cnt, op, GFP_KERNEL);
	}

	bio->bi_end_io = nvmet_bdev_zone_append_bio_done;
	bio->bi_iter.bi_sector = sect;
	bio->bi_private = req;
	if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
		bio->bi_opf |= REQ_FUA;

	for_each_sg(req->sg, sg, req->sg_cnt, sg_cnt) {
		struct page *p = sg_page(sg);
		unsigned int l = sg->length;
		unsigned int o = sg->offset;
		unsigned int ret;

		ret = bio_add_zone_append_page(bio, p, l, o);
		if (ret != sg->length) {
			status = NVME_SC_INTERNAL;
			goto out_put_bio;
		}
		total_len += sg->length;
	}

	if (total_len != nvmet_rw_data_len(req)) {
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
		goto out_put_bio;
	}

	submit_bio(bio);
	return;

out_put_bio:
	nvmet_req_bio_put(req, bio);
out:
	nvmet_req_complete(req, status);
}

u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->common.opcode) {
	case nvme_cmd_zone_append:
		req->execute = nvmet_bdev_execute_zone_append;
		return 0;
	case nvme_cmd_zone_mgmt_recv:
		req->execute = nvmet_bdev_execute_zone_mgmt_recv;
		return 0;
	case nvme_cmd_zone_mgmt_send:
		req->execute = nvmet_bdev_execute_zone_mgmt_send;
		return 0;
	default:
		return nvmet_bdev_parse_io_cmd(req);
	}
}