// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/part_stat.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"

u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);

	return len;
}
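/*
 * Worked example for nvmet_get_log_page_len(): a host requesting a 4KB log
 * page sets the 0's based dword count NUMD = 0x3ff (NUMDU = 0, NUMDL = 0x3ff),
 * so len = (0x3ff + 1) * sizeof(u32) = 4096 bytes.
 */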
static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
{
	switch (cdw10 & 0xff) {
	case NVME_FEAT_HOST_ID:
		return sizeof(req->sq->ctrl->hostid);
	default:
		return 0;
	}
}

u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
	return le64_to_cpu(cmd->get_log_page.lpo);
}

static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
}
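/*
 * The error log is kept as a ring of NVMET_ERROR_LOG_SLOTS entries indexed
 * by ctrl->err_counter.  The walk below starts at the most recently written
 * slot and steps backwards (wrapping around), so entry 0 of the returned log
 * is the newest error, as the spec requires.
 */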
static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	unsigned long flags;
	off_t offset = 0;
	u64 slot;
	u64 i;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;

	for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
		if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
				sizeof(struct nvme_error_slot)))
			break;

		if (slot == 0)
			slot = NVMET_ERROR_LOG_SLOTS - 1;
		else
			slot--;
		offset += sizeof(struct nvme_error_slot);
	}
	spin_unlock_irqrestore(&ctrl->error_lock, flags);
	nvmet_req_complete(req, 0);
}
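/*
 * SMART "data units" are thousands of 512-byte units, rounded up, which is
 * exactly what DIV_ROUND_UP(sectors, 1000) computes below since the block
 * layer accounts sectors in 512-byte units.
 */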
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads, host_writes, data_units_read, data_units_written;

	req->ns = nvmet_find_namespace(req->sq->ctrl,
				       req->cmd->get_log_page.nsid);
	if (!req->ns) {
		pr_err("Could not find namespace id : %d\n",
		       le32_to_cpu(req->cmd->get_log_page.nsid));
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		return NVME_SC_INVALID_NS;
	}

	/* we don't have the right data for file backed ns */
	if (!req->ns->bdev)
		return NVME_SC_SUCCESS;

	host_reads = part_stat_read(req->ns->bdev, ios[READ]);
	data_units_read =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
	host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
	data_units_written =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}
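/*
 * The NSID-all variant sums the same counters over every block-device
 * namespace in the subsystem; file-backed namespaces are skipped for the
 * same reason as above.
 */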
static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads = 0, host_writes = 0;
	u64 data_units_read = 0, data_units_written = 0;
	struct nvmet_ns *ns;
	struct nvmet_ctrl *ctrl;
	unsigned long idx;

	ctrl = req->sq->ctrl;
	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		/* we don't have the right data for file backed ns */
		if (!ns->bdev)
			continue;
		host_reads += part_stat_read(ns->bdev, ios[READ]);
		data_units_read += DIV_ROUND_UP(
			part_stat_read(ns->bdev, sectors[READ]), 1000);
		host_writes += part_stat_read(ns->bdev, ios[WRITE]);
		data_units_written += DIV_ROUND_UP(
			part_stat_read(ns->bdev, sectors[WRITE]), 1000);
	}

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
	struct nvme_smart_log *log;
	u16 status = NVME_SC_INTERNAL;
	unsigned long flags;

	if (req->transfer_len != sizeof(*log))
		goto out;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
		status = nvmet_get_smart_log_all(req, log);
	else
		status = nvmet_get_smart_log_nsid(req, log);
	if (status)
		goto out_free_log;

	spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
	put_unaligned_le64(req->sq->ctrl->err_counter,
			   &log->num_err_log_entries);
	spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}
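/*
 * Each commands-supported-and-effects entry is a 32-bit field; bit 0 (CSUPP)
 * just marks the opcode as supported with no further effects reported, hence
 * the cpu_to_le32(1 << 0) pattern for every admin and I/O opcode the target
 * implements.
 */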
static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
	u16 status = NVME_SC_INTERNAL;
	struct nvme_effects_log *log;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	log->acs[nvme_admin_get_log_page]	= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_identify]		= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_abort_cmd]		= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_set_features]	= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_get_features]	= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_async_event]	= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_keep_alive]		= cpu_to_le32(1 << 0);

	log->iocs[nvme_cmd_read]		= cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write]		= cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_flush]		= cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_dsm]			= cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write_zeroes]	= cpu_to_le32(1 << 0);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));

	kfree(log);
out:
	nvmet_req_complete(req, status);
}
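/*
 * nr_changed_ns == U32_MAX is the overflow marker set when more namespaces
 * changed than the log can hold; the list was then collapsed to a single
 * 0xffffffff (NVME_NSID_ALL) entry, so only one __le32 is copied out.
 */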
static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_INTERNAL;
	size_t len;

	if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
		goto out;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns == U32_MAX)
		len = sizeof(__le32);
	else
		len = ctrl->nr_changed_ns * sizeof(__le32);
	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
	if (!status)
		status = nvmet_zero_sgl(req, len, req->transfer_len - len);
	ctrl->nr_changed_ns = 0;
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
	mutex_unlock(&ctrl->lock);
out:
	nvmet_req_complete(req, status);
}
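/*
 * NVME_ANA_LOG_RGO is the "Return Groups Only" flag from the Get Log Page
 * command: when the host sets it, group descriptors are returned without
 * their NSID lists, which is why the namespace scan below is skipped.
 */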
static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
		struct nvme_ana_group_desc *desc)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 count = 0;

	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
		xa_for_each(&ctrl->subsys->namespaces, idx, ns)
			if (ns->anagrpid == grpid)
				desc->nsids[count++] = cpu_to_le32(ns->nsid);
	}

	desc->grpid = cpu_to_le32(grpid);
	desc->nnsids = cpu_to_le32(count);
	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	desc->state = req->port->ana_state[grpid];
	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
	return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
}

static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
	struct nvme_ana_rsp_hdr hdr = { 0, };
	struct nvme_ana_group_desc *desc;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
	size_t len;
	u32 grpid;
	u16 ngrps = 0;
	u16 status;

	status = NVME_SC_INTERNAL;
	desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
			NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
	if (!desc)
		goto out;

	down_read(&nvmet_ana_sem);
	for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (!nvmet_ana_group_enabled[grpid])
			continue;
		len = nvmet_format_ana_group(req, grpid, desc);
		status = nvmet_copy_to_sgl(req, offset, desc, len);
		if (status)
			break;
		offset += len;
		ngrps++;
	}
	for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (nvmet_ana_group_enabled[grpid])
			ngrps++;
	}

	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	hdr.ngrps = cpu_to_le16(ngrps);
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
	up_read(&nvmet_ana_sem);

	kfree(desc);

	/* copy the header last once we know the number of groups */
	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
	nvmet_req_complete(req, status);
}
static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
		return;

	switch (req->cmd->get_log_page.lid) {
	case NVME_LOG_ERROR:
		return nvmet_execute_get_log_page_error(req);
	case NVME_LOG_SMART:
		return nvmet_execute_get_log_page_smart(req);
	case NVME_LOG_FW_SLOT:
		/*
		 * We only support a single firmware slot which always is
		 * active, so we can zero out the whole firmware slot log and
		 * still claim to fully implement this mandatory log page.
		 */
		return nvmet_execute_get_log_page_noop(req);
	case NVME_LOG_CHANGED_NS:
		return nvmet_execute_get_log_changed_ns(req);
	case NVME_LOG_CMD_EFFECTS:
		return nvmet_execute_get_log_cmd_effects_ns(req);
	case NVME_LOG_ANA:
		return nvmet_execute_get_log_page_ana(req);
	}
	pr_err("unhandled lid %d on qid %d\n",
	       req->cmd->get_log_page.lid, req->sq->qid);
	req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}
static void nvmet_id_set_model_number(struct nvme_id_ctrl *id,
				      struct nvmet_subsys *subsys)
{
	const char *model = NVMET_DEFAULT_CTRL_MODEL;
	struct nvmet_subsys_model *subsys_model;

	rcu_read_lock();
	subsys_model = rcu_dereference(subsys->model);
	if (subsys_model)
		model = subsys_model->number;
	memcpy_and_pad(id->mn, sizeof(id->mn), model, strlen(model), ' ');
	rcu_read_unlock();
}
static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	u32 cmd_capsule_size;
	u16 status = 0;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* XXX: figure out how to assign real vendor IDs. */
	id->vid = 0;
	id->ssvid = 0;

	memset(id->sn, ' ', sizeof(id->sn));
	bin2hex(id->sn, &ctrl->subsys->serial,
		min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
	nvmet_id_set_model_number(id, ctrl->subsys);
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	id->rab = 6;

	/*
	 * XXX: figure out how we can assign an IEEE OUI, but until then
	 * the safest is to leave it as zeroes.
	 */

	/* we support multiple ports, multiple hosts and ANA: */
	id->cmic = (1 << 0) | (1 << 1) | (1 << 3);

	/* Limit MDTS according to transport capability */
	if (ctrl->ops->get_mdts)
		id->mdts = ctrl->ops->get_mdts(ctrl);
	else
		id->mdts = 0;

	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3 */
	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
	id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
		NVME_CTRL_ATTR_TBKAS);

	id->oacs = 0;

	/*
	 * We don't really have a practical limit on the number of abort
	 * commands.  But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);
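	/*
	 * SQES/CQES pack the maximum (upper nibble) and required (lower
	 * nibble) entry sizes as powers of two: (0x6 << 4) | 0x6 advertises
	 * only 2^6 = 64 byte SQEs, and (0x4 << 4) | 0x4 only 2^4 = 16 byte
	 * CQEs, the fixed sizes used by NVMe over Fabrics.
	 */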
	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
			NVME_CTRL_ONCS_WRITE_ZEROES);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than a LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;

	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	/*
	 * Max command capsule size is sqe + in-capsule data size.
	 * Disable in-capsule data for Metadata capable controllers.
	 */
	cmd_capsule_size = sizeof(struct nvme_command);
	if (!ctrl->pi_support)
		cmd_capsule_size += req->port->inline_data_size;
	id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);
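	/*
	 * IOCCSZ is reported in 16-byte units; e.g. a port configured for
	 * 4KB of in-capsule data yields (64 + 4096) / 16 = 260.
	 */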
	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
	id->anatt = 10; /* random value */
	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
	id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

	/*
	 * Meh, we don't really support any power state.  Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	id->nwpc = 1 << 0; /* write protect and no write protect */

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}
static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	struct nvme_id_ns *id;
	u16 status = 0;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* return an all zeroed buffer if we can't find an active namespace */
	ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid);
	if (!ns) {
		status = NVME_SC_INVALID_NS;
		goto done;
	}

	nvmet_ns_revalidate(ns);

	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift);
	switch (req->port->ana_state[ns->anagrpid]) {
	case NVME_ANA_INACCESSIBLE:
	case NVME_ANA_PERSISTENT_LOSS:
		break;
	default:
		id->nuse = id->nsze;
		break;
	}

	if (ns->bdev)
		nvmet_bdev_set_limits(ns->bdev, id);

	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared.  Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = (1 << 0);
	id->anagrpid = cpu_to_le32(ns->anagrpid);

	memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid));

	id->lbaf[0].ds = ns->blksize_shift;

	if (ctrl->pi_support && nvmet_ns_has_pi(ns)) {
		id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
			  NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
			  NVME_NS_DPC_PI_TYPE3;
		id->mc = NVME_MC_EXTENDED_LBA;
		id->dps = ns->pi_type;
		id->flbas = NVME_NS_FLBAS_META_EXT;
		id->lbaf[0].ms = cpu_to_le16(ns->metadata_size);
	}

	if (ns->readonly)
		id->nsattr |= (1 << 0);
	nvmet_put_namespace(ns);
done:
	if (!status)
		status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
	kfree(id);
out:
	nvmet_req_complete(req, status);
}
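/*
 * CNS 0x02: return the ascending list of active NSIDs greater than the NSID
 * given in the command; a host pages through the full set by repeating the
 * command with the last NSID it received.  xa_for_each() walks the XArray in
 * index order, so the list comes out already sorted.
 */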
static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		if (ns->nsid <= min_nsid)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);

	kfree(list);
out:
	nvmet_req_complete(req, status);
}

static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
				    void *id, off_t *off)
{
	struct nvme_ns_id_desc desc = {
		.nidt = type,
		.nidl = len,
	};
	u16 status;

	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
	if (status)
		return status;
	*off += sizeof(desc);

	status = nvmet_copy_to_sgl(req, *off, id, len);
	if (status)
		return status;
	*off += len;

	return 0;
}
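/*
 * CNS 0x03: emit one nvme_ns_id_desc header per non-zero identifier (UUID,
 * then NGUID), each followed by its payload.  Zero-filling the rest of the
 * 4KB buffer below doubles as the list terminator, since a descriptor with
 * NIDT == 0 ends the list.
 */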
static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
	u16 status = 0;
	off_t off = 0;

	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
	if (!req->ns) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
						  NVME_NIDT_UUID_LEN,
						  &req->ns->uuid, &off);
		if (status)
			goto out;
	}
	if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
						  NVME_NIDT_NGUID_LEN,
						  &req->ns->nguid, &off);
		if (status)
			goto out;
	}

	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
			off) != NVME_IDENTIFY_DATA_SIZE - off)
		status = NVME_SC_INTERNAL | NVME_SC_DNR;

out:
	nvmet_req_complete(req, status);
}
static void nvmet_execute_identify(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	switch (req->cmd->identify.cns) {
	case NVME_ID_CNS_NS:
		return nvmet_execute_identify_ns(req);
	case NVME_ID_CNS_CTRL:
		return nvmet_execute_identify_ctrl(req);
	case NVME_ID_CNS_NS_ACTIVE_LIST:
		return nvmet_execute_identify_nslist(req);
	case NVME_ID_CNS_NS_DESC_LIST:
		return nvmet_execute_identify_desclist(req);
	}

	pr_err("unhandled identify cns %d on qid %d\n",
	       req->cmd->identify.cns, req->sq->qid);
	req->error_loc = offsetof(struct nvme_identify, cns);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}
/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so don't bother even with waiting for the command
 * to be executed and return immediately telling the command to abort
 * wasn't possible.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
	nvmet_set_result(req, 1);	/* bit 0 set: command was not aborted */
	nvmet_req_complete(req, 0);
}
static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
	u16 status;

	if (req->ns->file)
		status = nvmet_file_flush(req);
	else
		status = nvmet_bdev_flush(req);

	if (status)
		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
	return status;
}

static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
	u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;

	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
	if (unlikely(!req->ns)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return status;
	}

	mutex_lock(&subsys->lock);
	switch (write_protect) {
	case NVME_NS_WRITE_PROTECT:
		req->ns->readonly = true;
		status = nvmet_write_protect_flush_sync(req);
		if (status)
			req->ns->readonly = false;
		break;
	case NVME_NS_NO_WRITE_PROTECT:
		req->ns->readonly = false;
		status = 0;
		break;
	default:
		break;
	}

	if (!status)
		nvmet_ns_changed(subsys, req->ns->nsid);
	mutex_unlock(&subsys->lock);
	return status;
}
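/*
 * KATO arrives in CDW11 in milliseconds but is stored in seconds (rounded
 * up), so e.g. a host value of 15000ms is kept as kato = 15.  The timer is
 * stopped and restarted so the new interval takes effect immediately.
 */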
u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	nvmet_stop_keep_alive_timer(req->sq->ctrl);
	req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
	nvmet_start_keep_alive_timer(req->sq->ctrl);

	nvmet_set_result(req, req->sq->ctrl->kato);

	return 0;
}

u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	if (val32 & ~mask) {
		req->error_loc = offsetof(struct nvme_common_command, cdw11);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
	nvmet_set_result(req, val32);

	return 0;
}
void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
	u16 status = 0;
	u16 nsqr;
	u16 ncqr;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_NUM_QUEUES:
		ncqr = (cdw11 >> 16) & 0xffff;
		nsqr = cdw11 & 0xffff;
		if (ncqr == 0xffff || nsqr == 0xffff) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		status = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
		break;
	case NVME_FEAT_HOST_ID:
		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_set_feat_write_protect(req);
		break;
	default:
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}
static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 result;

	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
	if (!req->ns) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return NVME_SC_INVALID_NS | NVME_SC_DNR;
	}
	mutex_lock(&subsys->lock);
	if (req->ns->readonly)
		result = NVME_NS_WRITE_PROTECT;
	else
		result = NVME_NS_NO_WRITE_PROTECT;
	nvmet_set_result(req, result);
	mutex_unlock(&subsys->lock);

	return 0;
}

void nvmet_get_feat_kato(struct nvmet_req *req)
{
	/* report back in milliseconds, the granularity the host used */
	nvmet_set_result(req, req->sq->ctrl->kato * 1000);
}

void nvmet_get_feat_async_event(struct nvmet_req *req)
{
	nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}
void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
		return;

	switch (cdw10 & 0xff) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them.  We'll eventually
	 * need to come up with some fake values for these.
	 */
#if 0
	case NVME_FEAT_ARBITRATION:
		break;
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_IRQ_COALESCE:
		break;
	case NVME_FEAT_IRQ_CONFIG:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
#endif
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	case NVME_FEAT_VOLATILE_WC:
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_HOST_ID:
		/* need 128-bit host identifier flag */
		if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
			req->error_loc =
				offsetof(struct nvme_common_command, cdw11);
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}

		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
					   sizeof(req->sq->ctrl->hostid));
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_get_feat_write_protect(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}
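/*
 * Async Event Request commands carry no data and are not completed until an
 * event fires: each one is parked in ctrl->async_event_cmds and answered
 * later by async_event_work.  A host with more than NVMET_ASYNC_EVENTS AERs
 * outstanding (AERL is a 0's based value) gets an immediate ASYNC_LIMIT
 * error instead.
 */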
void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		 ctrl->cntlid, ctrl->kato);

	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
	nvmet_req_complete(req, 0);
}
u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	if (nvme_is_fabrics(cmd))
		return nvmet_parse_fabrics_cmd(req);
	if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
		return nvmet_parse_discovery_cmd(req);

	ret = nvmet_check_ctrl_status(req, cmd);
	if (unlikely(ret))
		return ret;

	if (nvmet_req_passthru_ctrl(req))
		return nvmet_parse_passthru_admin_cmd(req);

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_identify;
		return 0;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	}

	pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
	       req->sq->qid);
	req->error_loc = offsetof(struct nvme_common_command, opcode);
	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}