// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>

#include "nvmet.h"

u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);

	return len;
}

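/*
 * For illustration: NUMD is split across NUMDU (upper 16 bits) and NUMDL
 * (lower 16 bits) and is a 0's based dword count, so a command with
 * NUMDU = 0 and NUMDL = 0x3ff requests (0x3ff + 1) * 4 = 4096 bytes.
 */
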
static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
{
	switch (cdw10 & 0xff) {
	case NVME_FEAT_HOST_ID:
		return sizeof(req->sq->ctrl->hostid);
	default:
		return 0;
	}
}

u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
	return le64_to_cpu(cmd->get_log_page.lpo);
}

static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
}

static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	unsigned long flags;
	off_t offset = 0;
	u64 slot;
	u64 i;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;

	for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
		if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
				sizeof(struct nvme_error_slot)))
			break;

		if (slot == 0)
			slot = NVMET_ERROR_LOG_SLOTS - 1;
		else
			slot--;
		offset += sizeof(struct nvme_error_slot);
	}
	spin_unlock_irqrestore(&ctrl->error_lock, flags);
	nvmet_req_complete(req, 0);
}

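/*
 * The walk above starts at the slot of the most recent error (err_counter
 * modulo the number of slots) and steps backwards through the ring, so the
 * host sees the newest entries first.
 */
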
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	struct nvmet_ns *ns;
	u64 host_reads, host_writes, data_units_read, data_units_written;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
	if (!ns) {
		pr_err("Could not find namespace id : %d\n",
				le32_to_cpu(req->cmd->get_log_page.nsid));
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		return NVME_SC_INVALID_NS;
	}

	/* we don't have the right data for file backed ns */
	if (!ns->bdev)
		goto out;

	host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
	data_units_read = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
		sectors[READ]), 1000);
	host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
	data_units_written = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
		sectors[WRITE]), 1000);

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
out:
	nvmet_put_namespace(ns);

	return NVME_SC_SUCCESS;
}

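/*
 * The divide by 1000 above follows the SMART log convention that one "data
 * unit" is 1000 512-byte sectors, rounded up so that any I/O at all reports
 * at least one unit.
 */
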
static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads = 0, host_writes = 0;
	u64 data_units_read = 0, data_units_written = 0;
	struct nvmet_ns *ns;
	struct nvmet_ctrl *ctrl;

	ctrl = req->sq->ctrl;

	rcu_read_lock();
	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		/* we don't have the right data for file backed ns */
		if (!ns->bdev)
			continue;
		host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
		data_units_read += DIV_ROUND_UP(
			part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000);
		host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
		data_units_written += DIV_ROUND_UP(
			part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000);
	}
	rcu_read_unlock();

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
	struct nvme_smart_log *log;
	u16 status = NVME_SC_INTERNAL;
	unsigned long flags;

	if (req->transfer_len != sizeof(*log))
		goto out;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
		status = nvmet_get_smart_log_all(req, log);
	else
		status = nvmet_get_smart_log_nsid(req, log);
	if (status)
		goto out_free_log;

	spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
	put_unaligned_le64(req->sq->ctrl->err_counter,
			&log->num_err_log_entries);
	spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
	u16 status = NVME_SC_INTERNAL;
	struct nvme_effects_log *log;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_set_features] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_get_features] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_async_event] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_keep_alive] = cpu_to_le32(1 << 0);

	log->iocs[nvme_cmd_read] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));

	kfree(log);
out:
	nvmet_req_complete(req, status);
}

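/*
 * Bit 0 in each effects entry is CSUPP ("command supported"); no other
 * effects bits are set since none of the commands above alter controller
 * or namespace capabilities.
 */
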
static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_INTERNAL;
	size_t len;

	if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
		goto out;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns == U32_MAX)
		len = sizeof(__le32);
	else
		len = ctrl->nr_changed_ns * sizeof(__le32);
	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
	if (!status)
		status = nvmet_zero_sgl(req, len, req->transfer_len - len);
	ctrl->nr_changed_ns = 0;
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
	mutex_unlock(&ctrl->lock);
out:
	nvmet_req_complete(req, status);
}

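/*
 * nr_changed_ns == U32_MAX is used as an overflow marker when more
 * namespaces changed than the log can hold; in that case only the first
 * entry (the 0xffffffff sentinel the spec calls for) is copied out and the
 * remainder of the buffer is zeroed.
 */
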
static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
		struct nvme_ana_group_desc *desc)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	u32 count = 0;

	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
		rcu_read_lock();
		list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
			if (ns->anagrpid == grpid)
				desc->nsids[count++] = cpu_to_le32(ns->nsid);
		rcu_read_unlock();
	}

	desc->grpid = cpu_to_le32(grpid);
	desc->nnsids = cpu_to_le32(count);
	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	desc->state = req->port->ana_state[grpid];
	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
	return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
}

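/*
 * Each descriptor is the fixed 32-byte struct nvme_ana_group_desc header
 * followed by nnsids 32-bit namespace IDs, hence the size returned above.
 * When the host sets the RGO ("Return Groups Only") bit in LSP the NSID
 * list is omitted and only the per-group header is reported.
 */
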
static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
	struct nvme_ana_rsp_hdr hdr = { 0, };
	struct nvme_ana_group_desc *desc;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
	size_t len;
	u32 grpid;
	u16 ngrps = 0;
	u16 status;

	status = NVME_SC_INTERNAL;
	desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
			NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
	if (!desc)
		goto out;

	down_read(&nvmet_ana_sem);
	for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (!nvmet_ana_group_enabled[grpid])
			continue;
		len = nvmet_format_ana_group(req, grpid, desc);
		status = nvmet_copy_to_sgl(req, offset, desc, len);
		if (status)
			break;
		offset += len;
		ngrps++;
	}
	for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (nvmet_ana_group_enabled[grpid])
			ngrps++;
	}

	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	hdr.ngrps = cpu_to_le16(ngrps);
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
	up_read(&nvmet_ana_sem);

	kfree(desc);

	/* copy the header last once we know the number of groups */
	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
	if (!nvmet_check_data_len(req, nvmet_get_log_page_len(req->cmd)))
		return;

	switch (req->cmd->get_log_page.lid) {
	case NVME_LOG_ERROR:
		return nvmet_execute_get_log_page_error(req);
	case NVME_LOG_SMART:
		return nvmet_execute_get_log_page_smart(req);
	case NVME_LOG_FW_SLOT:
		/*
		 * We only support a single firmware slot which always is
		 * active, so we can zero out the whole firmware slot log and
		 * still claim to fully implement this mandatory log page.
		 */
		return nvmet_execute_get_log_page_noop(req);
	case NVME_LOG_CHANGED_NS:
		return nvmet_execute_get_log_changed_ns(req);
	case NVME_LOG_CMD_EFFECTS:
		return nvmet_execute_get_log_cmd_effects_ns(req);
	case NVME_LOG_ANA:
		return nvmet_execute_get_log_page_ana(req);
	}
	pr_err("unhandled lid %d on qid %d\n",
	       req->cmd->get_log_page.lid, req->sq->qid);
	req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	u16 status = 0;
	const char model[] = "Linux";

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* XXX: figure out how to assign real vendor IDs. */
	id->vid = 0;
	id->ssvid = 0;

	memset(id->sn, ' ', sizeof(id->sn));
	bin2hex(id->sn, &ctrl->subsys->serial,
		min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
	memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	id->rab = 6;

	/*
	 * XXX: figure out how we can assign an IEEE OUI, but until then
	 * the safest is to leave it as zeroes.
	 */

	/* we support multiple ports, multiple hosts and ANA: */
	id->cmic = (1 << 0) | (1 << 1) | (1 << 3);

	/* no limit on data transfer sizes for now */
	id->mdts = 0;
	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3 */
	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
	id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
		NVME_CTRL_ATTR_TBKAS);

	id->oacs = 0;

	/*
	 * We don't really have a practical limit on the number of abort
	 * commands.  But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);

	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

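	/*
	 * SQES/CQES pack the maximum (upper nibble) and required (lower
	 * nibble) entry sizes as powers of two: 0x66 advertises 2^6 = 64-byte
	 * submission queue entries and 0x44 advertises 2^4 = 16-byte
	 * completion queue entries, matching struct nvme_command and
	 * struct nvme_completion.
	 */
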
	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
			NVME_CTRL_ONCS_WRITE_ZEROES);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than an LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;

	id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
	if (ctrl->ops->has_keyed_sgls)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	/* Max command capsule size is sqe + single page of in-capsule data */
	id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
				  req->port->inline_data_size) / 16);
	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

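	/*
	 * Example: with a 4096-byte inline data size the command capsule is
	 * (64 + 4096) / 16 = 260 sixteen-byte units, while the response
	 * capsule is always 16 / 16 = 1 unit; IOCCSZ and IORCSZ are expressed
	 * in 16-byte units per the NVMe over Fabrics spec.
	 */
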
	id->msdbd = ctrl->ops->msdbd;

	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
	id->anatt = 10; /* random value */
	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
	id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

	/*
	 * Meh, we don't really support any power state.  Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	id->nwpc = 1 << 0; /* write protect and no write protect */

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	struct nvme_id_ns *id;
	u16 status = 0;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* return an all zeroed buffer if we can't find an active namespace */
	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
	if (!ns)
		goto done;

	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift);
	switch (req->port->ana_state[ns->anagrpid]) {
	case NVME_ANA_INACCESSIBLE:
	case NVME_ANA_PERSISTENT_LOSS:
		break;
	default:
		id->nuse = id->nsze;
		break;
	}

	if (ns->bdev)
		nvmet_bdev_set_limits(ns->bdev, id);

	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared.  Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = (1 << 0);
	id->anagrpid = cpu_to_le32(ns->anagrpid);

	memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid));

	id->lbaf[0].ds = ns->blksize_shift;

	if (ns->readonly)
		id->nsattr |= (1 << 0);
	nvmet_put_namespace(ns);
done:
	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		if (ns->nsid <= min_nsid)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}
	rcu_read_unlock();

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);

	kfree(list);
out:
	nvmet_req_complete(req, status);
}

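/*
 * This implements the Active Namespace ID list (CNS 02h): up to
 * buf_size / 4 = 1024 NSIDs greater than the NSID given in the command.
 * The ascending order of the result assumes the subsystem's namespaces
 * list is kept sorted by NSID, as the target core maintains it elsewhere.
 */
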
static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
				    void *id, off_t *off)
{
	struct nvme_ns_id_desc desc = {
		.nidt = type,
		.nidl = len,
	};
	u16 status;

	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
	if (status)
		return status;
	*off += sizeof(desc);

	status = nvmet_copy_to_sgl(req, *off, id, len);
	if (status)
		return status;
	*off += len;

	return 0;
}

static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	u16 status = 0;
	off_t off = 0;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
	if (!ns) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
						  NVME_NIDT_UUID_LEN,
						  &ns->uuid, &off);
		if (status)
			goto out_put_ns;
	}
	if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
						  NVME_NIDT_NGUID_LEN,
						  &ns->nguid, &off);
		if (status)
			goto out_put_ns;
	}

	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
			off) != NVME_IDENTIFY_DATA_SIZE - off)
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
out_put_ns:
	nvmet_put_namespace(ns);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify(struct nvmet_req *req)
{
	if (!nvmet_check_data_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	switch (req->cmd->identify.cns) {
	case NVME_ID_CNS_NS:
		return nvmet_execute_identify_ns(req);
	case NVME_ID_CNS_CTRL:
		return nvmet_execute_identify_ctrl(req);
	case NVME_ID_CNS_NS_ACTIVE_LIST:
		return nvmet_execute_identify_nslist(req);
	case NVME_ID_CNS_NS_DESC_LIST:
		return nvmet_execute_identify_desclist(req);
	}

	pr_err("unhandled identify cns %d on qid %d\n",
	       req->cmd->identify.cns, req->sq->qid);
	req->error_loc = offsetof(struct nvme_identify, cns);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

631 * A "minimum viable" abort implementation: the command is mandatory in the
632 * spec, but we are not required to do any useful work. We couldn't really
633 * do a useful abort, so don't bother even with waiting for the command
634 * to be exectuted and return immediately telling the command to abort
637 static void nvmet_execute_abort(struct nvmet_req *req)
639 if (!nvmet_check_data_len(req, 0))
641 nvmet_set_result(req, 1);
642 nvmet_req_complete(req, 0);
static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
	u16 status;

	if (req->ns->file)
		status = nvmet_file_flush(req);
	else
		status = nvmet_bdev_flush(req);

	if (status)
		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
	return status;
}

static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
	u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;

	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
	if (unlikely(!req->ns)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return status;
	}

	mutex_lock(&subsys->lock);
	switch (write_protect) {
	case NVME_NS_WRITE_PROTECT:
		req->ns->readonly = true;
		status = nvmet_write_protect_flush_sync(req);
		if (status)
			req->ns->readonly = false;
		break;
	case NVME_NS_NO_WRITE_PROTECT:
		req->ns->readonly = false;
		status = 0;
		break;
	default:
		break;
	}

	if (!status)
		nvmet_ns_changed(subsys, req->ns->nsid);
	mutex_unlock(&subsys->lock);
	return status;
}

u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);

	nvmet_set_result(req, req->sq->ctrl->kato);

	return 0;
}

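/*
 * KATO arrives in cdw11 in milliseconds while the target keeps it in
 * seconds, hence the round-up division: e.g. a host value of 2500 ms is
 * stored as 3 seconds.
 */
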
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	if (val32 & ~mask) {
		req->error_loc = offsetof(struct nvme_common_command, cdw11);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
	nvmet_set_result(req, val32);

	return 0;
}

static void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 status = 0;

	if (!nvmet_check_data_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		status = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
		break;
	case NVME_FEAT_HOST_ID:
		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_set_feat_write_protect(req);
		break;
	default:
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

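/*
 * For Number of Queues the completion result encodes the 0's based count
 * of I/O submission queues in the low 16 bits and of completion queues in
 * the high 16 bits, which is why (max_qid - 1) appears in both halves.
 */
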
static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 result;

	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
	if (!req->ns) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return NVME_SC_INVALID_NS | NVME_SC_DNR;
	}
	mutex_lock(&subsys->lock);
	if (req->ns->readonly == true)
		result = NVME_NS_WRITE_PROTECT;
	else
		result = NVME_NS_NO_WRITE_PROTECT;
	nvmet_set_result(req, result);
	mutex_unlock(&subsys->lock);

	return 0;
}

void nvmet_get_feat_kato(struct nvmet_req *req)
{
	nvmet_set_result(req, req->sq->ctrl->kato * 1000);
}

void nvmet_get_feat_async_event(struct nvmet_req *req)
{
	nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}

static void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 status = 0;

	if (!nvmet_check_data_len(req, nvmet_feat_data_len(req, cdw10)))
		return;

	switch (cdw10 & 0xff) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them.  We'll eventually
	 * need to come up with some fake values for these.
	 */
#if 0
	case NVME_FEAT_ARBITRATION:
		break;
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_IRQ_COALESCE:
		break;
	case NVME_FEAT_IRQ_CONFIG:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
#endif
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	case NVME_FEAT_VOLATILE_WC:
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid-1) | ((subsys->max_qid-1) << 16));
		break;
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_HOST_ID:
		/* need 128-bit host identifier flag */
		if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
			req->error_loc =
				offsetof(struct nvme_common_command, cdw11);
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}

		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
				sizeof(req->sq->ctrl->hostid));
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_get_feat_write_protect(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_data_len(req, 0))
		return;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

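/*
 * Async Event Requests are not completed here: the request is parked in
 * async_event_cmds[] and finished later from async_event_work once an
 * event is pending, so up to NVMET_ASYNC_EVENTS commands may be
 * outstanding at once.
 */
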
void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_data_len(req, 0))
		return;

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
	nvmet_req_complete(req, 0);
}

u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	if (nvme_is_fabrics(cmd))
		return nvmet_parse_fabrics_cmd(req);
	if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
		return nvmet_parse_discovery_cmd(req);

	ret = nvmet_check_ctrl_status(req, cmd);
	if (unlikely(ret))
		return ret;

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_identify;
		return 0;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	}

	pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
	       req->sq->qid);
	req->error_loc = offsetof(struct nvme_common_command, opcode);
	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}