/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>

#include "nvmet.h"
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);
	return len;
}

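/*
 * Complete a Get Log Page request with an all-zeroes payload of the
 * requested length; sufficient for mandatory log pages we keep no data for.
 */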
static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
}

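/*
 * Build the SMART log for a single namespace from the block layer's I/O
 * accounting of the backing device.
 */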
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	struct nvmet_ns *ns;
	u64 host_reads, host_writes, data_units_read, data_units_written;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
	if (!ns) {
		pr_err("Could not find namespace id : %d\n",
				le32_to_cpu(req->cmd->get_log_page.nsid));
		return NVME_SC_INVALID_NS;
	}

	/* we don't have the right data for file backed ns */
	if (!ns->bdev)
		goto out;

	/* the NVMe spec counts data units in thousands of 512-byte units */
	host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
	data_units_read = DIV_ROUND_UP(
		part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000);
	host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
	data_units_written = DIV_ROUND_UP(
		part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000);

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
out:
	nvmet_put_namespace(ns);
	return NVME_SC_SUCCESS;
}

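/*
 * Build the controller-wide SMART log (NSID 0xffffffff) by summing the
 * block layer accounting over all namespaces in the subsystem.
 */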
static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads = 0, host_writes = 0;
	u64 data_units_read = 0, data_units_written = 0;
	struct nvmet_ns *ns;
	struct nvmet_ctrl *ctrl;

	ctrl = req->sq->ctrl;

	rcu_read_lock();
	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		/* we don't have the right data for file backed ns */
		if (!ns->bdev)
			continue;
		host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
		data_units_read += DIV_ROUND_UP(
			part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000);
		host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
		data_units_written += DIV_ROUND_UP(
			part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000);
	}
	rcu_read_unlock();

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
	struct nvme_smart_log *log;
	u16 status = NVME_SC_INTERNAL;

	if (req->data_len != sizeof(*log))
		goto out;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
		status = nvmet_get_smart_log_all(req, log);
	else
		status = nvmet_get_smart_log_nsid(req, log);
	if (status)
		goto out_free_log;

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

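/*
 * Commands Supported and Effects log page: bit 0 (CSUPP) of each entry
 * marks the corresponding opcode as supported; all other effects are zero.
 */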
static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
	u16 status = NVME_SC_INTERNAL;
	struct nvme_effects_log *log;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_set_features] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_get_features] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_async_event] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_keep_alive] = cpu_to_le32(1 << 0);

	log->iocs[nvme_cmd_read] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));

	kfree(log);
out:
	nvmet_req_complete(req, status);
}

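/*
 * Changed Namespace List log page.  If more namespaces changed than the
 * list can hold, nr_changed_ns was set to U32_MAX and the first entry
 * holds 0xffffffff, so only that single entry is returned.  Reading the
 * page clears the list and unmasks the namespace attribute AEN.
 */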
static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_INTERNAL;
	size_t len;

	if (req->data_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
		goto out;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns == U32_MAX)
		len = sizeof(__le32);
	else
		len = ctrl->nr_changed_ns * sizeof(__le32);
	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
	if (!status)
		status = nvmet_zero_sgl(req, len, req->data_len - len);
	ctrl->nr_changed_ns = 0;
	clear_bit(NVME_AEN_CFG_NS_ATTR, &ctrl->aen_masked);
	mutex_unlock(&ctrl->lock);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	u16 status = 0;
	const char model[] = "Linux";

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* XXX: figure out how to assign real vendor IDs. */
	id->vid = 0;
	id->ssvid = 0;

	memset(id->sn, ' ', sizeof(id->sn));
	bin2hex(id->sn, &ctrl->subsys->serial,
		min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
	memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	id->rab = 6;

	/*
	 * XXX: figure out how we can assign an IEEE OUI, but until then
	 * the safest is to leave it as zeroes.
	 */

	/* we support multiple ports and multiple hosts: */
	id->cmic = (1 << 0) | (1 << 1);

	/* no limit on data transfer sizes for now */
	id->mdts = 0;
	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3 */
	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
	id->ctratt = cpu_to_le32(1 << 0);

	id->oacs = 0;

	/*
	 * We don't really have a practical limit on the number of abort
	 * commands.  But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);

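	/*
	 * Required SQ/CQ entry sizes: the low nibble is the log2 of the
	 * minimum, the high nibble the log2 of the maximum (64-byte SQEs,
	 * 16-byte CQEs).
	 */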
	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
			NVME_CTRL_ONCS_WRITE_ZEROES);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than an LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;

	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
	if (ctrl->ops->has_keyed_sgls)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strcpy(id->subnqn, ctrl->subsys->subsysnqn);

	/* Max command capsule size is sqe + single page of in-capsule data */
	id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
				  req->port->inline_data_size) / 16);
	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	/*
	 * Meh, we don't really support any power state.  Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

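/*
 * Identify Namespace: fill in the data structure from the nvmet namespace,
 * or return an all-zeroes buffer if the NSID has no active namespace.
 */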
static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	struct nvme_id_ns *id;
	u16 status = 0;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* return an all zeroed buffer if we can't find an active namespace */
	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
	if (!ns)
		goto done;

	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nuse = id->nsze =
		cpu_to_le64(ns->size >> ns->blksize_shift);

	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared.  Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = (1 << 0);

	memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid));
	id->lbaf[0].ds = ns->blksize_shift;

	nvmet_put_namespace(ns);
done:
	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
	kfree(id);
out:
	nvmet_req_complete(req, status);
}

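/*
 * Identify Active Namespace ID list (CNS 02h): report up to 1024 NSIDs, in
 * increasing order, that are greater than the NSID given in the command.
 */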
static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		if (ns->nsid <= min_nsid)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}
	rcu_read_unlock();

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);

	kfree(list);
out:
	nvmet_req_complete(req, status);
}

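/*
 * Emit one Namespace Identification Descriptor (header plus identifier)
 * into the response buffer and advance the offset.
 */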
static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
				    void *id, off_t *off)
{
	struct nvme_ns_id_desc desc = {
		.nidt = type,
		.nidl = len,
	};
	u16 status;

	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
	if (status)
		return status;
	*off += sizeof(desc);

	status = nvmet_copy_to_sgl(req, *off, id, len);
	if (status)
		return status;
	*off += len;

	return 0;
}

static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	u16 status = 0;
	off_t off = 0;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
	if (!ns) {
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
						  NVME_NIDT_UUID_LEN,
						  &ns->uuid, &off);
		if (status)
			goto out_put_ns;
	}
	if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
						  NVME_NIDT_NGUID_LEN,
						  &ns->nguid, &off);
		if (status)
			goto out_put_ns;
	}

	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
			off) != NVME_IDENTIFY_DATA_SIZE - off)
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
out_put_ns:
	nvmet_put_namespace(ns);
out:
	nvmet_req_complete(req, status);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so don't even bother waiting for the command to be
 * executed; complete immediately with the result set to indicate that the
 * command was not aborted.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
	nvmet_set_result(req, 1);
	nvmet_req_complete(req, 0);
}

static void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
	u32 val32;
	u16 status = 0;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
		req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
		nvmet_set_result(req, req->sq->ctrl->kato);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
		if (val32 & ~NVMET_AEN_CFG_ALL) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}

		WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
		nvmet_set_result(req, val32);
		break;
	case NVME_FEAT_HOST_ID:
		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
		break;
	default:
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
	u16 status = 0;

	switch (cdw10 & 0xff) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them.  We'll eventually
	 * need to come up with some fake values for these.
	 */
#if 0
	case NVME_FEAT_ARBITRATION:
		break;
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_IRQ_COALESCE:
		break;
	case NVME_FEAT_IRQ_CONFIG:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
#endif
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
		break;
	case NVME_FEAT_VOLATILE_WC:
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		nvmet_set_result(req, req->sq->ctrl->kato * 1000);
		break;
	case NVME_FEAT_HOST_ID:
		/* need 128-bit host identifier flag */
		if (!(req->cmd->common.cdw10[1] & cpu_to_le32(1 << 0))) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}

		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
				sizeof(req->sq->ctrl->hostid));
		break;
	default:
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

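/*
 * Queue an Asynchronous Event Request for completion when an event fires;
 * fail it immediately if the host already has the maximum number queued.
 */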
static void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

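/*
 * Keep Alive just rearms the controller's keep-alive timer; if the timer
 * is allowed to expire the association is torn down.
 */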
static void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
	nvmet_req_complete(req, 0);
}

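/*
 * Called at parse time: validate the controller state, then set up
 * req->execute and req->data_len for the given admin opcode.
 */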
u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	ret = nvmet_check_ctrl_status(req, cmd);
	if (unlikely(ret))
		return ret;

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->data_len = nvmet_get_log_page_len(cmd);

		switch (cmd->get_log_page.lid) {
		case NVME_LOG_ERROR:
			/*
			 * We currently never set the More bit in the status
			 * field, so all error log entries are invalid and can
			 * be zeroed out.  This is called a minimum viable
			 * implementation (TM) of this mandatory log page.
			 */
			req->execute = nvmet_execute_get_log_page_noop;
			return 0;
		case NVME_LOG_SMART:
			req->execute = nvmet_execute_get_log_page_smart;
			return 0;
		case NVME_LOG_FW_SLOT:
			/*
			 * We only support a single firmware slot which always
			 * is active, so we can zero out the whole firmware slot
			 * log and still claim to fully implement this mandatory
			 * log page.
			 */
			req->execute = nvmet_execute_get_log_page_noop;
			return 0;
		case NVME_LOG_CHANGED_NS:
			req->execute = nvmet_execute_get_log_changed_ns;
			return 0;
		case NVME_LOG_CMD_EFFECTS:
			req->execute = nvmet_execute_get_log_cmd_effects_ns;
			return 0;
		}
		break;
	case nvme_admin_identify:
		req->data_len = NVME_IDENTIFY_DATA_SIZE;
		switch (cmd->identify.cns) {
		case NVME_ID_CNS_NS:
			req->execute = nvmet_execute_identify_ns;
			return 0;
		case NVME_ID_CNS_CTRL:
			req->execute = nvmet_execute_identify_ctrl;
			return 0;
		case NVME_ID_CNS_NS_ACTIVE_LIST:
			req->execute = nvmet_execute_identify_nslist;
			return 0;
		case NVME_ID_CNS_NS_DESC_LIST:
			req->execute = nvmet_execute_identify_desclist;
			return 0;
		}
		break;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		req->data_len = 0;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		req->data_len = 0;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		req->data_len = 0;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		req->data_len = 0;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		req->data_len = 0;
		return 0;
	}

	pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
	       req->sq->qid);
	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}