// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/part_stat.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"

u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
        u32 len = le16_to_cpu(cmd->get_log_page.numdu);

        len <<= 16;
        len += le16_to_cpu(cmd->get_log_page.numdl);
        /* NUMD is a 0's based value */
        len += 1;
        len *= sizeof(u32);

        return len;
}

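/*
 * Worked example for the helper above (illustrative values only): with
 * NUMDU = 0x0001 and NUMDL = 0x0000 the 0's based dword count is 0x10000,
 * so the returned length is (0x10000 + 1) * sizeof(u32) = 262148 bytes.
 */
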
static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
{
        switch (cdw10 & 0xff) {
        case NVME_FEAT_HOST_ID:
                return sizeof(req->sq->ctrl->hostid);
        default:
                return 0;
        }
}

u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
        return le64_to_cpu(cmd->get_log_page.lpo);
}

static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
        nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
}

static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        unsigned long flags;
        off_t offset = 0;
        u64 slot;
        u64 i;

        spin_lock_irqsave(&ctrl->error_lock, flags);
        slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;

        for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
                if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
                                sizeof(struct nvme_error_slot)))
                        break;

                if (slot == 0)
                        slot = NVMET_ERROR_LOG_SLOTS - 1;
                else
                        slot--;
                offset += sizeof(struct nvme_error_slot);
        }
        spin_unlock_irqrestore(&ctrl->error_lock, flags);
        nvmet_req_complete(req, 0);
}

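/*
 * Note on the loop above: err_counter only ever grows, so
 * err_counter % NVMET_ERROR_LOG_SLOTS points at the most recently filled
 * slot. Walking the ring backwards (wrapping from slot 0 to the last slot)
 * therefore returns error log entries newest-first.
 */
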
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
                struct nvme_smart_log *slog)
{
        u64 host_reads, host_writes, data_units_read, data_units_written;
        u16 status;

        status = nvmet_req_find_ns(req);
        if (status)
                return status;

        /* we don't have the right data for file-backed ns */
        if (!req->ns->bdev)
                return NVME_SC_SUCCESS;

        host_reads = part_stat_read(req->ns->bdev, ios[READ]);
        data_units_read =
                DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
        host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
        data_units_written =
                DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);

        put_unaligned_le64(host_reads, &slog->host_reads[0]);
        put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
        put_unaligned_le64(host_writes, &slog->host_writes[0]);
        put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

        return NVME_SC_SUCCESS;
}

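/*
 * The DIV_ROUND_UP(sectors, 1000) above reflects the SMART log convention
 * that one "data unit" is 1000 sectors of 512 bytes (512,000 bytes), with
 * the round-up ensuring any nonzero traffic reports at least one unit.
 */
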
static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
                struct nvme_smart_log *slog)
{
        u64 host_reads = 0, host_writes = 0;
        u64 data_units_read = 0, data_units_written = 0;
        struct nvmet_ns *ns;
        struct nvmet_ctrl *ctrl;
        unsigned long idx;

        ctrl = req->sq->ctrl;
        xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
                /* we don't have the right data for file-backed ns */
                if (!ns->bdev)
                        continue;
                host_reads += part_stat_read(ns->bdev, ios[READ]);
                data_units_read += DIV_ROUND_UP(
                        part_stat_read(ns->bdev, sectors[READ]), 1000);
                host_writes += part_stat_read(ns->bdev, ios[WRITE]);
                data_units_written += DIV_ROUND_UP(
                        part_stat_read(ns->bdev, sectors[WRITE]), 1000);
        }

        put_unaligned_le64(host_reads, &slog->host_reads[0]);
        put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
        put_unaligned_le64(host_writes, &slog->host_writes[0]);
        put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

        return NVME_SC_SUCCESS;
}

static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
        struct nvme_smart_log *log;
        u16 status = NVME_SC_INTERNAL;
        unsigned long flags;

        if (req->transfer_len != sizeof(*log))
                goto out;

        log = kzalloc(sizeof(*log), GFP_KERNEL);
        if (!log)
                goto out;

        if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
                status = nvmet_get_smart_log_all(req, log);
        else
                status = nvmet_get_smart_log_nsid(req, log);
        if (status)
                goto out_free_log;

        spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
        put_unaligned_le64(req->sq->ctrl->err_counter,
                        &log->num_err_log_entries);
        spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);

        status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
        kfree(log);
out:
        nvmet_req_complete(req, status);
}

static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
{
        log->acs[nvme_admin_get_log_page]       = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_identify]           = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_abort_cmd]          = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_set_features]       = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_get_features]       = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_async_event]        = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_keep_alive]         = cpu_to_le32(1 << 0);

        log->iocs[nvme_cmd_read]                = cpu_to_le32(1 << 0);
        log->iocs[nvme_cmd_write]               = cpu_to_le32(1 << 0);
        log->iocs[nvme_cmd_flush]               = cpu_to_le32(1 << 0);
        log->iocs[nvme_cmd_dsm]                 = cpu_to_le32(1 << 0);
        log->iocs[nvme_cmd_write_zeroes]        = cpu_to_le32(1 << 0);
}

static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log)
{
        log->iocs[nvme_cmd_zone_append]         = cpu_to_le32(1 << 0);
        log->iocs[nvme_cmd_zone_mgmt_send]      = cpu_to_le32(1 << 0);
        log->iocs[nvme_cmd_zone_mgmt_recv]      = cpu_to_le32(1 << 0);
}

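/*
 * In the two helpers above, bit 0 of each effects entry is CSUPP ("command
 * supported"); no other effects bits are set because none of these commands
 * are advertised as changing controller or namespace capabilities.
 */
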
static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
        struct nvme_effects_log *log;
        u16 status = NVME_SC_SUCCESS;

        log = kzalloc(sizeof(*log), GFP_KERNEL);
        if (!log) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        switch (req->cmd->get_log_page.csi) {
        case NVME_CSI_NVM:
                nvmet_get_cmd_effects_nvm(log);
                break;
        case NVME_CSI_ZNS:
                if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
                        status = NVME_SC_INVALID_IO_CMD_SET;
                        goto free;
                }
                nvmet_get_cmd_effects_nvm(log);
                nvmet_get_cmd_effects_zns(log);
                break;
        default:
                status = NVME_SC_INVALID_LOG_PAGE;
                goto free;
        }

        status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
free:
        kfree(log);
out:
        nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        u16 status = NVME_SC_INTERNAL;
        size_t len;

        if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
                goto out;

        mutex_lock(&ctrl->lock);
        if (ctrl->nr_changed_ns == U32_MAX)
                len = sizeof(__le32);
        else
                len = ctrl->nr_changed_ns * sizeof(__le32);
        status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
        if (!status)
                status = nvmet_zero_sgl(req, len, req->transfer_len - len);
        ctrl->nr_changed_ns = 0;
        nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
        mutex_unlock(&ctrl->lock);
out:
        nvmet_req_complete(req, status);
}

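/*
 * Note on the U32_MAX check above: nr_changed_ns == U32_MAX is the overflow
 * marker the target core sets once more namespaces change than the log can
 * hold; the list then collapses to the single NSID 0xffffffff, so only one
 * __le32 entry is copied out.
 */
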
static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
                struct nvme_ana_group_desc *desc)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmet_ns *ns;
        unsigned long idx;
        u32 count = 0;

        if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
                xa_for_each(&ctrl->subsys->namespaces, idx, ns)
                        if (ns->anagrpid == grpid)
                                desc->nsids[count++] = cpu_to_le32(ns->nsid);
        }

        desc->grpid = cpu_to_le32(grpid);
        desc->nnsids = cpu_to_le32(count);
        desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
        desc->state = req->port->ana_state[grpid];
        memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
        return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
}

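/*
 * Size sketch for the descriptor built above (illustrative): with RGO clear
 * and three namespaces in the group, the returned length is the fixed
 * 32-byte nvme_ana_group_desc header plus 3 * sizeof(__le32) of NSIDs.
 */
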
static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
        struct nvme_ana_rsp_hdr hdr = { 0, };
        struct nvme_ana_group_desc *desc;
        size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
        size_t len;
        u32 grpid;
        u16 ngrps = 0;
        u16 status;

        status = NVME_SC_INTERNAL;
        desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
                        NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
        if (!desc)
                goto out;

        down_read(&nvmet_ana_sem);
        for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
                if (!nvmet_ana_group_enabled[grpid])
                        continue;
                len = nvmet_format_ana_group(req, grpid, desc);
                status = nvmet_copy_to_sgl(req, offset, desc, len);
                if (status)
                        break;
                offset += len;
                ngrps++;
        }
        for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
                if (nvmet_ana_group_enabled[grpid])
                        ngrps++;
        }

        hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
        hdr.ngrps = cpu_to_le16(ngrps);
        nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
        up_read(&nvmet_ana_sem);

        kfree(desc);

        /* copy the header last once we know the number of groups */
        status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
        nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
        if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
                return;

        switch (req->cmd->get_log_page.lid) {
        case NVME_LOG_ERROR:
                return nvmet_execute_get_log_page_error(req);
        case NVME_LOG_SMART:
                return nvmet_execute_get_log_page_smart(req);
        case NVME_LOG_FW_SLOT:
                /*
                 * We only support a single firmware slot which always is
                 * active, so we can zero out the whole firmware slot log and
                 * still claim to fully implement this mandatory log page.
                 */
                return nvmet_execute_get_log_page_noop(req);
        case NVME_LOG_CHANGED_NS:
                return nvmet_execute_get_log_changed_ns(req);
        case NVME_LOG_CMD_EFFECTS:
                return nvmet_execute_get_log_cmd_effects_ns(req);
        case NVME_LOG_ANA:
                return nvmet_execute_get_log_page_ana(req);
        }
        pr_debug("unhandled lid %d on qid %d\n",
               req->cmd->get_log_page.lid, req->sq->qid);
        req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
        nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmet_subsys *subsys = ctrl->subsys;
        struct nvme_id_ctrl *id;
        u32 cmd_capsule_size;
        u16 status = 0;

        if (!subsys->subsys_discovered) {
                mutex_lock(&subsys->lock);
                subsys->subsys_discovered = true;
                mutex_unlock(&subsys->lock);
        }

        id = kzalloc(sizeof(*id), GFP_KERNEL);
        if (!id) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        /* XXX: figure out how to assign real vendor IDs. */
        id->vid = 0;
        id->ssvid = 0;

        memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
        memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number,
                       strlen(subsys->model_number), ' ');
        memcpy_and_pad(id->fr, sizeof(id->fr),
                       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

        id->rab = 6;

        /*
         * XXX: figure out how we can assign an IEEE OUI, but until then
         * the safest is to leave it as zeroes.
         */

        /* we support multiple ports, multiple hosts and ANA: */
        id->cmic = (1 << 0) | (1 << 1) | (1 << 3);

        /* Limit MDTS according to transport capability */
        if (ctrl->ops->get_mdts)
                id->mdts = ctrl->ops->get_mdts(ctrl);
        else
                id->mdts = 0;

        id->cntlid = cpu_to_le16(ctrl->cntlid);
        id->ver = cpu_to_le32(ctrl->subsys->ver);

        /* XXX: figure out what to do about RTD3R/RTD3 */
        id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
        id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
                NVME_CTRL_ATTR_TBKAS);

        id->oacs = 0;

        /*
         * We don't really have a practical limit on the number of abort
         * commands.  But we don't do anything useful for abort either, so
         * no point in allowing more abort commands than the spec requires.
         */
        id->acl = 3;

        id->aerl = NVMET_ASYNC_EVENTS - 1;

        /* first slot is read-only, only one slot supported */
        id->frmw = (1 << 0) | (1 << 1);
        id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
        id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
        id->npss = 0;

        /* We support keep-alive timeout in granularity of seconds */
        id->kas = cpu_to_le16(NVMET_KAS);

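        /*
         * SQES/CQES pack the maximum and minimum supported queue entry sizes
         * as log2 values (max in the upper nibble, min in the lower): a
         * 64-byte SQE is 2^6 and a 16-byte CQE is 2^4, hence 0x66 and 0x44.
         */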
        id->sqes = (0x6 << 4) | 0x6;
        id->cqes = (0x4 << 4) | 0x4;

        /* no enforcement soft-limit for maxcmd - pick arbitrary high value */
        id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

        id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
        id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
        id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
                        NVME_CTRL_ONCS_WRITE_ZEROES);

        /* XXX: don't report vwc if the underlying device is write through */
        id->vwc = NVME_CTRL_VWC_PRESENT;

        /*
         * We can't support atomic writes bigger than an LBA without support
         * from the backend device.
         */
        id->awun = 0;
        id->awupf = 0;

        id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
        if (ctrl->ops->flags & NVMF_KEYED_SGLS)
                id->sgls |= cpu_to_le32(1 << 2);
        if (req->port->inline_data_size)
                id->sgls |= cpu_to_le32(1 << 20);

        strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

        /*
         * Max command capsule size is sqe + in-capsule data size.
         * Disable in-capsule data for Metadata capable controllers.
         */
        cmd_capsule_size = sizeof(struct nvme_command);
        if (!ctrl->pi_support)
                cmd_capsule_size += req->port->inline_data_size;
        id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);
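        /*
         * Worked example (illustrative numbers): a 64-byte SQE plus an 8 KiB
         * inline data allowance gives ioccsz = (64 + 8192) / 16 = 516, since
         * IOCCSZ is expressed in 16-byte units.
         */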

        /* Max response capsule size is cqe */
        id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

        id->msdbd = ctrl->ops->msdbd;

        /* can report all ANA states, including the change state */
        id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
        id->anatt = 10; /* random value */
        id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
        id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

        /*
         * Meh, we don't really support any power state.  Fake up the same
         * values that qemu does.
         */
        id->psd[0].max_power = cpu_to_le16(0x9c4);
        id->psd[0].entry_lat = cpu_to_le32(0x10);
        id->psd[0].exit_lat = cpu_to_le32(0x4);

        id->nwpc = 1 << 0; /* write protect and no write protect */

        status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

        kfree(id);
out:
        nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
        struct nvme_id_ns *id;
        u16 status;

        if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
                req->error_loc = offsetof(struct nvme_identify, nsid);
                status = NVME_SC_INVALID_NS | NVME_SC_DNR;
                goto out;
        }

        id = kzalloc(sizeof(*id), GFP_KERNEL);
        if (!id) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        /* return an all-zeroed buffer if we can't find an active namespace */
        status = nvmet_req_find_ns(req);
        if (status) {
                status = 0;
                goto done;
        }

        nvmet_ns_revalidate(req->ns);

        /*
         * nuse = ncap = nsze isn't always true, but we have no way to find
         * that out from the underlying device.
         */
        id->ncap = id->nsze =
                cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
        switch (req->port->ana_state[req->ns->anagrpid]) {
        case NVME_ANA_INACCESSIBLE:
        case NVME_ANA_PERSISTENT_LOSS:
                break;
        default:
                id->nuse = id->nsze;
                break;
        }

        if (req->ns->bdev)
                nvmet_bdev_set_limits(req->ns->bdev, id);

        /*
         * We just provide a single LBA format that matches what the
         * underlying device reports.
         */
        id->nlbaf = 0;
        id->flbas = 0;

        /*
         * Our namespace might always be shared.  Not just with other
         * controllers, but also with any other user of the block device.
         */
        id->nmic = (1 << 0);
        id->anagrpid = cpu_to_le32(req->ns->anagrpid);

        memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));

        id->lbaf[0].ds = req->ns->blksize_shift;

        if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
                id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
                          NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
                          NVME_NS_DPC_PI_TYPE3;
                id->mc = NVME_MC_EXTENDED_LBA;
                id->dps = req->ns->pi_type;
                id->flbas = NVME_NS_FLBAS_META_EXT;
                id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
        }

        if (req->ns->readonly)
                id->nsattr |= (1 << 0);
done:
        if (!status)
                status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

        kfree(id);
out:
        nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
        static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmet_ns *ns;
        unsigned long idx;
        u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
        __le32 *list;
        u16 status = 0;
        int i = 0;

        list = kzalloc(buf_size, GFP_KERNEL);
        if (!list) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
                if (ns->nsid <= min_nsid)
                        continue;
                list[i++] = cpu_to_le32(ns->nsid);
                if (i == buf_size / sizeof(__le32))
                        break;
        }

        status = nvmet_copy_to_sgl(req, 0, list, buf_size);

        kfree(list);
out:
        nvmet_req_complete(req, status);
}

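/*
 * Note on the nslist loop above: the Active Namespace ID list returns only
 * NSIDs strictly greater than CDW1.NSID, in ascending order; xa_for_each()
 * walks the xarray in index (i.e. NSID) order, so no extra sort is needed.
 */
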
static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
                                    void *id, off_t *off)
{
        struct nvme_ns_id_desc desc = {
                .nidt = type,
                .nidl = len,
        };
        u16 status;

        status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
        if (status)
                return status;
        *off += sizeof(desc);

        status = nvmet_copy_to_sgl(req, *off, id, len);
        if (status)
                return status;
        *off += len;

        return 0;
}

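/*
 * Layout note for the helper above: each entry is a 4-byte nvme_ns_id_desc
 * header (type, length, two reserved bytes) followed by the identifier
 * itself, so a UUID descriptor, for example, consumes 4 + 16 = 20 bytes of
 * the 4 KiB Identify buffer.
 */
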
static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
        off_t off = 0;
        u16 status;

        status = nvmet_req_find_ns(req);
        if (status)
                goto out;

        if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
                status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
                                                  NVME_NIDT_UUID_LEN,
                                                  &req->ns->uuid, &off);
                if (status)
                        goto out;
        }
        if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
                status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
                                                  NVME_NIDT_NGUID_LEN,
                                                  &req->ns->nguid, &off);
                if (status)
                        goto out;
        }

        status = nvmet_copy_ns_identifier(req, NVME_NIDT_CSI,
                                          NVME_NIDT_CSI_LEN,
                                          &req->ns->csi, &off);
        if (status)
                goto out;

        if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
                        off) != NVME_IDENTIFY_DATA_SIZE - off)
                status = NVME_SC_INTERNAL | NVME_SC_DNR;

out:
        nvmet_req_complete(req, status);
}

static bool nvmet_handle_identify_desclist(struct nvmet_req *req)
{
        switch (req->cmd->identify.csi) {
        case NVME_CSI_NVM:
                nvmet_execute_identify_desclist(req);
                return true;
        case NVME_CSI_ZNS:
                if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
                        nvmet_execute_identify_desclist(req);
                        return true;
                }
                return false;
        default:
                return false;
        }
}

static void nvmet_execute_identify(struct nvmet_req *req)
{
        if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
                return;

        switch (req->cmd->identify.cns) {
        case NVME_ID_CNS_NS:
                switch (req->cmd->identify.csi) {
                case NVME_CSI_NVM:
                        return nvmet_execute_identify_ns(req);
                default:
                        break;
                }
                break;
        case NVME_ID_CNS_CS_NS:
                if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
                        switch (req->cmd->identify.csi) {
                        case NVME_CSI_ZNS:
                                return nvmet_execute_identify_cns_cs_ns(req);
                        default:
                                break;
                        }
                }
                break;
        case NVME_ID_CNS_CTRL:
                switch (req->cmd->identify.csi) {
                case NVME_CSI_NVM:
                        return nvmet_execute_identify_ctrl(req);
                }
                break;
        case NVME_ID_CNS_CS_CTRL:
                if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
                        switch (req->cmd->identify.csi) {
                        case NVME_CSI_ZNS:
                                return nvmet_execute_identify_cns_cs_ctrl(req);
                        default:
                                break;
                        }
                }
                break;
        case NVME_ID_CNS_NS_ACTIVE_LIST:
                switch (req->cmd->identify.csi) {
                case NVME_CSI_NVM:
                        return nvmet_execute_identify_nslist(req);
                default:
                        break;
                }
                break;
        case NVME_ID_CNS_NS_DESC_LIST:
                if (nvmet_handle_identify_desclist(req))
                        return;
                break;
        }

        nvmet_req_cns_error_complete(req);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so don't even bother waiting for the command to be
 * executed; return immediately, indicating that the command to abort
 * wasn't found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
        if (!nvmet_check_transfer_len(req, 0))
                return;
        nvmet_set_result(req, 1);
        nvmet_req_complete(req, 0);
}

static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
        u16 status;

        if (req->ns->file)
                status = nvmet_file_flush(req);
        else
                status = nvmet_bdev_flush(req);

        if (status)
                pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
        return status;
}

static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
        u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
        struct nvmet_subsys *subsys = nvmet_req_subsys(req);
        u16 status;

        status = nvmet_req_find_ns(req);
        if (status)
                return status;

        mutex_lock(&subsys->lock);
        switch (write_protect) {
        case NVME_NS_WRITE_PROTECT:
                req->ns->readonly = true;
                status = nvmet_write_protect_flush_sync(req);
                if (status)
                        req->ns->readonly = false;
                break;
        case NVME_NS_NO_WRITE_PROTECT:
                req->ns->readonly = false;
                status = 0;
                break;
        default:
                break;
        }

        if (!status)
                nvmet_ns_changed(subsys, req->ns->nsid);
        mutex_unlock(&subsys->lock);
        return status;
}

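/*
 * Ordering note for the write-protect transition above: the namespace is
 * marked read-only first so no new writes slip in, then in-flight data is
 * flushed to the backend; a failed flush rolls the state back rather than
 * leaving a protected-but-dirty namespace.
 */
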
u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
        u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

        nvmet_stop_keep_alive_timer(req->sq->ctrl);
        req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
        nvmet_start_keep_alive_timer(req->sq->ctrl);

        nvmet_set_result(req, req->sq->ctrl->kato);

        return 0;
}

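/*
 * Unit note for the helper above: KATO arrives in cdw11 in milliseconds but
 * is stored per-controller in seconds, rounded up, so a host-requested
 * 15000 ms becomes kato = 15; nvmet_get_feat_kato() below converts back to
 * milliseconds.
 */
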
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
        u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

        if (val32 & ~mask) {
                req->error_loc = offsetof(struct nvme_common_command, cdw11);
                return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
        }

        WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
        nvmet_set_result(req, val32);

        return 0;
}

void nvmet_execute_set_features(struct nvmet_req *req)
{
        struct nvmet_subsys *subsys = nvmet_req_subsys(req);
        u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
        u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
        u16 status = 0;
        u16 nsqr;
        u16 ncqr;

        if (!nvmet_check_transfer_len(req, 0))
                return;

        switch (cdw10 & 0xff) {
        case NVME_FEAT_NUM_QUEUES:
                /* NSQR and NCQR are 0's based values; 0xffff is reserved */
                ncqr = (cdw11 >> 16) & 0xffff;
                nsqr = cdw11 & 0xffff;
                if (ncqr == 0xffff || nsqr == 0xffff) {
                        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                        break;
                }
                nvmet_set_result(req,
                        (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
                break;
        case NVME_FEAT_KATO:
                status = nvmet_set_feat_kato(req);
                break;
        case NVME_FEAT_ASYNC_EVENT:
                status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
                break;
        case NVME_FEAT_HOST_ID:
                status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
                break;
        case NVME_FEAT_WRITE_PROTECT:
                status = nvmet_set_feat_write_protect(req);
                break;
        default:
                req->error_loc = offsetof(struct nvme_common_command, cdw10);
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                break;
        }

        nvmet_req_complete(req, status);
}

static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
        struct nvmet_subsys *subsys = nvmet_req_subsys(req);
        u32 result;

        result = nvmet_req_find_ns(req);
        if (result)
                return result;

        mutex_lock(&subsys->lock);
        if (req->ns->readonly)
                result = NVME_NS_WRITE_PROTECT;
        else
                result = NVME_NS_NO_WRITE_PROTECT;
        nvmet_set_result(req, result);
        mutex_unlock(&subsys->lock);

        return 0;
}

void nvmet_get_feat_kato(struct nvmet_req *req)
{
        nvmet_set_result(req, req->sq->ctrl->kato * 1000);
}

void nvmet_get_feat_async_event(struct nvmet_req *req)
{
        nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}

void nvmet_execute_get_features(struct nvmet_req *req)
{
        struct nvmet_subsys *subsys = nvmet_req_subsys(req);
        u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
        u16 status = 0;

        if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
                return;

        switch (cdw10 & 0xff) {
        /*
         * These features are mandatory in the spec, but we don't
         * have a useful way to implement them.  We'll eventually
         * need to come up with some fake values for these.
         */
#if 0
        case NVME_FEAT_ARBITRATION:
                break;
        case NVME_FEAT_POWER_MGMT:
                break;
        case NVME_FEAT_TEMP_THRESH:
                break;
        case NVME_FEAT_ERR_RECOVERY:
                break;
        case NVME_FEAT_IRQ_COALESCE:
                break;
        case NVME_FEAT_IRQ_CONFIG:
                break;
        case NVME_FEAT_WRITE_ATOMIC:
                break;
#endif
        case NVME_FEAT_ASYNC_EVENT:
                nvmet_get_feat_async_event(req);
                break;
        case NVME_FEAT_VOLATILE_WC:
                nvmet_set_result(req, 1);
                break;
        case NVME_FEAT_NUM_QUEUES:
                nvmet_set_result(req,
                        (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
                break;
        case NVME_FEAT_KATO:
                nvmet_get_feat_kato(req);
                break;
        case NVME_FEAT_HOST_ID:
                /* need 128-bit host identifier flag */
                if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
                        req->error_loc =
                                offsetof(struct nvme_common_command, cdw11);
                        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                        break;
                }

                status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
                                sizeof(req->sq->ctrl->hostid));
                break;
        case NVME_FEAT_WRITE_PROTECT:
                status = nvmet_get_feat_write_protect(req);
                break;
        default:
                req->error_loc =
                        offsetof(struct nvme_common_command, cdw10);
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                break;
        }

        nvmet_req_complete(req, status);
}

void nvmet_execute_async_event(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;

        if (!nvmet_check_transfer_len(req, 0))
                return;

        mutex_lock(&ctrl->lock);
        if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
                mutex_unlock(&ctrl->lock);
                nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
                return;
        }
        ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
        mutex_unlock(&ctrl->lock);

        schedule_work(&ctrl->async_event_work);
}

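/*
 * Note on the AER path above: the request is parked in async_event_cmds[]
 * and completed later by async_event_work when an event (or controller
 * teardown) occurs, which is why the success path returns without calling
 * nvmet_req_complete().
 */
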
void nvmet_execute_keep_alive(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        u16 status = 0;

        if (!nvmet_check_transfer_len(req, 0))
                return;

        if (!ctrl->kato) {
                status = NVME_SC_KA_TIMEOUT_INVALID;
                goto out;
        }

        pr_debug("ctrl %d update keep-alive timer for %d secs\n",
                ctrl->cntlid, ctrl->kato);
        mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
out:
        nvmet_req_complete(req, status);
}

u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
        struct nvme_command *cmd = req->cmd;
        u16 ret;

        if (nvme_is_fabrics(cmd))
                return nvmet_parse_fabrics_cmd(req);
        if (nvmet_req_subsys(req)->type == NVME_NQN_DISC)
                return nvmet_parse_discovery_cmd(req);

        ret = nvmet_check_ctrl_status(req);
        if (unlikely(ret))
                return ret;

        if (nvmet_is_passthru_req(req))
                return nvmet_parse_passthru_admin_cmd(req);

        switch (cmd->common.opcode) {
        case nvme_admin_get_log_page:
                req->execute = nvmet_execute_get_log_page;
                return 0;
        case nvme_admin_identify:
                req->execute = nvmet_execute_identify;
                return 0;
        case nvme_admin_abort_cmd:
                req->execute = nvmet_execute_abort;
                return 0;
        case nvme_admin_set_features:
                req->execute = nvmet_execute_set_features;
                return 0;
        case nvme_admin_get_features:
                req->execute = nvmet_execute_get_features;
                return 0;
        case nvme_admin_async_event:
                req->execute = nvmet_execute_async_event;
                return 0;
        case nvme_admin_keep_alive:
                req->execute = nvmet_execute_keep_alive;
                return 0;
        default:
                return nvmet_report_invalid_opcode(req);
        }
}