nvmet: remove extra variable in id-desclist
[linux-2.6-microblaze.git] drivers/nvme/target/admin-cmd.c
// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/part_stat.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"

u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
        u32 len = le16_to_cpu(cmd->get_log_page.numdu);

        len <<= 16;
        len += le16_to_cpu(cmd->get_log_page.numdl);
        /* NUMD is a 0's based value */
        len += 1;
        len *= sizeof(u32);

        return len;
}
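
/*
 * Worked example (illustrative, not part of the driver): NUMD is split
 * across NUMDU (upper 16 bits) and NUMDL (lower 16 bits) and is a 0's
 * based dword count.  For numdu = 0 and numdl = 0x3ff the helper above
 * returns (0x3ff + 1) * sizeof(u32) = 4096 bytes.
 */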

static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
{
        switch (cdw10 & 0xff) {
        case NVME_FEAT_HOST_ID:
                return sizeof(req->sq->ctrl->hostid);
        default:
                return 0;
        }
}

u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
        return le64_to_cpu(cmd->get_log_page.lpo);
}

static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
        nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
}

static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        unsigned long flags;
        off_t offset = 0;
        u64 slot;
        u64 i;

        spin_lock_irqsave(&ctrl->error_lock, flags);
        slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;

        for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
                if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
                                sizeof(struct nvme_error_slot)))
                        break;

                if (slot == 0)
                        slot = NVMET_ERROR_LOG_SLOTS - 1;
                else
                        slot--;
                offset += sizeof(struct nvme_error_slot);
        }
        spin_unlock_irqrestore(&ctrl->error_lock, flags);
        nvmet_req_complete(req, 0);
}
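
/*
 * Illustrative note: ctrl->slots[] is a ring indexed by err_counter, so
 * the slot computed above holds the most recent error and the loop walks
 * backwards, newest first.  Assuming NVMET_ERROR_LOG_SLOTS is 128, after
 * 130 logged errors the walk starts at slot 130 % 128 = 2 and visits
 * slots 2, 1, 0, 127, 126, ...
 */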

static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
                struct nvme_smart_log *slog)
{
        u64 host_reads, host_writes, data_units_read, data_units_written;

        req->ns = nvmet_find_namespace(req->sq->ctrl,
                                       req->cmd->get_log_page.nsid);
        if (!req->ns) {
                pr_err("Could not find namespace id: %d\n",
                                le32_to_cpu(req->cmd->get_log_page.nsid));
                req->error_loc = offsetof(struct nvme_rw_command, nsid);
                return NVME_SC_INVALID_NS;
        }

        /* we don't have the right data for file-backed ns */
        if (!req->ns->bdev)
                return NVME_SC_SUCCESS;

        host_reads = part_stat_read(req->ns->bdev, ios[READ]);
        data_units_read =
                DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
        host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
        data_units_written =
                DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);

        put_unaligned_le64(host_reads, &slog->host_reads[0]);
        put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
        put_unaligned_le64(host_writes, &slog->host_writes[0]);
        put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

        return NVME_SC_SUCCESS;
}
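
/*
 * Worked example (illustrative): the SMART "data units" fields count
 * thousands of 512-byte units, which is why the sector counts above are
 * divided by 1000 with round-up.  1,999 sectors read would be reported
 * as DIV_ROUND_UP(1999, 1000) = 2 data units.
 */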

static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
                struct nvme_smart_log *slog)
{
        u64 host_reads = 0, host_writes = 0;
        u64 data_units_read = 0, data_units_written = 0;
        struct nvmet_ns *ns;
        struct nvmet_ctrl *ctrl;
        unsigned long idx;

        ctrl = req->sq->ctrl;
        xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
                /* we don't have the right data for file-backed ns */
                if (!ns->bdev)
                        continue;
                host_reads += part_stat_read(ns->bdev, ios[READ]);
                data_units_read += DIV_ROUND_UP(
                        part_stat_read(ns->bdev, sectors[READ]), 1000);
                host_writes += part_stat_read(ns->bdev, ios[WRITE]);
                data_units_written += DIV_ROUND_UP(
                        part_stat_read(ns->bdev, sectors[WRITE]), 1000);
        }

        put_unaligned_le64(host_reads, &slog->host_reads[0]);
        put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
        put_unaligned_le64(host_writes, &slog->host_writes[0]);
        put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

        return NVME_SC_SUCCESS;
}

static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
        struct nvme_smart_log *log;
        u16 status = NVME_SC_INTERNAL;
        unsigned long flags;

        if (req->transfer_len != sizeof(*log))
                goto out;

        log = kzalloc(sizeof(*log), GFP_KERNEL);
        if (!log)
                goto out;

        if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
                status = nvmet_get_smart_log_all(req, log);
        else
                status = nvmet_get_smart_log_nsid(req, log);
        if (status)
                goto out_free_log;

        spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
        put_unaligned_le64(req->sq->ctrl->err_counter,
                        &log->num_err_log_entries);
        spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);

        status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
        kfree(log);
out:
        nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
        u16 status = NVME_SC_INTERNAL;
        struct nvme_effects_log *log;

        log = kzalloc(sizeof(*log), GFP_KERNEL);
        if (!log)
                goto out;

        log->acs[nvme_admin_get_log_page]       = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_identify]           = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_abort_cmd]          = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_set_features]       = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_get_features]       = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_async_event]        = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_keep_alive]         = cpu_to_le32(1 << 0);

        log->iocs[nvme_cmd_read]                = cpu_to_le32(1 << 0);
        log->iocs[nvme_cmd_write]               = cpu_to_le32(1 << 0);
        log->iocs[nvme_cmd_flush]               = cpu_to_le32(1 << 0);
        log->iocs[nvme_cmd_dsm]                 = cpu_to_le32(1 << 0);
        log->iocs[nvme_cmd_write_zeroes]        = cpu_to_le32(1 << 0);

        status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));

        kfree(log);
out:
        nvmet_req_complete(req, status);
}
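
/*
 * Illustrative note: each entry in the Commands Supported and Effects
 * log is a 32-bit field whose bit 0 (CSUPP) means "command supported".
 * cpu_to_le32(1 << 0) therefore advertises the opcode as supported with
 * no additional effects bits set.
 */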

static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        u16 status = NVME_SC_INTERNAL;
        size_t len;

        if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
                goto out;

        mutex_lock(&ctrl->lock);
        if (ctrl->nr_changed_ns == U32_MAX)
                len = sizeof(__le32);
        else
                len = ctrl->nr_changed_ns * sizeof(__le32);
        status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
        if (!status)
                status = nvmet_zero_sgl(req, len, req->transfer_len - len);
        ctrl->nr_changed_ns = 0;
        nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
        mutex_unlock(&ctrl->lock);
out:
        nvmet_req_complete(req, status);
}
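
/*
 * Illustrative note (based on how the target core maintains the list):
 * nr_changed_ns == U32_MAX appears to be a sentinel meaning the
 * changed-namespace list overflowed; the log then holds the single
 * entry 0xffffffff, so only one __le32 is copied and the remainder of
 * the page is zeroed.
 */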

static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
                struct nvme_ana_group_desc *desc)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmet_ns *ns;
        unsigned long idx;
        u32 count = 0;

        if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
                xa_for_each(&ctrl->subsys->namespaces, idx, ns)
                        if (ns->anagrpid == grpid)
                                desc->nsids[count++] = cpu_to_le32(ns->nsid);
        }

        desc->grpid = cpu_to_le32(grpid);
        desc->nnsids = cpu_to_le32(count);
        desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
        desc->state = req->port->ana_state[grpid];
        memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
        return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
}

static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
        struct nvme_ana_rsp_hdr hdr = { 0, };
        struct nvme_ana_group_desc *desc;
        size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
        size_t len;
        u32 grpid;
        u16 ngrps = 0;
        u16 status;

        status = NVME_SC_INTERNAL;
        desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
                        NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
        if (!desc)
                goto out;

        down_read(&nvmet_ana_sem);
        for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
                if (!nvmet_ana_group_enabled[grpid])
                        continue;
                len = nvmet_format_ana_group(req, grpid, desc);
                status = nvmet_copy_to_sgl(req, offset, desc, len);
                if (status)
                        break;
                offset += len;
                ngrps++;
        }
        for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
                if (nvmet_ana_group_enabled[grpid])
                        ngrps++;
        }

        hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
        hdr.ngrps = cpu_to_le16(ngrps);
        nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
        up_read(&nvmet_ana_sem);

        kfree(desc);

        /* copy the header last once we know the number of groups */
        status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
        nvmet_req_complete(req, status);
}
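
/*
 * Illustrative layout sketch, assuming the usual struct sizes (16-byte
 * nvme_ana_rsp_hdr, 32-byte nvme_ana_group_desc): the ANA log is the
 * header followed by one descriptor per enabled group, each descriptor
 * carrying 4 bytes per listed NSID.  One enabled group holding two
 * namespaces would occupy 16 + 32 + 2 * 4 = 56 bytes; with the RGO bit
 * set in lsp the NSID lists are omitted and each descriptor is exactly
 * 32 bytes.
 */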

static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
        if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
                return;

        switch (req->cmd->get_log_page.lid) {
        case NVME_LOG_ERROR:
                return nvmet_execute_get_log_page_error(req);
        case NVME_LOG_SMART:
                return nvmet_execute_get_log_page_smart(req);
        case NVME_LOG_FW_SLOT:
                /*
                 * We only support a single firmware slot which is always
                 * active, so we can zero out the whole firmware slot log and
                 * still claim to fully implement this mandatory log page.
                 */
                return nvmet_execute_get_log_page_noop(req);
        case NVME_LOG_CHANGED_NS:
                return nvmet_execute_get_log_changed_ns(req);
        case NVME_LOG_CMD_EFFECTS:
                return nvmet_execute_get_log_cmd_effects_ns(req);
        case NVME_LOG_ANA:
                return nvmet_execute_get_log_page_ana(req);
        }
        pr_err("unhandled lid %d on qid %d\n",
               req->cmd->get_log_page.lid, req->sq->qid);
        req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
        nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

static void nvmet_id_set_model_number(struct nvme_id_ctrl *id,
                                      struct nvmet_subsys *subsys)
{
        const char *model = NVMET_DEFAULT_CTRL_MODEL;
        struct nvmet_subsys_model *subsys_model;

        rcu_read_lock();
        subsys_model = rcu_dereference(subsys->model);
        if (subsys_model)
                model = subsys_model->number;
        memcpy_and_pad(id->mn, sizeof(id->mn), model, strlen(model), ' ');
        rcu_read_unlock();
}

static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvme_id_ctrl *id;
        u32 cmd_capsule_size;
        u16 status = 0;

        id = kzalloc(sizeof(*id), GFP_KERNEL);
        if (!id) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        /* XXX: figure out how to assign real vendor IDs. */
        id->vid = 0;
        id->ssvid = 0;

        memset(id->sn, ' ', sizeof(id->sn));
        bin2hex(id->sn, &ctrl->subsys->serial,
                min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
        nvmet_id_set_model_number(id, ctrl->subsys);
        memcpy_and_pad(id->fr, sizeof(id->fr),
                       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

        id->rab = 6;

        /*
         * XXX: figure out how we can assign an IEEE OUI, but until then
         * the safest is to leave it as zeroes.
         */

        /* we support multiple ports, multiple hosts and ANA: */
        id->cmic = (1 << 0) | (1 << 1) | (1 << 3);

        /* Limit MDTS according to transport capability */
        if (ctrl->ops->get_mdts)
                id->mdts = ctrl->ops->get_mdts(ctrl);
        else
                id->mdts = 0;

        id->cntlid = cpu_to_le16(ctrl->cntlid);
        id->ver = cpu_to_le32(ctrl->subsys->ver);

        /* XXX: figure out what to do about RTD3R/RTD3 */
        id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
        id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
                NVME_CTRL_ATTR_TBKAS);

        id->oacs = 0;

        /*
         * We don't really have a practical limit on the number of abort
         * commands.  But we don't do anything useful for abort either, so
         * no point in allowing more abort commands than the spec requires.
         */
        id->acl = 3;

        id->aerl = NVMET_ASYNC_EVENTS - 1;

        /* first slot is read-only, only one slot supported */
        id->frmw = (1 << 0) | (1 << 1);
        id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
        id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
        id->npss = 0;

        /* We support keep-alive timeout with a granularity of seconds */
        id->kas = cpu_to_le16(NVMET_KAS);

        id->sqes = (0x6 << 4) | 0x6;
        id->cqes = (0x4 << 4) | 0x4;

        /* no enforced soft limit for maxcmd - pick an arbitrary high value */
        id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

        id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
        id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
        id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
                        NVME_CTRL_ONCS_WRITE_ZEROES);

        /* XXX: don't report vwc if the underlying device is write through */
        id->vwc = NVME_CTRL_VWC_PRESENT;

        /*
         * We can't support atomic writes bigger than an LBA without support
         * from the backend device.
         */
        id->awun = 0;
        id->awupf = 0;

        id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
        if (ctrl->ops->flags & NVMF_KEYED_SGLS)
                id->sgls |= cpu_to_le32(1 << 2);
        if (req->port->inline_data_size)
                id->sgls |= cpu_to_le32(1 << 20);

        strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

        /*
         * Max command capsule size is sqe + in-capsule data size.
         * Disable in-capsule data for Metadata capable controllers.
         */
        cmd_capsule_size = sizeof(struct nvme_command);
        if (!ctrl->pi_support)
                cmd_capsule_size += req->port->inline_data_size;
        id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);

        /* Max response capsule size is cqe */
        id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

        id->msdbd = ctrl->ops->msdbd;

        id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
        id->anatt = 10; /* random value */
        id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
        id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

        /*
         * Meh, we don't really support any power state.  Fake up the same
         * values that qemu does.
         */
        id->psd[0].max_power = cpu_to_le16(0x9c4);
        id->psd[0].entry_lat = cpu_to_le32(0x10);
        id->psd[0].exit_lat = cpu_to_le32(0x4);

        id->nwpc = 1 << 0; /* write protect and no write protect */

        status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

        kfree(id);
out:
        nvmet_req_complete(req, status);
}
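
/*
 * Illustrative note: per the NVMe spec, SQES/CQES pack the minimum
 * entry size in the low nibble and the maximum in the high nibble,
 * each as a power of two.  (0x6 << 4) | 0x6 therefore fixes the
 * submission queue entry at 2^6 = 64 bytes, and (0x4 << 4) | 0x4 fixes
 * the completion queue entry at 2^4 = 16 bytes.
 */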

static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmet_ns *ns;
        struct nvme_id_ns *id;
        u16 status = 0;

        if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
                req->error_loc = offsetof(struct nvme_identify, nsid);
                status = NVME_SC_INVALID_NS | NVME_SC_DNR;
                goto out;
        }

        id = kzalloc(sizeof(*id), GFP_KERNEL);
        if (!id) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        /* return an all zeroed buffer if we can't find an active namespace */
        ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid);
        if (!ns) {
                status = NVME_SC_INVALID_NS;
                goto done;
        }

        nvmet_ns_revalidate(ns);

        /*
         * nuse = ncap = nsze isn't always true, but we have no way to find
         * that out from the underlying device.
         */
        id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift);
        switch (req->port->ana_state[ns->anagrpid]) {
        case NVME_ANA_INACCESSIBLE:
        case NVME_ANA_PERSISTENT_LOSS:
                break;
        default:
                id->nuse = id->nsze;
                break;
        }

        if (ns->bdev)
                nvmet_bdev_set_limits(ns->bdev, id);

        /*
         * We just provide a single LBA format that matches what the
         * underlying device reports.
         */
        id->nlbaf = 0;
        id->flbas = 0;

        /*
         * Our namespace might always be shared.  Not just with other
         * controllers, but also with any other user of the block device.
         */
        id->nmic = (1 << 0);
        id->anagrpid = cpu_to_le32(ns->anagrpid);

        memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid));

        id->lbaf[0].ds = ns->blksize_shift;

        if (ctrl->pi_support && nvmet_ns_has_pi(ns)) {
                id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
                          NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
                          NVME_NS_DPC_PI_TYPE3;
                id->mc = NVME_MC_EXTENDED_LBA;
                id->dps = ns->pi_type;
                id->flbas = NVME_NS_FLBAS_META_EXT;
                id->lbaf[0].ms = cpu_to_le16(ns->metadata_size);
        }

        if (ns->readonly)
                id->nsattr |= (1 << 0);
        nvmet_put_namespace(ns);
done:
        if (!status)
                status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

        kfree(id);
out:
        nvmet_req_complete(req, status);
}
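
/*
 * Worked example (illustrative): lbaf[0].ds is the LBA data size as a
 * power of two, taken from blksize_shift, so a 4096-byte block size
 * yields ds = 12.  A 1 GiB backing device then reports
 * nsze = ncap = (1 << 30) >> 12 = 262144 logical blocks.
 */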

static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
        static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmet_ns *ns;
        unsigned long idx;
        u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
        __le32 *list;
        u16 status = 0;
        int i = 0;

        list = kzalloc(buf_size, GFP_KERNEL);
        if (!list) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
                if (ns->nsid <= min_nsid)
                        continue;
                list[i++] = cpu_to_le32(ns->nsid);
                if (i == buf_size / sizeof(__le32))
                        break;
        }

        status = nvmet_copy_to_sgl(req, 0, list, buf_size);

        kfree(list);
out:
        nvmet_req_complete(req, status);
}

static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
                                    void *id, off_t *off)
{
        struct nvme_ns_id_desc desc = {
                .nidt = type,
                .nidl = len,
        };
        u16 status;

        status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
        if (status)
                return status;
        *off += sizeof(desc);

        status = nvmet_copy_to_sgl(req, *off, id, len);
        if (status)
                return status;
        *off += len;

        return 0;
}
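
/*
 * Illustrative layout sketch, assuming the 4-byte nvme_ns_id_desc
 * header (nidt, nidl, two reserved bytes): each namespace identifier is
 * emitted as that header followed by nidl bytes of payload.  A UUID
 * entry (NVME_NIDT_UUID with a 16-byte length) therefore advances *off
 * by 4 + 16 = 20 bytes.
 */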

static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
        u16 status = 0;
        off_t off = 0;

        req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
        if (!req->ns) {
                req->error_loc = offsetof(struct nvme_identify, nsid);
                status = NVME_SC_INVALID_NS | NVME_SC_DNR;
                goto out;
        }

        if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
                status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
                                                  NVME_NIDT_UUID_LEN,
                                                  &req->ns->uuid, &off);
                if (status)
                        goto out;
        }
        if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
                status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
                                                  NVME_NIDT_NGUID_LEN,
                                                  &req->ns->nguid, &off);
                if (status)
                        goto out;
        }

        if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
                        off) != NVME_IDENTIFY_DATA_SIZE - off)
                status = NVME_SC_INTERNAL | NVME_SC_DNR;

out:
        nvmet_req_complete(req, status);
}

static void nvmet_execute_identify(struct nvmet_req *req)
{
        if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
                return;

        switch (req->cmd->identify.cns) {
        case NVME_ID_CNS_NS:
                return nvmet_execute_identify_ns(req);
        case NVME_ID_CNS_CTRL:
                return nvmet_execute_identify_ctrl(req);
        case NVME_ID_CNS_NS_ACTIVE_LIST:
                return nvmet_execute_identify_nslist(req);
        case NVME_ID_CNS_NS_DESC_LIST:
                return nvmet_execute_identify_desclist(req);
        }

        pr_err("unhandled identify cns %d on qid %d\n",
               req->cmd->identify.cns, req->sq->qid);
        req->error_loc = offsetof(struct nvme_identify, cns);
        nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so don't even bother waiting for the command to be
 * executed; return immediately, reporting that the command to abort wasn't
 * found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
        if (!nvmet_check_transfer_len(req, 0))
                return;
        nvmet_set_result(req, 1);
        nvmet_req_complete(req, 0);
}

static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
        u16 status;

        if (req->ns->file)
                status = nvmet_file_flush(req);
        else
                status = nvmet_bdev_flush(req);

        if (status)
                pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
        return status;
}

static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
        u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
        u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;

        req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
        if (unlikely(!req->ns)) {
                req->error_loc = offsetof(struct nvme_common_command, nsid);
                return status;
        }

        mutex_lock(&subsys->lock);
        switch (write_protect) {
        case NVME_NS_WRITE_PROTECT:
                req->ns->readonly = true;
                status = nvmet_write_protect_flush_sync(req);
                if (status)
                        req->ns->readonly = false;
                break;
        case NVME_NS_NO_WRITE_PROTECT:
                req->ns->readonly = false;
                status = 0;
                break;
        default:
                break;
        }

        if (!status)
                nvmet_ns_changed(subsys, req->ns->nsid);
        mutex_unlock(&subsys->lock);
        return status;
}

u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
        u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

        nvmet_stop_keep_alive_timer(req->sq->ctrl);
        req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
        nvmet_start_keep_alive_timer(req->sq->ctrl);

        nvmet_set_result(req, req->sq->ctrl->kato);

        return 0;
}
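
/*
 * Worked example (illustrative): the host passes KATO in cdw11 in
 * milliseconds while the target keeps it in seconds rounded up, so
 * cdw11 = 2500 stores kato = DIV_ROUND_UP(2500, 1000) = 3 seconds.
 */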

u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
        u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

        if (val32 & ~mask) {
                req->error_loc = offsetof(struct nvme_common_command, cdw11);
                return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
        }

        WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
        nvmet_set_result(req, val32);

        return 0;
}

void nvmet_execute_set_features(struct nvmet_req *req)
{
        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
        u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
        u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
        u16 status = 0;
        u16 nsqr;
        u16 ncqr;

        if (!nvmet_check_transfer_len(req, 0))
                return;

        switch (cdw10 & 0xff) {
        case NVME_FEAT_NUM_QUEUES:
                ncqr = (cdw11 >> 16) & 0xffff;
                nsqr = cdw11 & 0xffff;
                if (ncqr == 0xffff || nsqr == 0xffff) {
                        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                        break;
                }
                nvmet_set_result(req,
                        (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
                break;
        case NVME_FEAT_KATO:
                status = nvmet_set_feat_kato(req);
                break;
        case NVME_FEAT_ASYNC_EVENT:
                status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
                break;
        case NVME_FEAT_HOST_ID:
                status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
                break;
        case NVME_FEAT_WRITE_PROTECT:
                status = nvmet_set_feat_write_protect(req);
                break;
        default:
                req->error_loc = offsetof(struct nvme_common_command, cdw10);
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                break;
        }

        nvmet_req_complete(req, status);
}
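
/*
 * Worked example (illustrative): for Number of Queues, the result dword
 * packs the allocated completion queues in bits 31:16 and submission
 * queues in bits 15:0, both 0's based.  With max_qid = 128 the result
 * is (127 << 16) | 127, i.e. 128 I/O queues of each kind.
 */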

static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
        u32 result;

        req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
        if (!req->ns) {
                req->error_loc = offsetof(struct nvme_common_command, nsid);
                return NVME_SC_INVALID_NS | NVME_SC_DNR;
        }
        mutex_lock(&subsys->lock);
        if (req->ns->readonly)
                result = NVME_NS_WRITE_PROTECT;
        else
                result = NVME_NS_NO_WRITE_PROTECT;
        nvmet_set_result(req, result);
        mutex_unlock(&subsys->lock);

        return 0;
}

void nvmet_get_feat_kato(struct nvmet_req *req)
{
        nvmet_set_result(req, req->sq->ctrl->kato * 1000);
}

void nvmet_get_feat_async_event(struct nvmet_req *req)
{
        nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}

void nvmet_execute_get_features(struct nvmet_req *req)
{
        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
        u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
        u16 status = 0;

        if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
                return;

        switch (cdw10 & 0xff) {
        /*
         * These features are mandatory in the spec, but we don't
         * have a useful way to implement them.  We'll eventually
         * need to come up with some fake values for these.
         */
#if 0
        case NVME_FEAT_ARBITRATION:
                break;
        case NVME_FEAT_POWER_MGMT:
                break;
        case NVME_FEAT_TEMP_THRESH:
                break;
        case NVME_FEAT_ERR_RECOVERY:
                break;
        case NVME_FEAT_IRQ_COALESCE:
                break;
        case NVME_FEAT_IRQ_CONFIG:
                break;
        case NVME_FEAT_WRITE_ATOMIC:
                break;
#endif
        case NVME_FEAT_ASYNC_EVENT:
                nvmet_get_feat_async_event(req);
                break;
        case NVME_FEAT_VOLATILE_WC:
                nvmet_set_result(req, 1);
                break;
        case NVME_FEAT_NUM_QUEUES:
                nvmet_set_result(req,
                        (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
                break;
        case NVME_FEAT_KATO:
                nvmet_get_feat_kato(req);
                break;
        case NVME_FEAT_HOST_ID:
                /* need 128-bit host identifier flag */
                if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
                        req->error_loc =
                                offsetof(struct nvme_common_command, cdw11);
                        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                        break;
                }

                status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
                                sizeof(req->sq->ctrl->hostid));
                break;
        case NVME_FEAT_WRITE_PROTECT:
                status = nvmet_get_feat_write_protect(req);
                break;
        default:
                req->error_loc =
                        offsetof(struct nvme_common_command, cdw10);
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                break;
        }

        nvmet_req_complete(req, status);
}

void nvmet_execute_async_event(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;

        if (!nvmet_check_transfer_len(req, 0))
                return;

        mutex_lock(&ctrl->lock);
        if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
                mutex_unlock(&ctrl->lock);
                nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
                return;
        }
        ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
        mutex_unlock(&ctrl->lock);

        schedule_work(&ctrl->async_event_work);
}

void nvmet_execute_keep_alive(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;

        if (!nvmet_check_transfer_len(req, 0))
                return;

        pr_debug("ctrl %d update keep-alive timer for %d secs\n",
                ctrl->cntlid, ctrl->kato);

        mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
        nvmet_req_complete(req, 0);
}
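
/*
 * Illustrative note: each Keep Alive simply pushes the keep-alive work
 * out by kato seconds; with kato = 15 and a kernel built with HZ = 250
 * the delayed work is re-armed to expire 15 * 250 = 3750 jiffies from
 * now.
 */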

u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
        struct nvme_command *cmd = req->cmd;
        u16 ret;

        if (nvme_is_fabrics(cmd))
                return nvmet_parse_fabrics_cmd(req);
        if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
                return nvmet_parse_discovery_cmd(req);

        ret = nvmet_check_ctrl_status(req, cmd);
        if (unlikely(ret))
                return ret;

        if (nvmet_req_passthru_ctrl(req))
                return nvmet_parse_passthru_admin_cmd(req);

        switch (cmd->common.opcode) {
        case nvme_admin_get_log_page:
                req->execute = nvmet_execute_get_log_page;
                return 0;
        case nvme_admin_identify:
                req->execute = nvmet_execute_identify;
                return 0;
        case nvme_admin_abort_cmd:
                req->execute = nvmet_execute_abort;
                return 0;
        case nvme_admin_set_features:
                req->execute = nvmet_execute_set_features;
                return 0;
        case nvme_admin_get_features:
                req->execute = nvmet_execute_get_features;
                return 0;
        case nvme_admin_async_event:
                req->execute = nvmet_execute_async_event;
                return 0;
        case nvme_admin_keep_alive:
                req->execute = nvmet_execute_keep_alive;
                return 0;
        }

        pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
               req->sq->qid);
        req->error_loc = offsetof(struct nvme_common_command, opcode);
        return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}