drivers/nvme/target/admin-cmd.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * NVMe admin command implementation.
4  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
5  */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/rculist.h>
9
10 #include <generated/utsrelease.h>
11 #include <asm/unaligned.h>
12 #include "nvmet.h"
13
14 u32 nvmet_get_log_page_len(struct nvme_command *cmd)
15 {
16         u32 len = le16_to_cpu(cmd->get_log_page.numdu);
17
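        /*
         * NUMDU/NUMDL carry the upper/lower 16 bits of NUMD, the 0's based
         * dword count, e.g. NUMDU = 0, NUMDL = 0x3ff -> (0x3ff + 1) * 4 =
         * 4096 bytes.
         */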
        len <<= 16;
        len += le16_to_cpu(cmd->get_log_page.numdl);
        /* NUMD is a 0's based value */
        len += 1;
        len *= sizeof(u32);

        return len;
}

u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
        return le64_to_cpu(cmd->get_log_page.lpo);
}

static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
        nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
}

static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        u16 status = NVME_SC_SUCCESS;
        unsigned long flags;
        off_t offset = 0;
        u64 slot;
        u64 i;

        spin_lock_irqsave(&ctrl->error_lock, flags);
        slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;

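        /*
         * The error log is kept as a ring buffer: start at the most recently
         * filled slot and walk backwards so the newest entries come first.
         */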
        for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
                status = nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
                                sizeof(struct nvme_error_slot));
                if (status)
                        break;

                if (slot == 0)
                        slot = NVMET_ERROR_LOG_SLOTS - 1;
                else
                        slot--;
                offset += sizeof(struct nvme_error_slot);
        }
        spin_unlock_irqrestore(&ctrl->error_lock, flags);
        nvmet_req_complete(req, status);
}

static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
                struct nvme_smart_log *slog)
{
        struct nvmet_ns *ns;
        u64 host_reads, host_writes, data_units_read, data_units_written;

        ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
        if (!ns) {
                pr_err("Could not find namespace id: %u\n",
                                le32_to_cpu(req->cmd->get_log_page.nsid));
                req->error_loc = offsetof(struct nvme_rw_command, nsid);
                return NVME_SC_INVALID_NS;
        }

        /* we don't have the right data for file backed ns */
        if (!ns->bdev)
                goto out;

        host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
        data_units_read = part_stat_read(ns->bdev->bd_part, sectors[READ]);
        host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
        data_units_written = part_stat_read(ns->bdev->bd_part, sectors[WRITE]);

        put_unaligned_le64(host_reads, &slog->host_reads[0]);
        put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
        put_unaligned_le64(host_writes, &slog->host_writes[0]);
        put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
out:
        nvmet_put_namespace(ns);

        return NVME_SC_SUCCESS;
}

static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
                struct nvme_smart_log *slog)
{
        u64 host_reads = 0, host_writes = 0;
        u64 data_units_read = 0, data_units_written = 0;
        struct nvmet_ns *ns;
        struct nvmet_ctrl *ctrl;

        ctrl = req->sq->ctrl;

        rcu_read_lock();
        list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
                /* we don't have the right data for file backed ns */
                if (!ns->bdev)
                        continue;
                host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
                data_units_read +=
                        part_stat_read(ns->bdev->bd_part, sectors[READ]);
                host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
                data_units_written +=
                        part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
        }
        rcu_read_unlock();

        put_unaligned_le64(host_reads, &slog->host_reads[0]);
        put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
        put_unaligned_le64(host_writes, &slog->host_writes[0]);
        put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

        return NVME_SC_SUCCESS;
}

static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
        struct nvme_smart_log *log;
        u16 status = NVME_SC_INTERNAL;
        unsigned long flags;

        if (req->data_len != sizeof(*log))
                goto out;

        log = kzalloc(sizeof(*log), GFP_KERNEL);
        if (!log)
                goto out;

        if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
                status = nvmet_get_smart_log_all(req, log);
        else
                status = nvmet_get_smart_log_nsid(req, log);
        if (status)
                goto out_free_log;

        spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
        put_unaligned_le64(req->sq->ctrl->err_counter,
                        &log->num_err_log_entries);
        spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);

        status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
        kfree(log);
out:
        nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
        u16 status = NVME_SC_INTERNAL;
        struct nvme_effects_log *log;

        log = kzalloc(sizeof(*log), GFP_KERNEL);
        if (!log)
                goto out;

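        /*
         * Bit 0 (CSUPP) of each effects entry only advertises that the
         * command is supported; no behaviour-modifying bits are set.
         */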
        log->acs[nvme_admin_get_log_page]       = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_identify]           = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_abort_cmd]          = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_set_features]       = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_get_features]       = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_async_event]        = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_keep_alive]         = cpu_to_le32(1 << 0);

        log->iocs[nvme_cmd_read]                = cpu_to_le32(1 << 0);
        log->iocs[nvme_cmd_write]               = cpu_to_le32(1 << 0);
        log->iocs[nvme_cmd_flush]               = cpu_to_le32(1 << 0);
        log->iocs[nvme_cmd_dsm]                 = cpu_to_le32(1 << 0);
        log->iocs[nvme_cmd_write_zeroes]        = cpu_to_le32(1 << 0);

        status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));

        kfree(log);
out:
        nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        u16 status = NVME_SC_INTERNAL;
        size_t len;

        if (req->data_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
                goto out;

        mutex_lock(&ctrl->lock);
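        /*
         * nr_changed_ns == U32_MAX means the changed namespace list
         * overflowed; report only a single entry so the host rescans all
         * namespaces.
         */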
        if (ctrl->nr_changed_ns == U32_MAX)
                len = sizeof(__le32);
        else
                len = ctrl->nr_changed_ns * sizeof(__le32);
        status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
        if (!status)
                status = nvmet_zero_sgl(req, len, req->data_len - len);
        ctrl->nr_changed_ns = 0;
        nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
        mutex_unlock(&ctrl->lock);
out:
        nvmet_req_complete(req, status);
}

static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
                struct nvme_ana_group_desc *desc)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmet_ns *ns;
        u32 count = 0;

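        /* RGO (Return Groups Only) means the host doesn't want the NSID lists. */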
        if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
                rcu_read_lock();
                list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
                        if (ns->anagrpid == grpid)
                                desc->nsids[count++] = cpu_to_le32(ns->nsid);
                rcu_read_unlock();
        }

        desc->grpid = cpu_to_le32(grpid);
        desc->nnsids = cpu_to_le32(count);
        desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
        desc->state = req->port->ana_state[grpid];
        memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
        return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
}

static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
        struct nvme_ana_rsp_hdr hdr = { 0, };
        struct nvme_ana_group_desc *desc;
        size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
        size_t len;
        u32 grpid;
        u16 ngrps = 0;
        u16 status;

        status = NVME_SC_INTERNAL;
        desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
                        NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
        if (!desc)
                goto out;

        down_read(&nvmet_ana_sem);
        for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
                if (!nvmet_ana_group_enabled[grpid])
                        continue;
                len = nvmet_format_ana_group(req, grpid, desc);
                status = nvmet_copy_to_sgl(req, offset, desc, len);
                if (status)
                        break;
                offset += len;
                ngrps++;
        }
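        /*
         * Account for any enabled groups we didn't format above (e.g. after
         * a copy error) so that ngrps still reports the total.
         */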
        for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
                if (nvmet_ana_group_enabled[grpid])
                        ngrps++;
        }

        hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
        hdr.ngrps = cpu_to_le16(ngrps);
        nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
        up_read(&nvmet_ana_sem);

        kfree(desc);

        /* copy the header last once we know the number of groups */
        status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
        nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvme_id_ctrl *id;
        u16 status = 0;
        const char model[] = "Linux";

        id = kzalloc(sizeof(*id), GFP_KERNEL);
        if (!id) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        /* XXX: figure out how to assign real vendor IDs. */
        id->vid = 0;
        id->ssvid = 0;

        memset(id->sn, ' ', sizeof(id->sn));
        bin2hex(id->sn, &ctrl->subsys->serial,
                min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
        memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
        memcpy_and_pad(id->fr, sizeof(id->fr),
                       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

        id->rab = 6;

        /*
         * XXX: figure out how we can assign an IEEE OUI, but until then
         * the safest is to leave it as zeroes.
         */

        /* we support multiple ports, multiple hosts and ANA: */
        id->cmic = (1 << 0) | (1 << 1) | (1 << 3);

        /* no limit on data transfer sizes for now */
        id->mdts = 0;
        id->cntlid = cpu_to_le16(ctrl->cntlid);
        id->ver = cpu_to_le32(ctrl->subsys->ver);

        /* XXX: figure out what to do about RTD3R/RTD3 */
        id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
        id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
                NVME_CTRL_ATTR_TBKAS);

        id->oacs = 0;

        /*
         * We don't really have a practical limit on the number of abort
         * commands.  But we don't do anything useful for abort either, so
         * no point in allowing more abort commands than the spec requires.
         */
        id->acl = 3;

        id->aerl = NVMET_ASYNC_EVENTS - 1;

        /* first slot is read-only, only one slot supported */
        id->frmw = (1 << 0) | (1 << 1);
        id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
        id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
        id->npss = 0;

        /* We support keep-alive timeout in granularity of seconds */
        id->kas = cpu_to_le16(NVMET_KAS);

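        /*
         * SQES/CQES encode the maximum entry size in the upper nibble and the
         * required (minimum) size in the lower one, as a power of two:
         * 64-byte SQEs and 16-byte CQEs.
         */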
        id->sqes = (0x6 << 4) | 0x6;
        id->cqes = (0x4 << 4) | 0x4;

        /* no enforcement soft-limit for maxcmd - pick arbitrary high value */
        id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

        id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
        id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
        id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
                        NVME_CTRL_ONCS_WRITE_ZEROES);

        /* XXX: don't report vwc if the underlying device is write through */
        id->vwc = NVME_CTRL_VWC_PRESENT;

        /*
         * We can't support atomic writes bigger than an LBA without support
         * from the backend device.
         */
        id->awun = 0;
        id->awupf = 0;

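        /*
         * SGLS: bit 0 advertises SGL support, bit 2 keyed SGL data block
         * descriptors, and bit 20 SGL offsets (needed for in-capsule data).
         */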
        id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
        if (ctrl->ops->has_keyed_sgls)
                id->sgls |= cpu_to_le32(1 << 2);
        if (req->port->inline_data_size)
                id->sgls |= cpu_to_le32(1 << 20);

        strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

        /* Max command capsule size is sqe + single page of in-capsule data */
        id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
                                  req->port->inline_data_size) / 16);
        /* Max response capsule size is cqe */
        id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

        id->msdbd = ctrl->ops->msdbd;

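        /*
         * ANACAP bits 0-4: all five ANA states (optimized, non-optimized,
         * inaccessible, persistent loss, change) may be reported.
         */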
        id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
        id->anatt = 10; /* random value */
        id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
        id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

        /*
         * Meh, we don't really support any power state.  Fake up the same
         * values that qemu does.
         */
        id->psd[0].max_power = cpu_to_le16(0x9c4);
        id->psd[0].entry_lat = cpu_to_le32(0x10);
        id->psd[0].exit_lat = cpu_to_le32(0x4);

        id->nwpc = 1 << 0; /* write protect and no write protect */

        status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

        kfree(id);
out:
        nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
        struct nvmet_ns *ns;
        struct nvme_id_ns *id;
        u16 status = 0;

        if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
                req->error_loc = offsetof(struct nvme_identify, nsid);
                status = NVME_SC_INVALID_NS | NVME_SC_DNR;
                goto out;
        }

        id = kzalloc(sizeof(*id), GFP_KERNEL);
        if (!id) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        /* return an all zeroed buffer if we can't find an active namespace */
        ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
        if (!ns)
                goto done;

        /*
         * nuse = ncap = nsze isn't always true, but we have no way to find
         * that out from the underlying device.
         */
        id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift);
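        /* Leave nuse at zero when the namespace isn't reachable via this port. */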
        switch (req->port->ana_state[ns->anagrpid]) {
        case NVME_ANA_INACCESSIBLE:
        case NVME_ANA_PERSISTENT_LOSS:
                break;
        default:
                id->nuse = id->nsze;
                break;
        }

        /*
         * We just provide a single LBA format that matches what the
         * underlying device reports.
         */
        id->nlbaf = 0;
        id->flbas = 0;

        /*
         * Our namespace might always be shared.  Not just with other
         * controllers, but also with any other user of the block device.
         */
        id->nmic = (1 << 0);
        id->anagrpid = cpu_to_le32(ns->anagrpid);

        memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid));

        id->lbaf[0].ds = ns->blksize_shift;

        if (ns->readonly)
                id->nsattr |= (1 << 0);
        nvmet_put_namespace(ns);
done:
        status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
        kfree(id);
out:
        nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
        static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmet_ns *ns;
        u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
        __le32 *list;
        u16 status = 0;
        int i = 0;

        list = kzalloc(buf_size, GFP_KERNEL);
        if (!list) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

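        /* Only NSIDs strictly greater than the one in the command are returned. */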
        rcu_read_lock();
        list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
                if (ns->nsid <= min_nsid)
                        continue;
                list[i++] = cpu_to_le32(ns->nsid);
                if (i == buf_size / sizeof(__le32))
                        break;
        }
        rcu_read_unlock();

        status = nvmet_copy_to_sgl(req, 0, list, buf_size);

        kfree(list);
out:
        nvmet_req_complete(req, status);
}

static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
                                    void *id, off_t *off)
{
        struct nvme_ns_id_desc desc = {
                .nidt = type,
                .nidl = len,
        };
        u16 status;

        status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
        if (status)
                return status;
        *off += sizeof(desc);

        status = nvmet_copy_to_sgl(req, *off, id, len);
        if (status)
                return status;
        *off += len;

        return 0;
}

static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
        struct nvmet_ns *ns;
        u16 status = 0;
        off_t off = 0;

        ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
        if (!ns) {
                req->error_loc = offsetof(struct nvme_identify, nsid);
                status = NVME_SC_INVALID_NS | NVME_SC_DNR;
                goto out;
        }

        if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) {
                status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
                                                  NVME_NIDT_UUID_LEN,
                                                  &ns->uuid, &off);
                if (status)
                        goto out_put_ns;
        }
        if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) {
                status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
                                                  NVME_NIDT_NGUID_LEN,
                                                  &ns->nguid, &off);
                if (status)
                        goto out_put_ns;
        }

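        /*
         * Zero-fill the rest of the buffer; an all-zero descriptor also
         * terminates the list.
         */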
        if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
                        off) != NVME_IDENTIFY_DATA_SIZE - off)
                status = NVME_SC_INTERNAL | NVME_SC_DNR;
out_put_ns:
        nvmet_put_namespace(ns);
out:
        nvmet_req_complete(req, status);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so don't even bother waiting for the command to be
 * executed; return immediately, reporting that the command to abort was
 * not found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
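        /* Completion dword 0 bit 0 set == "command to abort was not found". */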
        nvmet_set_result(req, 1);
        nvmet_req_complete(req, 0);
}

static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
        u16 status;

        if (req->ns->file)
                status = nvmet_file_flush(req);
        else
                status = nvmet_bdev_flush(req);

        if (status)
                pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
        return status;
}

static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
        u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
        u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;

        req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
        if (unlikely(!req->ns)) {
                req->error_loc = offsetof(struct nvme_common_command, nsid);
                return status;
        }

        mutex_lock(&subsys->lock);
        switch (write_protect) {
        case NVME_NS_WRITE_PROTECT:
                req->ns->readonly = true;
                status = nvmet_write_protect_flush_sync(req);
                if (status)
                        req->ns->readonly = false;
                break;
        case NVME_NS_NO_WRITE_PROTECT:
                req->ns->readonly = false;
                status = 0;
                break;
        default:
                break;
        }

        if (!status)
                nvmet_ns_changed(subsys, req->ns->nsid);
        mutex_unlock(&subsys->lock);
        return status;
}

u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
        u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

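        /* KATO is given in milliseconds; the target tracks it in seconds. */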
        req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);

        nvmet_set_result(req, req->sq->ctrl->kato);

        return 0;
}

u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
        u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

        if (val32 & ~mask) {
                req->error_loc = offsetof(struct nvme_common_command, cdw11);
                return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
        }

        WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
        nvmet_set_result(req, val32);

        return 0;
}

static void nvmet_execute_set_features(struct nvmet_req *req)
{
        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
        u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
        u16 status = 0;

        switch (cdw10 & 0xff) {
        case NVME_FEAT_NUM_QUEUES:
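                /* Report NSQA/NCQA (both 0's based) in the lower/upper 16 bits. */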
                nvmet_set_result(req,
                        (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
                break;
        case NVME_FEAT_KATO:
                status = nvmet_set_feat_kato(req);
                break;
        case NVME_FEAT_ASYNC_EVENT:
                status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
                break;
        case NVME_FEAT_HOST_ID:
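                /*
                 * The host identifier is fixed at connect time for fabrics
                 * controllers, so it can't be changed here.
                 */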
                status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
                break;
        case NVME_FEAT_WRITE_PROTECT:
                status = nvmet_set_feat_write_protect(req);
                break;
        default:
                req->error_loc = offsetof(struct nvme_common_command, cdw10);
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                break;
        }

        nvmet_req_complete(req, status);
}

static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
        u32 result;

        req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
        if (!req->ns) {
                req->error_loc = offsetof(struct nvme_common_command, nsid);
                return NVME_SC_INVALID_NS | NVME_SC_DNR;
        }
        mutex_lock(&subsys->lock);
        if (req->ns->readonly)
                result = NVME_NS_WRITE_PROTECT;
        else
                result = NVME_NS_NO_WRITE_PROTECT;
        nvmet_set_result(req, result);
        mutex_unlock(&subsys->lock);

        return 0;
}

void nvmet_get_feat_kato(struct nvmet_req *req)
{
        nvmet_set_result(req, req->sq->ctrl->kato * 1000);
}

void nvmet_get_feat_async_event(struct nvmet_req *req)
{
        nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}

static void nvmet_execute_get_features(struct nvmet_req *req)
{
        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
        u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
        u16 status = 0;

        switch (cdw10 & 0xff) {
        /*
         * These features are mandatory in the spec, but we don't
         * have a useful way to implement them.  We'll eventually
         * need to come up with some fake values for these.
         */
#if 0
        case NVME_FEAT_ARBITRATION:
                break;
        case NVME_FEAT_POWER_MGMT:
                break;
        case NVME_FEAT_TEMP_THRESH:
                break;
        case NVME_FEAT_ERR_RECOVERY:
                break;
        case NVME_FEAT_IRQ_COALESCE:
                break;
        case NVME_FEAT_IRQ_CONFIG:
                break;
        case NVME_FEAT_WRITE_ATOMIC:
                break;
#endif
        case NVME_FEAT_ASYNC_EVENT:
                nvmet_get_feat_async_event(req);
                break;
        case NVME_FEAT_VOLATILE_WC:
                nvmet_set_result(req, 1);
                break;
        case NVME_FEAT_NUM_QUEUES:
                nvmet_set_result(req,
                        (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
                break;
        case NVME_FEAT_KATO:
                nvmet_get_feat_kato(req);
                break;
        case NVME_FEAT_HOST_ID:
                /* need 128-bit host identifier flag */
                if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
                        req->error_loc =
                                offsetof(struct nvme_common_command, cdw11);
                        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                        break;
                }

                status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
                                sizeof(req->sq->ctrl->hostid));
                break;
        case NVME_FEAT_WRITE_PROTECT:
                status = nvmet_get_feat_write_protect(req);
                break;
        default:
                req->error_loc =
                        offsetof(struct nvme_common_command, cdw10);
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                break;
        }

        nvmet_req_complete(req, status);
}

void nvmet_execute_async_event(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;

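        /*
         * Park the AER command; it is only completed from the controller's
         * async event work once an event is actually pending.
         */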
        mutex_lock(&ctrl->lock);
        if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
                mutex_unlock(&ctrl->lock);
                nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
                return;
        }
        ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
        mutex_unlock(&ctrl->lock);

        schedule_work(&ctrl->async_event_work);
}

void nvmet_execute_keep_alive(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;

        pr_debug("ctrl %d update keep-alive timer for %d secs\n",
                ctrl->cntlid, ctrl->kato);

        mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
        nvmet_req_complete(req, 0);
}

u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
        struct nvme_command *cmd = req->cmd;
        u16 ret;

        ret = nvmet_check_ctrl_status(req, cmd);
        if (unlikely(ret))
                return ret;

        switch (cmd->common.opcode) {
        case nvme_admin_get_log_page:
                req->data_len = nvmet_get_log_page_len(cmd);

                switch (cmd->get_log_page.lid) {
                case NVME_LOG_ERROR:
                        req->execute = nvmet_execute_get_log_page_error;
                        return 0;
                case NVME_LOG_SMART:
                        req->execute = nvmet_execute_get_log_page_smart;
                        return 0;
                case NVME_LOG_FW_SLOT:
                        /*
                         * We only support a single firmware slot, which is
                         * always active, so we can zero out the whole firmware
                         * slot log and still claim to fully implement this
                         * mandatory log page.
                         */
                        req->execute = nvmet_execute_get_log_page_noop;
                        return 0;
                case NVME_LOG_CHANGED_NS:
                        req->execute = nvmet_execute_get_log_changed_ns;
                        return 0;
                case NVME_LOG_CMD_EFFECTS:
                        req->execute = nvmet_execute_get_log_cmd_effects_ns;
                        return 0;
                case NVME_LOG_ANA:
                        req->execute = nvmet_execute_get_log_page_ana;
                        return 0;
                }
                break;
        case nvme_admin_identify:
                req->data_len = NVME_IDENTIFY_DATA_SIZE;
                switch (cmd->identify.cns) {
                case NVME_ID_CNS_NS:
                        req->execute = nvmet_execute_identify_ns;
                        return 0;
                case NVME_ID_CNS_CTRL:
                        req->execute = nvmet_execute_identify_ctrl;
                        return 0;
                case NVME_ID_CNS_NS_ACTIVE_LIST:
                        req->execute = nvmet_execute_identify_nslist;
                        return 0;
                case NVME_ID_CNS_NS_DESC_LIST:
                        req->execute = nvmet_execute_identify_desclist;
                        return 0;
                }
                break;
        case nvme_admin_abort_cmd:
                req->execute = nvmet_execute_abort;
                req->data_len = 0;
                return 0;
        case nvme_admin_set_features:
                req->execute = nvmet_execute_set_features;
                req->data_len = 0;
                return 0;
        case nvme_admin_get_features:
                req->execute = nvmet_execute_get_features;
                req->data_len = 0;
                return 0;
        case nvme_admin_async_event:
                req->execute = nvmet_execute_async_event;
                req->data_len = 0;
                return 0;
        case nvme_admin_keep_alive:
                req->execute = nvmet_execute_keep_alive;
                req->data_len = 0;
                return 0;
        }

        pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
               req->sq->qid);
        req->error_loc = offsetof(struct nvme_common_command, opcode);
        return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}