drivers/nvme/target/passthru.c
// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Over Fabrics Target Passthrough command implementation.
 *
 * Copyright (c) 2017-2018 Western Digital Corporation or its
 * affiliates.
 * Copyright (c) 2019-2020, Eideticom Inc.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>

#include "../host/nvme.h"
#include "nvmet.h"

MODULE_IMPORT_NS(NVME_TARGET_PASSTHRU);

/*
 * xarray to maintain one passthru subsystem per nvme controller.
 */
static DEFINE_XARRAY(passthru_subsystems);

static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvme_ctrl *pctrl = ctrl->subsys->passthru_ctrl;
        u16 status = NVME_SC_SUCCESS;
        struct nvme_id_ctrl *id;
        unsigned int max_hw_sectors;
        int page_shift;

        id = kzalloc(sizeof(*id), GFP_KERNEL);
        if (!id)
                return NVME_SC_INTERNAL;

        status = nvmet_copy_from_sgl(req, 0, id, sizeof(*id));
        if (status)
                goto out_free;

        id->cntlid = cpu_to_le16(ctrl->cntlid);
        id->ver = cpu_to_le32(ctrl->subsys->ver);

        /*
         * The passthru NVMe driver may have a limit on the number of segments
         * which depends on the host's memory fragmentation. To solve this,
         * ensure mdts is limited to the pages equal to the number of segments.
         */
        max_hw_sectors = min_not_zero(pctrl->max_segments << (PAGE_SHIFT - 9),
                                      pctrl->max_hw_sectors);

        /*
         * nvmet_passthru_map_sg is limited to using a single bio, so limit
         * the mdts based on BIO_MAX_VECS as well.
         */
        max_hw_sectors = min_not_zero(BIO_MAX_VECS << (PAGE_SHIFT - 9),
                                      max_hw_sectors);

        page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;

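        /*
         * mdts is expressed as a power of two in units of the controller's
         * minimum memory page size (2^(12 + MPSMIN) bytes), hence the
         * conversion from 512-byte sectors below.
         */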
        id->mdts = ilog2(max_hw_sectors) + 9 - page_shift;

        id->acl = 3;
        /*
         * We export the aerl limit of the fabrics controller; update this
         * when passthru-based aerl support is added.
         */
        id->aerl = NVMET_ASYNC_EVENTS - 1;

        /* emulate kas, as most PCIe ctrls don't support kas */
        id->kas = cpu_to_le16(NVMET_KAS);

        /* don't support host memory buffer */
        id->hmpre = 0;
        id->hmmin = 0;

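        /*
         * sqes/cqes encode the maximum (bits 7:4) and required (bits 3:0)
         * queue entry sizes as powers of two; clamp to the 64-byte SQEs (6)
         * and 16-byte CQEs (4) used by fabrics.
         */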
        id->sqes = min_t(__u8, ((0x6 << 4) | 0x6), id->sqes);
        id->cqes = min_t(__u8, ((0x4 << 4) | 0x4), id->cqes);
        id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

        /* don't support fused commands */
        id->fuses = 0;

        id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
        if (ctrl->ops->flags & NVMF_KEYED_SGLS)
                id->sgls |= cpu_to_le32(1 << 2);
        if (req->port->inline_data_size)
                id->sgls |= cpu_to_le32(1 << 20);

        /*
         * When the passthru controller is set up using the nvme-loop
         * transport it will export the passthru ctrl subsysnqn (PCIe NVMe
         * ctrl) and will fail in nvme/host/core.c in the
         * nvme_init_subsystem()->nvme_active_ctrls() code path with a
         * duplicate ctrl subsysnqn. To prevent that, we mask the
         * passthru-ctrl subsysnqn with the target ctrl subsysnqn.
         */
        memcpy(id->subnqn, ctrl->subsysnqn, sizeof(id->subnqn));

        /* use fabric id-ctrl values */
        id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
                                req->port->inline_data_size) / 16);
        id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

        id->msdbd = ctrl->ops->msdbd;

        /* Support multipath connections with fabrics */
        id->cmic |= 1 << 1;

        /* Disable reservations, see nvmet_parse_passthru_io_cmd() */
        id->oncs &= cpu_to_le16(~NVME_CTRL_ONCS_RESERVATIONS);

        status = nvmet_copy_to_sgl(req, 0, id, sizeof(struct nvme_id_ctrl));

out_free:
        kfree(id);
        return status;
}

static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
{
        u16 status = NVME_SC_SUCCESS;
        struct nvme_id_ns *id;
        int i;

        id = kzalloc(sizeof(*id), GFP_KERNEL);
        if (!id)
                return NVME_SC_INTERNAL;

        status = nvmet_copy_from_sgl(req, 0, id, sizeof(struct nvme_id_ns));
        if (status)
                goto out_free;

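        /*
         * Hide any LBA format that carries a metadata size and clear the
         * extended-LBA (metadata at end of LBA) bit in flbas below.
         */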
        for (i = 0; i < (id->nlbaf + 1); i++)
                if (id->lbaf[i].ms)
                        memset(&id->lbaf[i], 0, sizeof(id->lbaf[i]));

        id->flbas = id->flbas & ~(1 << 4);

        /*
         * Presently the NVMe-oF target code does not support sending
         * metadata, so we must disable it here. This should be updated
         * once the target starts supporting metadata.
         */
        id->mc = 0;

        status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

out_free:
        kfree(id);
        return status;
}

static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
{
        struct nvmet_req *req = container_of(w, struct nvmet_req, p.work);
        struct request *rq = req->p.rq;
        int status;

        status = nvme_execute_passthru_rq(rq);

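        /*
         * Identify results come straight from the passthru controller, so
         * override the fields that must reflect the fabrics target instead.
         */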
        if (status == NVME_SC_SUCCESS &&
            req->cmd->common.opcode == nvme_admin_identify) {
                switch (req->cmd->identify.cns) {
                case NVME_ID_CNS_CTRL:
                        nvmet_passthru_override_id_ctrl(req);
                        break;
                case NVME_ID_CNS_NS:
                        nvmet_passthru_override_id_ns(req);
                        break;
                }
        } else if (status < 0)
                status = NVME_SC_INTERNAL;

        req->cqe->result = nvme_req(rq)->result;
        nvmet_req_complete(req, status);
        blk_mq_free_request(rq);
}

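/*
 * end_io callback for the non-workqueue path: propagate the passthru
 * result and status straight into the fabrics completion entry.
 */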
static void nvmet_passthru_req_done(struct request *rq,
                                    blk_status_t blk_status)
{
        struct nvmet_req *req = rq->end_io_data;

        req->cqe->result = nvme_req(rq)->result;
        nvmet_req_complete(req, nvme_req(rq)->status);
        blk_mq_free_request(rq);
}

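/*
 * Pack the request's scatterlist into a single bio (using the inline bvec
 * when it fits) and attach it to the passthru block request; mdts is
 * overridden in id-ctrl so hosts stay within the BIO_MAX_VECS limit.
 */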
static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
{
        struct scatterlist *sg;
        struct bio *bio;
        int i;

        if (req->sg_cnt > BIO_MAX_VECS)
                return -EINVAL;

        if (nvmet_use_inline_bvec(req)) {
                bio = &req->p.inline_bio;
                bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
        } else {
                bio = bio_alloc(GFP_KERNEL, bio_max_segs(req->sg_cnt));
                bio->bi_end_io = bio_put;
        }
        bio->bi_opf = req_op(rq);

        for_each_sg(req->sg, sg, req->sg_cnt, i) {
                if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length,
                                    sg->offset) < sg->length) {
                        nvmet_req_bio_put(req, bio);
                        return -EINVAL;
                }
        }

        blk_rq_bio_prep(rq, bio, req->sg_cnt);

        return 0;
}

static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
{
        struct nvme_ctrl *ctrl = nvmet_req_passthru_ctrl(req);
        struct request_queue *q = ctrl->admin_q;
        struct nvme_ns *ns = NULL;
        struct request *rq = NULL;
        unsigned int timeout;
        u32 effects;
        u16 status;
        int ret;

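        /*
         * Admin commands (qid 0) run on the passthru controller's admin
         * queue; I/O commands are routed to the queue of the namespace
         * referenced by the command.
         */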
        if (likely(req->sq->qid != 0)) {
                u32 nsid = le32_to_cpu(req->cmd->common.nsid);

                ns = nvme_find_get_ns(ctrl, nsid);
                if (unlikely(!ns)) {
                        pr_err("failed to get passthru ns nsid:%u\n", nsid);
                        status = NVME_SC_INVALID_NS | NVME_SC_DNR;
                        goto out;
                }

                q = ns->queue;
                timeout = nvmet_req_subsys(req)->io_timeout;
        } else {
                timeout = nvmet_req_subsys(req)->admin_timeout;
        }

        rq = nvme_alloc_request(q, req->cmd, 0);
        if (IS_ERR(rq)) {
                status = NVME_SC_INTERNAL;
                goto out_put_ns;
        }

        if (timeout)
                rq->timeout = timeout;

        if (req->sg_cnt) {
                ret = nvmet_passthru_map_sg(req, rq);
                if (unlikely(ret)) {
                        status = NVME_SC_INTERNAL;
                        goto out_put_req;
                }
        }

        /*
         * If there are effects for the command we are about to execute, or
         * an end_req function, we need to use nvme_execute_passthru_rq()
         * synchronously in a work item, since the end_req function and
         * nvme_passthru_end() can't be called in the request done callback,
         * which is typically in interrupt context.
         */
        effects = nvme_command_effects(ctrl, ns, req->cmd->common.opcode);
        if (req->p.use_workqueue || effects) {
                INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
                req->p.rq = rq;
                schedule_work(&req->p.work);
        } else {
                rq->end_io_data = req;
                blk_execute_rq_nowait(ns ? ns->disk : NULL, rq, 0,
                                      nvmet_passthru_req_done);
        }

        if (ns)
                nvme_put_ns(ns);

        return;

out_put_req:
        blk_mq_free_request(rq);
out_put_ns:
        if (ns)
                nvme_put_ns(ns);
out:
        nvmet_req_complete(req, status);
}

/*
 * We need to emulate the set host behaviour feature to ensure that any
 * behaviour requested by the target's host matches the behaviour requested
 * by the device's host, and fail otherwise.
 */
static void nvmet_passthru_set_host_behaviour(struct nvmet_req *req)
{
        struct nvme_ctrl *ctrl = nvmet_req_passthru_ctrl(req);
        struct nvme_feat_host_behavior *host;
        u16 status = NVME_SC_INTERNAL;
        int ret;

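        /*
         * host[0] holds the passthru controller's current host behaviour,
         * host[1] the value the fabrics host is trying to set.
         */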
        host = kzalloc(sizeof(*host) * 2, GFP_KERNEL);
        if (!host)
                goto out_complete_req;

        ret = nvme_get_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
                                host, sizeof(*host), NULL);
        if (ret)
                goto out_free_host;

        status = nvmet_copy_from_sgl(req, 0, &host[1], sizeof(*host));
        if (status)
                goto out_free_host;

        if (memcmp(&host[0], &host[1], sizeof(host[0]))) {
                pr_warn("target host has requested different behaviour from the local host\n");
                status = NVME_SC_INTERNAL;
        }

out_free_host:
        kfree(host);
out_complete_req:
        nvmet_req_complete(req, status);
}

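/* Default setup: run the command through the generic passthru execute path. */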
static u16 nvmet_setup_passthru_command(struct nvmet_req *req)
{
        req->p.use_workqueue = false;
        req->execute = nvmet_passthru_execute_cmd;
        return NVME_SC_SUCCESS;
}

u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
        /* Reject any commands with non-sgl flags set (i.e. fused commands) */
        if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
                return NVME_SC_INVALID_FIELD;

        switch (req->cmd->common.opcode) {
        case nvme_cmd_resv_register:
        case nvme_cmd_resv_report:
        case nvme_cmd_resv_acquire:
        case nvme_cmd_resv_release:
                /*
                 * Reservations cannot be supported properly because the
                 * underlying device has no way of differentiating different
                 * hosts that connect via fabrics. This could potentially be
                 * emulated in the future if regular targets grow support for
                 * this feature.
                 */
                return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
        }

        return nvmet_setup_passthru_command(req);
}

/*
 * Only features that are emulated or specifically allowed in the list are
 * passed down to the controller. This function implements the allow list for
 * both get and set features.
 */
static u16 nvmet_passthru_get_set_features(struct nvmet_req *req)
{
        switch (le32_to_cpu(req->cmd->features.fid)) {
        case NVME_FEAT_ARBITRATION:
        case NVME_FEAT_POWER_MGMT:
        case NVME_FEAT_LBA_RANGE:
        case NVME_FEAT_TEMP_THRESH:
        case NVME_FEAT_ERR_RECOVERY:
        case NVME_FEAT_VOLATILE_WC:
        case NVME_FEAT_WRITE_ATOMIC:
        case NVME_FEAT_AUTO_PST:
        case NVME_FEAT_TIMESTAMP:
        case NVME_FEAT_HCTM:
        case NVME_FEAT_NOPSC:
        case NVME_FEAT_RRL:
        case NVME_FEAT_PLM_CONFIG:
        case NVME_FEAT_PLM_WINDOW:
        case NVME_FEAT_HOST_BEHAVIOR:
        case NVME_FEAT_SANITIZE:
        case NVME_FEAT_VENDOR_START ... NVME_FEAT_VENDOR_END:
                return nvmet_setup_passthru_command(req);

        case NVME_FEAT_ASYNC_EVENT:
                /* There is no support for forwarding ASYNC events */
        case NVME_FEAT_IRQ_COALESCE:
        case NVME_FEAT_IRQ_CONFIG:
                /* The IRQ settings will not apply to the target controller */
        case NVME_FEAT_HOST_MEM_BUF:
                /*
                 * Any HMB that's set will not be passed through and will
                 * not work as expected
                 */
        case NVME_FEAT_SW_PROGRESS:
                /*
                 * The Pre-Boot Software Load Count doesn't make much
                 * sense for a target to export
                 */
        case NVME_FEAT_RESV_MASK:
        case NVME_FEAT_RESV_PERSIST:
                /* No reservations, see nvmet_parse_passthru_io_cmd() */
        default:
                return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
        }
}

u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
        /* Reject any commands with non-sgl flags set (i.e. fused commands) */
        if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
                return NVME_SC_INVALID_FIELD;

        /*
         * Passthru all vendor specific commands
         */
        if (req->cmd->common.opcode >= nvme_admin_vendor_start)
                return nvmet_setup_passthru_command(req);

        switch (req->cmd->common.opcode) {
        case nvme_admin_async_event:
                req->execute = nvmet_execute_async_event;
                return NVME_SC_SUCCESS;
        case nvme_admin_keep_alive:
                /*
                 * Most PCIe ctrls don't support the keep alive cmd, so we
                 * route keep alive to the non-passthru mode. Update this code
                 * once PCIe ctrls with keep alive support become available.
                 */
                req->execute = nvmet_execute_keep_alive;
                return NVME_SC_SUCCESS;
        case nvme_admin_set_features:
                switch (le32_to_cpu(req->cmd->features.fid)) {
                case NVME_FEAT_ASYNC_EVENT:
                case NVME_FEAT_KATO:
                case NVME_FEAT_NUM_QUEUES:
                case NVME_FEAT_HOST_ID:
                        req->execute = nvmet_execute_set_features;
                        return NVME_SC_SUCCESS;
                case NVME_FEAT_HOST_BEHAVIOR:
                        req->execute = nvmet_passthru_set_host_behaviour;
                        return NVME_SC_SUCCESS;
                default:
                        return nvmet_passthru_get_set_features(req);
                }
                break;
        case nvme_admin_get_features:
                switch (le32_to_cpu(req->cmd->features.fid)) {
                case NVME_FEAT_ASYNC_EVENT:
                case NVME_FEAT_KATO:
                case NVME_FEAT_NUM_QUEUES:
                case NVME_FEAT_HOST_ID:
                        req->execute = nvmet_execute_get_features;
                        return NVME_SC_SUCCESS;
                default:
                        return nvmet_passthru_get_set_features(req);
                }
                break;
        case nvme_admin_identify:
                switch (req->cmd->identify.cns) {
                case NVME_ID_CNS_CTRL:
                        req->execute = nvmet_passthru_execute_cmd;
                        req->p.use_workqueue = true;
                        return NVME_SC_SUCCESS;
                case NVME_ID_CNS_CS_CTRL:
                        switch (req->cmd->identify.csi) {
                        case NVME_CSI_ZNS:
                                req->execute = nvmet_passthru_execute_cmd;
                                req->p.use_workqueue = true;
                                return NVME_SC_SUCCESS;
                        }
                        return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
                case NVME_ID_CNS_NS:
                        req->execute = nvmet_passthru_execute_cmd;
                        req->p.use_workqueue = true;
                        return NVME_SC_SUCCESS;
                case NVME_ID_CNS_CS_NS:
                        switch (req->cmd->identify.csi) {
                        case NVME_CSI_ZNS:
                                req->execute = nvmet_passthru_execute_cmd;
                                req->p.use_workqueue = true;
                                return NVME_SC_SUCCESS;
                        }
                        return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
                default:
                        return nvmet_setup_passthru_command(req);
                }
        case nvme_admin_get_log_page:
                return nvmet_setup_passthru_command(req);
        default:
                /* Reject commands not in the allowlist above */
                return nvmet_report_invalid_opcode(req);
        }
}

int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys)
{
        struct nvme_ctrl *ctrl;
        struct file *file;
        int ret = -EINVAL;
        void *old;

        mutex_lock(&subsys->lock);
        if (!subsys->passthru_ctrl_path)
                goto out_unlock;
        if (subsys->passthru_ctrl)
                goto out_unlock;

        if (subsys->nr_namespaces) {
                pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
                goto out_unlock;
        }

        file = filp_open(subsys->passthru_ctrl_path, O_RDWR, 0);
        if (IS_ERR(file)) {
                ret = PTR_ERR(file);
                goto out_unlock;
        }

        ctrl = nvme_ctrl_from_file(file);
        if (!ctrl) {
                pr_err("failed to open nvme controller %s\n",
                       subsys->passthru_ctrl_path);

                goto out_put_file;
        }

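        /*
         * Register this subsystem as the sole passthru user of the
         * controller; a non-NULL 'old' means another subsystem has already
         * claimed this cntlid.
         */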
        old = xa_cmpxchg(&passthru_subsystems, ctrl->cntlid, NULL,
                         subsys, GFP_KERNEL);
        if (xa_is_err(old)) {
                ret = xa_err(old);
                goto out_put_file;
        }

        if (old)
                goto out_put_file;

        subsys->passthru_ctrl = ctrl;
        subsys->ver = ctrl->vs;

        if (subsys->ver < NVME_VS(1, 2, 1)) {
                pr_warn("nvme controller version is too old: %llu.%llu.%llu, advertising 1.2.1\n",
                        NVME_MAJOR(subsys->ver), NVME_MINOR(subsys->ver),
                        NVME_TERTIARY(subsys->ver));
                subsys->ver = NVME_VS(1, 2, 1);
        }
        nvme_get_ctrl(ctrl);
        __module_get(subsys->passthru_ctrl->ops->module);
        ret = 0;

out_put_file:
        filp_close(file, NULL);
out_unlock:
        mutex_unlock(&subsys->lock);
        return ret;
}

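/* Caller must hold subsys->lock. */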
static void __nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
        if (subsys->passthru_ctrl) {
                xa_erase(&passthru_subsystems, subsys->passthru_ctrl->cntlid);
                module_put(subsys->passthru_ctrl->ops->module);
                nvme_put_ctrl(subsys->passthru_ctrl);
        }
        subsys->passthru_ctrl = NULL;
        subsys->ver = NVMET_DEFAULT_VS;
}

void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
        mutex_lock(&subsys->lock);
        __nvmet_passthru_ctrl_disable(subsys);
        mutex_unlock(&subsys->lock);
}

void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
{
        mutex_lock(&subsys->lock);
        __nvmet_passthru_ctrl_disable(subsys);
        mutex_unlock(&subsys->lock);
        kfree(subsys->passthru_ctrl_path);
}