drivers/nvme/target/loop.c (linux-2.6-microblaze.git)
/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_MAX_SEGMENTS          256

/*
 * We handle AEN commands ourselves and don't even let the
 * block layer know about them.
 */
#define NVME_LOOP_NR_AEN_COMMANDS       1
#define NVME_LOOP_AQ_BLKMQ_DEPTH        \
        (NVME_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)

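/*
 * Per-request context shared between the host and the target side of the
 * loopback path: the submission command, the completion it will receive,
 * and the nvmet_req used to execute the command on the target side.
 * first_sgl is a flexible array; its space comes from the extra cmd_size
 * reserved in the tag sets below (SG_CHUNK_SIZE scatterlist entries).
 */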
struct nvme_loop_iod {
        struct nvme_request     nvme_req;
        struct nvme_command     cmd;
        struct nvme_completion  rsp;
        struct nvmet_req        req;
        struct nvme_loop_queue  *queue;
        struct work_struct      work;
        struct sg_table         sg_table;
        struct scatterlist      first_sgl[];
};

struct nvme_loop_ctrl {
        struct nvme_loop_queue  *queues;

        struct blk_mq_tag_set   admin_tag_set;

        struct list_head        list;
        struct blk_mq_tag_set   tag_set;
        struct nvme_loop_iod    async_event_iod;
        struct nvme_ctrl        ctrl;

        struct nvmet_ctrl       *target_ctrl;
        struct work_struct      delete_work;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
        return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

struct nvme_loop_queue {
        struct nvmet_cq         nvme_cq;
        struct nvmet_sq         nvme_sq;
        struct nvme_loop_ctrl   *ctrl;
};

static struct nvmet_port *nvmet_loop_port;

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
        return queue - queue->ctrl->queues;
}

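/*
 * Host-side completion: release the command payload and the chained
 * scatterlist before handing the request back to the NVMe core.
 */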
static void nvme_loop_complete_rq(struct request *req)
{
        struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

        nvme_cleanup_cmd(req);
        sg_free_table_chained(&iod->sg_table, true);
        nvme_complete_rq(req);
}

static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
        u32 queue_idx = nvme_loop_queue_idx(queue);

        if (queue_idx == 0)
                return queue->ctrl->admin_tag_set.tags[queue_idx];
        return queue->ctrl->tag_set.tags[queue_idx - 1];
}

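/*
 * Called by the target code once it has a completion for us.  Except for
 * AEN completions (special-cased below) the command_id is used to look up
 * the original blk-mq request, which is then completed on the host side.
 */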
static void nvme_loop_queue_response(struct nvmet_req *req)
{
        struct nvme_loop_queue *queue =
                container_of(req->sq, struct nvme_loop_queue, nvme_sq);
        struct nvme_completion *cqe = req->rsp;

        /*
         * AEN requests are special as they don't time out and can
         * survive any kind of queue freeze and often don't respond to
         * aborts.  We don't even bother to allocate a struct request
         * for them but rather special case them here.
         */
        if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
                        cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
                nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
                                &cqe->result);
        } else {
                struct request *rq;

                rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
                if (!rq) {
                        dev_err(queue->ctrl->ctrl.device,
                                "tag 0x%x on queue %d not found\n",
                                cqe->command_id, nvme_loop_queue_idx(queue));
                        return;
                }

                nvme_end_request(rq, cqe->status, cqe->result);
        }
}

static void nvme_loop_execute_work(struct work_struct *work)
{
        struct nvme_loop_iod *iod =
                container_of(work, struct nvme_loop_iod, work);

        iod->req.execute(&iod->req);
}

static enum blk_eh_timer_return
nvme_loop_timeout(struct request *rq, bool reserved)
{
        struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);

        /* queue error recovery */
        nvme_reset_ctrl(&iod->queue->ctrl->ctrl);

        /* fail with DNR on admin cmd timeout */
        nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;

        return BLK_EH_HANDLED;
}

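/*
 * Host-side .queue_rq handler: translate the block request into an NVMe
 * command, initialize the target-side nvmet_req against our loopback
 * queue, map the data into a chained scatterlist shared with the target,
 * and defer the actual execution to a work item so it runs outside the
 * submission context.
 */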
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                const struct blk_mq_queue_data *bd)
{
        struct nvme_ns *ns = hctx->queue->queuedata;
        struct nvme_loop_queue *queue = hctx->driver_data;
        struct request *req = bd->rq;
        struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
        blk_status_t ret;

        ret = nvme_setup_cmd(ns, req, &iod->cmd);
        if (ret)
                return ret;

        iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
        iod->req.port = nvmet_loop_port;
        if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
                        &queue->nvme_sq, &nvme_loop_ops)) {
                nvme_cleanup_cmd(req);
                blk_mq_start_request(req);
                nvme_loop_queue_response(&iod->req);
                return BLK_STS_OK;
        }

        if (blk_rq_bytes(req)) {
                iod->sg_table.sgl = iod->first_sgl;
                if (sg_alloc_table_chained(&iod->sg_table,
                                blk_rq_nr_phys_segments(req),
                                iod->sg_table.sgl))
                        return BLK_STS_RESOURCE;

                iod->req.sg = iod->sg_table.sgl;
                iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
        }

        blk_mq_start_request(req);

        schedule_work(&iod->work);
        return BLK_STS_OK;
}

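/*
 * AER submission bypasses blk-mq entirely: the command uses the reserved
 * command_id NVME_LOOP_AQ_BLKMQ_DEPTH and a preallocated iod, matching
 * the special case in nvme_loop_queue_response() above.
 */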
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
{
        struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
        struct nvme_loop_queue *queue = &ctrl->queues[0];
        struct nvme_loop_iod *iod = &ctrl->async_event_iod;

        memset(&iod->cmd, 0, sizeof(iod->cmd));
        iod->cmd.common.opcode = nvme_admin_async_event;
        iod->cmd.common.command_id = NVME_LOOP_AQ_BLKMQ_DEPTH;
        iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

        if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
                        &nvme_loop_ops)) {
                dev_err(ctrl->ctrl.device, "failed async event work\n");
                return;
        }

        schedule_work(&iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
                struct nvme_loop_iod *iod, unsigned int queue_idx)
{
        iod->req.cmd = &iod->cmd;
        iod->req.rsp = &iod->rsp;
        iod->queue = &ctrl->queues[queue_idx];
        INIT_WORK(&iod->work, nvme_loop_execute_work);
        return 0;
}

static int nvme_loop_init_request(struct blk_mq_tag_set *set,
                struct request *req, unsigned int hctx_idx,
                unsigned int numa_node)
{
        struct nvme_loop_ctrl *ctrl = set->driver_data;

        return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
                        (set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                unsigned int hctx_idx)
{
        struct nvme_loop_ctrl *ctrl = data;
        struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

        BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);

        hctx->driver_data = queue;
        return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                unsigned int hctx_idx)
{
        struct nvme_loop_ctrl *ctrl = data;
        struct nvme_loop_queue *queue = &ctrl->queues[0];

        BUG_ON(hctx_idx != 0);

        hctx->driver_data = queue;
        return 0;
}

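/*
 * The I/O and admin tag sets share the same submission, completion and
 * timeout handlers; they differ only in how a hardware context is mapped
 * to a loop queue.
 */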
static const struct blk_mq_ops nvme_loop_mq_ops = {
        .queue_rq       = nvme_loop_queue_rq,
        .complete       = nvme_loop_complete_rq,
        .init_request   = nvme_loop_init_request,
        .init_hctx      = nvme_loop_init_hctx,
        .timeout        = nvme_loop_timeout,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
        .queue_rq       = nvme_loop_queue_rq,
        .complete       = nvme_loop_complete_rq,
        .init_request   = nvme_loop_init_request,
        .init_hctx      = nvme_loop_init_admin_hctx,
        .timeout        = nvme_loop_timeout,
};

static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
        nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
        blk_cleanup_queue(ctrl->ctrl.admin_q);
        blk_mq_free_tag_set(&ctrl->admin_tag_set);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
        struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

        if (list_empty(&ctrl->list))
                goto free_ctrl;

        mutex_lock(&nvme_loop_ctrl_mutex);
        list_del(&ctrl->list);
        mutex_unlock(&nvme_loop_ctrl_mutex);

        if (nctrl->tagset) {
                blk_cleanup_queue(ctrl->ctrl.connect_q);
                blk_mq_free_tag_set(&ctrl->tag_set);
        }
        kfree(ctrl->queues);
        nvmf_free_options(nctrl->opts);
free_ctrl:
        kfree(ctrl);
}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
        int i;

        for (i = 1; i < ctrl->ctrl.queue_count; i++)
                nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
}

static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
        struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
        unsigned int nr_io_queues;
        int ret, i;

        nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
        ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
        if (ret || !nr_io_queues)
                return ret;

        dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

        for (i = 1; i <= nr_io_queues; i++) {
                ctrl->queues[i].ctrl = ctrl;
                ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
                if (ret)
                        goto out_destroy_queues;

                ctrl->ctrl.queue_count++;
        }

        return 0;

out_destroy_queues:
        nvme_loop_destroy_io_queues(ctrl);
        return ret;
}

static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
        int i, ret;

        for (i = 1; i < ctrl->ctrl.queue_count; i++) {
                ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
                if (ret)
                        return ret;
        }

        return 0;
}

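/*
 * Bring up the admin queue: allocate the admin tag set and request queue,
 * issue the fabrics connect, read CAP, enable the controller and run
 * identify.  Errors unwind in reverse order of setup.
 */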
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
        int error;

        memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
        ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
        ctrl->admin_tag_set.queue_depth = NVME_LOOP_AQ_BLKMQ_DEPTH;
        ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
        ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
        ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
                SG_CHUNK_SIZE * sizeof(struct scatterlist);
        ctrl->admin_tag_set.driver_data = ctrl;
        ctrl->admin_tag_set.nr_hw_queues = 1;
        ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;

        ctrl->queues[0].ctrl = ctrl;
        error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
        if (error)
                return error;
        ctrl->ctrl.queue_count = 1;

        error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
        if (error)
                goto out_free_sq;

        ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
        if (IS_ERR(ctrl->ctrl.admin_q)) {
                error = PTR_ERR(ctrl->ctrl.admin_q);
                goto out_free_tagset;
        }

        error = nvmf_connect_admin_queue(&ctrl->ctrl);
        if (error)
                goto out_cleanup_queue;

        error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
        if (error) {
                dev_err(ctrl->ctrl.device,
                        "prop_get NVME_REG_CAP failed\n");
                goto out_cleanup_queue;
        }

        ctrl->ctrl.sqsize =
                min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);

        error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
        if (error)
                goto out_cleanup_queue;

        ctrl->ctrl.max_hw_sectors =
                (NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

        error = nvme_init_identify(&ctrl->ctrl);
        if (error)
                goto out_cleanup_queue;

        return 0;

out_cleanup_queue:
        blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
        blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
        nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
        return error;
}

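/*
 * Tear down a live controller: quiesce and cancel all outstanding I/O and
 * admin commands, shut the controller down if it is still live, then
 * destroy the target-side queues.
 */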
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
        if (ctrl->ctrl.queue_count > 1) {
                nvme_stop_queues(&ctrl->ctrl);
                blk_mq_tagset_busy_iter(&ctrl->tag_set,
                                        nvme_cancel_request, &ctrl->ctrl);
                nvme_loop_destroy_io_queues(ctrl);
        }

        if (ctrl->ctrl.state == NVME_CTRL_LIVE)
                nvme_shutdown_ctrl(&ctrl->ctrl);

        blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
        blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
                                nvme_cancel_request, &ctrl->ctrl);
        blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
        nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_del_ctrl_work(struct work_struct *work)
{
        struct nvme_loop_ctrl *ctrl = container_of(work,
                                struct nvme_loop_ctrl, delete_work);

        nvme_stop_ctrl(&ctrl->ctrl);
        nvme_remove_namespaces(&ctrl->ctrl);
        nvme_loop_shutdown_ctrl(ctrl);
        nvme_uninit_ctrl(&ctrl->ctrl);
        nvme_put_ctrl(&ctrl->ctrl);
}

static int __nvme_loop_del_ctrl(struct nvme_loop_ctrl *ctrl)
{
        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
                return -EBUSY;

        if (!queue_work(nvme_wq, &ctrl->delete_work))
                return -EBUSY;

        return 0;
}

static int nvme_loop_del_ctrl(struct nvme_ctrl *nctrl)
{
        struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);
        int ret;

        ret = __nvme_loop_del_ctrl(ctrl);
        if (ret)
                return ret;

        flush_work(&ctrl->delete_work);

        return 0;
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
        struct nvme_loop_ctrl *ctrl;

        mutex_lock(&nvme_loop_ctrl_mutex);
        list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
                if (ctrl->ctrl.cntlid == nctrl->cntlid)
                        __nvme_loop_del_ctrl(ctrl);
        }
        mutex_unlock(&nvme_loop_ctrl_mutex);
}

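/*
 * Controller reset: shut everything down and rebuild the admin and I/O
 * queues from scratch.  If any step fails the controller is removed
 * rather than left half-initialized.
 */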
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
        struct nvme_loop_ctrl *ctrl =
                container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
        bool changed;
        int ret;

        nvme_stop_ctrl(&ctrl->ctrl);
        nvme_loop_shutdown_ctrl(ctrl);

        ret = nvme_loop_configure_admin_queue(ctrl);
        if (ret)
                goto out_disable;

        ret = nvme_loop_init_io_queues(ctrl);
        if (ret)
                goto out_destroy_admin;

        ret = nvme_loop_connect_io_queues(ctrl);
        if (ret)
                goto out_destroy_io;

        blk_mq_update_nr_hw_queues(&ctrl->tag_set,
                        ctrl->ctrl.queue_count - 1);

        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
        WARN_ON_ONCE(!changed);

        nvme_start_ctrl(&ctrl->ctrl);

        return;

out_destroy_io:
        nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
        nvme_loop_destroy_admin_queue(ctrl);
out_disable:
        dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
        nvme_uninit_ctrl(&ctrl->ctrl);
        nvme_put_ctrl(&ctrl->ctrl);
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
        .name                   = "loop",
        .module                 = THIS_MODULE,
        .flags                  = NVME_F_FABRICS,
        .reg_read32             = nvmf_reg_read32,
        .reg_read64             = nvmf_reg_read64,
        .reg_write32            = nvmf_reg_write32,
        .free_ctrl              = nvme_loop_free_ctrl,
        .submit_async_event     = nvme_loop_submit_async_event,
        .delete_ctrl            = nvme_loop_del_ctrl,
};

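/*
 * Allocate the target-side I/O submission queues, the I/O tag set and the
 * fabrics connect queue, then issue the fabrics connect on each I/O queue.
 */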
static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
        int ret;

        ret = nvme_loop_init_io_queues(ctrl);
        if (ret)
                return ret;

        memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
        ctrl->tag_set.ops = &nvme_loop_mq_ops;
        ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
        ctrl->tag_set.reserved_tags = 1; /* fabric connect */
        ctrl->tag_set.numa_node = NUMA_NO_NODE;
        ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
                SG_CHUNK_SIZE * sizeof(struct scatterlist);
        ctrl->tag_set.driver_data = ctrl;
        ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
        ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
        ctrl->ctrl.tagset = &ctrl->tag_set;

        ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
        if (ret)
                goto out_destroy_queues;

        ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
        if (IS_ERR(ctrl->ctrl.connect_q)) {
                ret = PTR_ERR(ctrl->ctrl.connect_q);
                goto out_free_tagset;
        }

        ret = nvme_loop_connect_io_queues(ctrl);
        if (ret)
                goto out_cleanup_connect_q;

        return 0;

out_cleanup_connect_q:
        blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
        blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
        nvme_loop_destroy_io_queues(ctrl);
        return ret;
}

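/*
 * Create a new loop controller.  This is reached through the fabrics host
 * code when a "loop" transport connection is requested, e.g. (assuming
 * nvme-cli as the userspace tool) "nvme connect -t loop -n <subsysnqn>".
 */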
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
                struct nvmf_ctrl_options *opts)
{
        struct nvme_loop_ctrl *ctrl;
        bool changed;
        int ret;

        ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
        if (!ctrl)
                return ERR_PTR(-ENOMEM);
        ctrl->ctrl.opts = opts;
        INIT_LIST_HEAD(&ctrl->list);

        INIT_WORK(&ctrl->delete_work, nvme_loop_del_ctrl_work);
        INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);

        ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
                                0 /* no quirks, we're perfect! */);
        if (ret)
                goto out_put_ctrl;

        ret = -ENOMEM;

        ctrl->ctrl.sqsize = opts->queue_size - 1;
        ctrl->ctrl.kato = opts->kato;

        ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
                        GFP_KERNEL);
        if (!ctrl->queues)
                goto out_uninit_ctrl;

        ret = nvme_loop_configure_admin_queue(ctrl);
        if (ret)
                goto out_free_queues;

        if (opts->queue_size > ctrl->ctrl.maxcmd) {
                /* warn if maxcmd is lower than queue_size */
                dev_warn(ctrl->ctrl.device,
                        "queue_size %zu > ctrl maxcmd %u, clamping down\n",
                        opts->queue_size, ctrl->ctrl.maxcmd);
                opts->queue_size = ctrl->ctrl.maxcmd;
        }

        if (opts->nr_io_queues) {
                ret = nvme_loop_create_io_queues(ctrl);
                if (ret)
                        goto out_remove_admin_queue;
        }

        nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

        dev_info(ctrl->ctrl.device,
                 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

        kref_get(&ctrl->ctrl.kref);

        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
        WARN_ON_ONCE(!changed);

        mutex_lock(&nvme_loop_ctrl_mutex);
        list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
        mutex_unlock(&nvme_loop_ctrl_mutex);

        nvme_start_ctrl(&ctrl->ctrl);

        return &ctrl->ctrl;

out_remove_admin_queue:
        nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
        kfree(ctrl->queues);
out_uninit_ctrl:
        nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
        nvme_put_ctrl(&ctrl->ctrl);
        if (ret > 0)
                ret = -EIO;
        return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
        /*
         * XXX: disallow adding more than one port so that there are no
         * connection rejections when a subsystem is assigned to a port
         * for which loop doesn't have a pointer.
         * This scenario would be possible if we allowed more than one
         * port to be added and a subsystem was assigned to a port other
         * than nvmet_loop_port.
         */

        if (nvmet_loop_port)
                return -EPERM;

        nvmet_loop_port = port;
        return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
        if (port == nvmet_loop_port)
                nvmet_loop_port = NULL;
}

static struct nvmet_fabrics_ops nvme_loop_ops = {
        .owner          = THIS_MODULE,
        .type           = NVMF_TRTYPE_LOOP,
        .add_port       = nvme_loop_add_port,
        .remove_port    = nvme_loop_remove_port,
        .queue_response = nvme_loop_queue_response,
        .delete_ctrl    = nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
        .name           = "loop",
        .create_ctrl    = nvme_loop_create_ctrl,
};

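/*
 * The loop driver registers on both sides of the fabric: with nvmet as a
 * target transport and with the fabrics host code as a host transport, so
 * a single module provides the complete loopback path.
 */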
static int __init nvme_loop_init_module(void)
{
        int ret;

        ret = nvmet_register_transport(&nvme_loop_ops);
        if (ret)
                return ret;

        ret = nvmf_register_transport(&nvme_loop_transport);
        if (ret)
                nvmet_unregister_transport(&nvme_loop_ops);

        return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
        struct nvme_loop_ctrl *ctrl, *next;

        nvmf_unregister_transport(&nvme_loop_transport);
        nvmet_unregister_transport(&nvme_loop_ops);

        mutex_lock(&nvme_loop_ctrl_mutex);
        list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
                __nvme_loop_del_ctrl(ctrl);
        mutex_unlock(&nvme_loop_ctrl_mutex);

        flush_workqueue(nvme_wq);
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */