drivers/nvme/target/core.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Common code for the NVMe target.
4  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
5  */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/random.h>
9 #include <linux/rculist.h>
10 #include <linux/pci-p2pdma.h>
11 #include <linux/scatterlist.h>
12
13 #define CREATE_TRACE_POINTS
14 #include "trace.h"
15
16 #include "nvmet.h"
17
18 struct workqueue_struct *buffered_io_wq;
19 static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
20 static DEFINE_IDA(cntlid_ida);
21
22 /*
23  * This read/write semaphore is used to synchronize access to configuration
24  * information on a target system that will result in discovery log page
25  * information change for at least one host.
26  * The full list of resources protected by this semaphore is:
27  *
28  *  - subsystems list
29  *  - per-subsystem allowed hosts list
30  *  - allow_any_host subsystem attribute
31  *  - nvmet_genctr
32  *  - the nvmet_transports array
33  *
34  * When updating any of those lists/structures the write lock should be
35  * taken, while the read lock is taken when reading (populating the
36  * discovery log page or checking host-subsystem links) to allow concurrent reads.
37  */
38 DECLARE_RWSEM(nvmet_config_sem);
39
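/*
 * ANA group bookkeeping: per-group enable reference counts and the ANA
 * change count.  Both are protected by nvmet_ana_sem.
 */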
40 u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
41 u64 nvmet_ana_chgcnt;
42 DECLARE_RWSEM(nvmet_ana_sem);
43
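/*
 * Translate a Linux errno from the backend into an NVMe status code and
 * record the offset of the offending command field in req->error_loc so it
 * can be reported in the error log slot for this command.
 */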
44 inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
45 {
46         switch (errno) {
47         case 0:
48                 return NVME_SC_SUCCESS;
49         case -ENOSPC:
50                 req->error_loc = offsetof(struct nvme_rw_command, length);
51                 return NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
52         case -EREMOTEIO:
53                 req->error_loc = offsetof(struct nvme_rw_command, slba);
54                 return  NVME_SC_LBA_RANGE | NVME_SC_DNR;
55         case -EOPNOTSUPP:
56                 req->error_loc = offsetof(struct nvme_common_command, opcode);
57                 switch (req->cmd->common.opcode) {
58                 case nvme_cmd_dsm:
59                 case nvme_cmd_write_zeroes:
60                         return NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
61                 default:
62                         return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
63                 }
64                 break;
65         case -ENODATA:
66                 req->error_loc = offsetof(struct nvme_rw_command, nsid);
67                 return NVME_SC_ACCESS_DENIED;
68         case -EIO:
69                 fallthrough;
70         default:
71                 req->error_loc = offsetof(struct nvme_common_command, opcode);
72                 return NVME_SC_INTERNAL | NVME_SC_DNR;
73         }
74 }
75
76 u16 nvmet_report_invalid_opcode(struct nvmet_req *req)
77 {
78         pr_debug("unhandled cmd %d on qid %d\n", req->cmd->common.opcode,
79                  req->sq->qid);
80
81         req->error_loc = offsetof(struct nvme_common_command, opcode);
82         return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
83 }
84
85 static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
86                 const char *subsysnqn);
87
88 u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
89                 size_t len)
90 {
91         if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
92                 req->error_loc = offsetof(struct nvme_common_command, dptr);
93                 return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
94         }
95         return 0;
96 }
97
98 u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
99 {
100         if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
101                 req->error_loc = offsetof(struct nvme_common_command, dptr);
102                 return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
103         }
104         return 0;
105 }
106
107 u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
108 {
109         if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
110                 req->error_loc = offsetof(struct nvme_common_command, dptr);
111                 return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
112         }
113         return 0;
114 }
115
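/*
 * Return the highest enabled NSID in the subsystem.  xa_for_each() walks the
 * namespace xarray in ascending index order, so the last entry visited is
 * the maximum.
 */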
116 static u32 nvmet_max_nsid(struct nvmet_subsys *subsys)
117 {
118         struct nvmet_ns *cur;
119         unsigned long idx;
120         u32 nsid = 0;
121
122         xa_for_each(&subsys->namespaces, idx, cur)
123                 nsid = cur->nsid;
124
125         return nsid;
126 }
127
128 static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
129 {
130         return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
131 }
132
133 static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
134 {
135         struct nvmet_req *req;
136
137         mutex_lock(&ctrl->lock);
138         while (ctrl->nr_async_event_cmds) {
139                 req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
140                 mutex_unlock(&ctrl->lock);
141                 nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
142                 mutex_lock(&ctrl->lock);
143         }
144         mutex_unlock(&ctrl->lock);
145 }
146
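/*
 * Pair each pending AEN with an outstanding AER command.  ctrl->lock is
 * dropped around nvmet_req_complete() so the transport completion path
 * never runs under the controller lock.
 */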
147 static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
148 {
149         struct nvmet_async_event *aen;
150         struct nvmet_req *req;
151
152         mutex_lock(&ctrl->lock);
153         while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) {
154                 aen = list_first_entry(&ctrl->async_events,
155                                        struct nvmet_async_event, entry);
156                 req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
157                 nvmet_set_result(req, nvmet_async_event_result(aen));
158
159                 list_del(&aen->entry);
160                 kfree(aen);
161
162                 mutex_unlock(&ctrl->lock);
163                 trace_nvmet_async_event(ctrl, req->cqe->result.u32);
164                 nvmet_req_complete(req, 0);
165                 mutex_lock(&ctrl->lock);
166         }
167         mutex_unlock(&ctrl->lock);
168 }
169
170 static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
171 {
172         struct nvmet_async_event *aen, *tmp;
173
174         mutex_lock(&ctrl->lock);
175         list_for_each_entry_safe(aen, tmp, &ctrl->async_events, entry) {
176                 list_del(&aen->entry);
177                 kfree(aen);
178         }
179         mutex_unlock(&ctrl->lock);
180 }
181
182 static void nvmet_async_event_work(struct work_struct *work)
183 {
184         struct nvmet_ctrl *ctrl =
185                 container_of(work, struct nvmet_ctrl, async_event_work);
186
187         nvmet_async_events_process(ctrl);
188 }
189
190 void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
191                 u8 event_info, u8 log_page)
192 {
193         struct nvmet_async_event *aen;
194
195         aen = kmalloc(sizeof(*aen), GFP_KERNEL);
196         if (!aen)
197                 return;
198
199         aen->event_type = event_type;
200         aen->event_info = event_info;
201         aen->log_page = log_page;
202
203         mutex_lock(&ctrl->lock);
204         list_add_tail(&aen->entry, &ctrl->async_events);
205         mutex_unlock(&ctrl->lock);
206
207         schedule_work(&ctrl->async_event_work);
208 }
209
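/*
 * Record a namespace in the Changed Namespace List log.  Once more than
 * NVME_MAX_CHANGED_NAMESPACES distinct namespaces have changed, the list is
 * collapsed to the single entry 0xffffffff and nr_changed_ns is pinned to
 * U32_MAX so subsequent calls become no-ops.
 */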
210 static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
211 {
212         u32 i;
213
214         mutex_lock(&ctrl->lock);
215         if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES)
216                 goto out_unlock;
217
218         for (i = 0; i < ctrl->nr_changed_ns; i++) {
219                 if (ctrl->changed_ns_list[i] == nsid)
220                         goto out_unlock;
221         }
222
223         if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) {
224                 ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff);
225                 ctrl->nr_changed_ns = U32_MAX;
226                 goto out_unlock;
227         }
228
229         ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid;
230 out_unlock:
231         mutex_unlock(&ctrl->lock);
232 }
233
234 void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
235 {
236         struct nvmet_ctrl *ctrl;
237
238         lockdep_assert_held(&subsys->lock);
239
240         list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
241                 nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
242                 if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
243                         continue;
244                 nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
245                                 NVME_AER_NOTICE_NS_CHANGED,
246                                 NVME_LOG_CHANGED_NS);
247         }
248 }
249
250 void nvmet_send_ana_event(struct nvmet_subsys *subsys,
251                 struct nvmet_port *port)
252 {
253         struct nvmet_ctrl *ctrl;
254
255         mutex_lock(&subsys->lock);
256         list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
257                 if (port && ctrl->port != port)
258                         continue;
259                 if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
260                         continue;
261                 nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
262                                 NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
263         }
264         mutex_unlock(&subsys->lock);
265 }
266
267 void nvmet_port_send_ana_event(struct nvmet_port *port)
268 {
269         struct nvmet_subsys_link *p;
270
271         down_read(&nvmet_config_sem);
272         list_for_each_entry(p, &port->subsystems, entry)
273                 nvmet_send_ana_event(p->subsys, port);
274         up_read(&nvmet_config_sem);
275 }
276
277 int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
278 {
279         int ret = 0;
280
281         down_write(&nvmet_config_sem);
282         if (nvmet_transports[ops->type])
283                 ret = -EINVAL;
284         else
285                 nvmet_transports[ops->type] = ops;
286         up_write(&nvmet_config_sem);
287
288         return ret;
289 }
290 EXPORT_SYMBOL_GPL(nvmet_register_transport);
291
292 void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
293 {
294         down_write(&nvmet_config_sem);
295         nvmet_transports[ops->type] = NULL;
296         up_write(&nvmet_config_sem);
297 }
298 EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
299
300 void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
301 {
302         struct nvmet_ctrl *ctrl;
303
304         mutex_lock(&subsys->lock);
305         list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
306                 if (ctrl->port == port)
307                         ctrl->ops->delete_ctrl(ctrl);
308         }
309         mutex_unlock(&subsys->lock);
310 }
311
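/*
 * Called with nvmet_config_sem held for writing.  The semaphore is dropped
 * around request_module() because loading a transport module ends up in
 * nvmet_register_transport(), which needs the write lock itself.
 */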
312 int nvmet_enable_port(struct nvmet_port *port)
313 {
314         const struct nvmet_fabrics_ops *ops;
315         int ret;
316
317         lockdep_assert_held(&nvmet_config_sem);
318
319         ops = nvmet_transports[port->disc_addr.trtype];
320         if (!ops) {
321                 up_write(&nvmet_config_sem);
322                 request_module("nvmet-transport-%d", port->disc_addr.trtype);
323                 down_write(&nvmet_config_sem);
324                 ops = nvmet_transports[port->disc_addr.trtype];
325                 if (!ops) {
326                         pr_err("transport type %d not supported\n",
327                                 port->disc_addr.trtype);
328                         return -EINVAL;
329                 }
330         }
331
332         if (!try_module_get(ops->owner))
333                 return -EINVAL;
334
335         /*
336          * If the user requested PI support and the transport isn't PI capable,
337          * don't enable the port.
338          */
339         if (port->pi_enable && !(ops->flags & NVMF_METADATA_SUPPORTED)) {
340                 pr_err("T10-PI is not supported by transport type %d\n",
341                        port->disc_addr.trtype);
342                 ret = -EINVAL;
343                 goto out_put;
344         }
345
346         ret = ops->add_port(port);
347         if (ret)
348                 goto out_put;
349
350         /* If the transport didn't set inline_data_size, then disable it. */
351         if (port->inline_data_size < 0)
352                 port->inline_data_size = 0;
353
354         port->enabled = true;
355         port->tr_ops = ops;
356         return 0;
357
358 out_put:
359         module_put(ops->owner);
360         return ret;
361 }
362
363 void nvmet_disable_port(struct nvmet_port *port)
364 {
365         const struct nvmet_fabrics_ops *ops;
366
367         lockdep_assert_held(&nvmet_config_sem);
368
369         port->enabled = false;
370         port->tr_ops = NULL;
371
372         ops = nvmet_transports[port->disc_addr.trtype];
373         ops->remove_port(port);
374         module_put(ops->owner);
375 }
376
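/*
 * Traffic based keep-alive: if any command was seen during the last KATO
 * period the timer is simply rearmed; otherwise the host is assumed dead
 * and a fatal error is raised, which tears the controller down.
 */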
377 static void nvmet_keep_alive_timer(struct work_struct *work)
378 {
379         struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
380                         struct nvmet_ctrl, ka_work);
381         bool cmd_seen = ctrl->cmd_seen;
382
383         ctrl->cmd_seen = false;
384         if (cmd_seen) {
385                 pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
386                         ctrl->cntlid);
387                 schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
388                 return;
389         }
390
391         pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
392                 ctrl->cntlid, ctrl->kato);
393
394         nvmet_ctrl_fatal_error(ctrl);
395 }
396
397 void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
398 {
399         if (unlikely(ctrl->kato == 0))
400                 return;
401
402         pr_debug("ctrl %d start keep-alive timer for %d secs\n",
403                 ctrl->cntlid, ctrl->kato);
404
405         schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
406 }
407
408 void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
409 {
410         if (unlikely(ctrl->kato == 0))
411                 return;
412
413         pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);
414
415         cancel_delayed_work_sync(&ctrl->ka_work);
416 }
417
418 u16 nvmet_req_find_ns(struct nvmet_req *req)
419 {
420         u32 nsid = le32_to_cpu(req->cmd->common.nsid);
421
422         req->ns = xa_load(&nvmet_req_subsys(req)->namespaces, nsid);
423         if (unlikely(!req->ns)) {
424                 req->error_loc = offsetof(struct nvme_common_command, nsid);
425                 return NVME_SC_INVALID_NS | NVME_SC_DNR;
426         }
427
428         percpu_ref_get(&req->ns->ref);
429         return NVME_SC_SUCCESS;
430 }
431
432 static void nvmet_destroy_namespace(struct percpu_ref *ref)
433 {
434         struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);
435
436         complete(&ns->disable_done);
437 }
438
439 void nvmet_put_namespace(struct nvmet_ns *ns)
440 {
441         percpu_ref_put(&ns->ref);
442 }
443
444 static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
445 {
446         nvmet_bdev_ns_disable(ns);
447         nvmet_file_ns_disable(ns);
448 }
449
450 static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
451 {
452         int ret;
453         struct pci_dev *p2p_dev;
454
455         if (!ns->use_p2pmem)
456                 return 0;
457
458         if (!ns->bdev) {
459                 pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
460                 return -EINVAL;
461         }
462
463         if (!blk_queue_pci_p2pdma(ns->bdev->bd_disk->queue)) {
464                 pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
465                        ns->device_path);
466                 return -EINVAL;
467         }
468
469         if (ns->p2p_dev) {
470                 ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
471                 if (ret < 0)
472                         return -EINVAL;
473         } else {
474                 /*
475                  * Right now we just check that there is p2pmem available so
476                  * we can report an error to the user right away if there
477                  * is not. We'll find the actual device to use once we
478          * set up the controller when the port's device is available.
479                  */
480
481                 p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
482                 if (!p2p_dev) {
483                         pr_err("no peer-to-peer memory is available for %s\n",
484                                ns->device_path);
485                         return -EINVAL;
486                 }
487
488                 pci_dev_put(p2p_dev);
489         }
490
491         return 0;
492 }
493
494 /*
495  * Note: ctrl->subsys->lock should be held when calling this function
496  */
497 static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
498                                     struct nvmet_ns *ns)
499 {
500         struct device *clients[2];
501         struct pci_dev *p2p_dev;
502         int ret;
503
504         if (!ctrl->p2p_client || !ns->use_p2pmem)
505                 return;
506
507         if (ns->p2p_dev) {
508                 ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
509                 if (ret < 0)
510                         return;
511
512                 p2p_dev = pci_dev_get(ns->p2p_dev);
513         } else {
514                 clients[0] = ctrl->p2p_client;
515                 clients[1] = nvmet_ns_dev(ns);
516
517                 p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
518                 if (!p2p_dev) {
519                         pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
520                                dev_name(ctrl->p2p_client), ns->device_path);
521                         return;
522                 }
523         }
524
525         ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
526         if (ret < 0)
527                 pci_dev_put(p2p_dev);
528
529         pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
530                 ns->nsid);
531 }
532
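/*
 * Re-read the size of the backing block device or file and notify the
 * controllers through nvmet_ns_changed() if it changed.
 */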
533 void nvmet_ns_revalidate(struct nvmet_ns *ns)
534 {
535         loff_t oldsize = ns->size;
536
537         if (ns->bdev)
538                 nvmet_bdev_ns_revalidate(ns);
539         else
540                 nvmet_file_ns_revalidate(ns);
541
542         if (oldsize != ns->size)
543                 nvmet_ns_changed(ns->subsys, ns->nsid);
544 }
545
546 int nvmet_ns_enable(struct nvmet_ns *ns)
547 {
548         struct nvmet_subsys *subsys = ns->subsys;
549         struct nvmet_ctrl *ctrl;
550         int ret;
551
552         mutex_lock(&subsys->lock);
553         ret = 0;
554
555         if (nvmet_passthru_ctrl(subsys)) {
556                 pr_info("cannot enable both passthru and regular namespaces for a single subsystem\n");
557                 goto out_unlock;
558         }
559
560         if (ns->enabled)
561                 goto out_unlock;
562
563         ret = -EMFILE;
564         if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
565                 goto out_unlock;
566
567         ret = nvmet_bdev_ns_enable(ns);
568         if (ret == -ENOTBLK)
569                 ret = nvmet_file_ns_enable(ns);
570         if (ret)
571                 goto out_unlock;
572
573         ret = nvmet_p2pmem_ns_enable(ns);
574         if (ret)
575                 goto out_dev_disable;
576
577         list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
578                 nvmet_p2pmem_ns_add_p2p(ctrl, ns);
579
580         ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
581                                 0, GFP_KERNEL);
582         if (ret)
583                 goto out_dev_put;
584
585         if (ns->nsid > subsys->max_nsid)
586                 subsys->max_nsid = ns->nsid;
587
588         ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
589         if (ret)
590                 goto out_restore_subsys_maxnsid;
591
592         subsys->nr_namespaces++;
593
594         nvmet_ns_changed(subsys, ns->nsid);
595         ns->enabled = true;
596         ret = 0;
597 out_unlock:
598         mutex_unlock(&subsys->lock);
599         return ret;
600
601 out_restore_subsys_maxnsid:
602         subsys->max_nsid = nvmet_max_nsid(subsys);
603         percpu_ref_exit(&ns->ref);
604 out_dev_put:
605         list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
606                 pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
607 out_dev_disable:
608         nvmet_ns_dev_disable(ns);
609         goto out_unlock;
610 }
611
612 void nvmet_ns_disable(struct nvmet_ns *ns)
613 {
614         struct nvmet_subsys *subsys = ns->subsys;
615         struct nvmet_ctrl *ctrl;
616
617         mutex_lock(&subsys->lock);
618         if (!ns->enabled)
619                 goto out_unlock;
620
621         ns->enabled = false;
622         xa_erase(&ns->subsys->namespaces, ns->nsid);
623         if (ns->nsid == subsys->max_nsid)
624                 subsys->max_nsid = nvmet_max_nsid(subsys);
625
626         list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
627                 pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
628
629         mutex_unlock(&subsys->lock);
630
631         /*
632          * Now that we removed the namespaces from the lookup list, we
633          * can kill the per_cpu ref and wait for any remaining references
634          * to be dropped, as well as a RCU grace period for anyone only
635          * using the namespace under rcu_read_lock().  Note that we can't
636          * use call_rcu here as we need to ensure the namespaces have
637          * been fully destroyed before unloading the module.
638          */
639         percpu_ref_kill(&ns->ref);
640         synchronize_rcu();
641         wait_for_completion(&ns->disable_done);
642         percpu_ref_exit(&ns->ref);
643
644         mutex_lock(&subsys->lock);
645
646         subsys->nr_namespaces--;
647         nvmet_ns_changed(subsys, ns->nsid);
648         nvmet_ns_dev_disable(ns);
649 out_unlock:
650         mutex_unlock(&subsys->lock);
651 }
652
653 void nvmet_ns_free(struct nvmet_ns *ns)
654 {
655         nvmet_ns_disable(ns);
656
657         down_write(&nvmet_ana_sem);
658         nvmet_ana_group_enabled[ns->anagrpid]--;
659         up_write(&nvmet_ana_sem);
660
661         kfree(ns->device_path);
662         kfree(ns);
663 }
664
665 struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
666 {
667         struct nvmet_ns *ns;
668
669         ns = kzalloc(sizeof(*ns), GFP_KERNEL);
670         if (!ns)
671                 return NULL;
672
673         init_completion(&ns->disable_done);
674
675         ns->nsid = nsid;
676         ns->subsys = subsys;
677
678         down_write(&nvmet_ana_sem);
679         ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
680         nvmet_ana_group_enabled[ns->anagrpid]++;
681         up_write(&nvmet_ana_sem);
682
683         uuid_gen(&ns->uuid);
684         ns->buffered_io = false;
685
686         return ns;
687 }
688
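/*
 * Advance the submission queue head with a lockless cmpxchg() loop, wrapping
 * at the queue size, and mirror the new value into the completion entry.
 */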
689 static void nvmet_update_sq_head(struct nvmet_req *req)
690 {
691         if (req->sq->size) {
692                 u32 old_sqhd, new_sqhd;
693
694                 do {
695                         old_sqhd = req->sq->sqhd;
696                         new_sqhd = (old_sqhd + 1) % req->sq->size;
697                 } while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
698                                         old_sqhd);
699         }
700         req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
701 }
702
703 static void nvmet_set_error(struct nvmet_req *req, u16 status)
704 {
705         struct nvmet_ctrl *ctrl = req->sq->ctrl;
706         struct nvme_error_slot *new_error_slot;
707         unsigned long flags;
708
709         req->cqe->status = cpu_to_le16(status << 1);
710
711         if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
712                 return;
713
714         spin_lock_irqsave(&ctrl->error_lock, flags);
715         ctrl->err_counter++;
716         new_error_slot =
717                 &ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];
718
719         new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
720         new_error_slot->sqid = cpu_to_le16(req->sq->qid);
721         new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
722         new_error_slot->status_field = cpu_to_le16(status << 1);
723         new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
724         new_error_slot->lba = cpu_to_le64(req->error_slba);
725         new_error_slot->nsid = req->cmd->common.nsid;
726         spin_unlock_irqrestore(&ctrl->error_lock, flags);
727
728         /* set the more bit for this request */
729         req->cqe->status |= cpu_to_le16(1 << 14);
730 }
731
732 static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
733 {
734         if (!req->sq->sqhd_disabled)
735                 nvmet_update_sq_head(req);
736         req->cqe->sq_id = cpu_to_le16(req->sq->qid);
737         req->cqe->command_id = req->cmd->common.command_id;
738
739         if (unlikely(status))
740                 nvmet_set_error(req, status);
741
742         trace_nvmet_req_complete(req);
743
744         if (req->ns)
745                 nvmet_put_namespace(req->ns);
746         req->ops->queue_response(req);
747 }
748
749 void nvmet_req_complete(struct nvmet_req *req, u16 status)
750 {
751         __nvmet_req_complete(req, status);
752         percpu_ref_put(&req->sq->ref);
753 }
754 EXPORT_SYMBOL_GPL(nvmet_req_complete);
755
756 void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
757                 u16 qid, u16 size)
758 {
759         cq->qid = qid;
760         cq->size = size;
761 }
762
763 void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
764                 u16 qid, u16 size)
765 {
766         sq->sqhd = 0;
767         sq->qid = qid;
768         sq->size = size;
769
770         ctrl->sqs[qid] = sq;
771 }
772
773 static void nvmet_confirm_sq(struct percpu_ref *ref)
774 {
775         struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
776
777         complete(&sq->confirm_done);
778 }
779
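/*
 * Tear down a submission queue: kill the percpu ref, wait for the switch to
 * atomic mode (confirm_done) and for the last reference to drop (free_done),
 * then release the queue's controller reference, if any.
 */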
780 void nvmet_sq_destroy(struct nvmet_sq *sq)
781 {
782         struct nvmet_ctrl *ctrl = sq->ctrl;
783
784         /*
785          * If this is the admin queue, complete all AERs so that our
786          * queue doesn't have outstanding requests on it.
787          */
788         if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq)
789                 nvmet_async_events_failall(ctrl);
790         percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
791         wait_for_completion(&sq->confirm_done);
792         wait_for_completion(&sq->free_done);
793         percpu_ref_exit(&sq->ref);
794
795         if (ctrl) {
796                 nvmet_ctrl_put(ctrl);
797                 sq->ctrl = NULL; /* allows reusing the queue later */
798         }
799 }
800 EXPORT_SYMBOL_GPL(nvmet_sq_destroy);
801
802 static void nvmet_sq_free(struct percpu_ref *ref)
803 {
804         struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
805
806         complete(&sq->free_done);
807 }
808
809 int nvmet_sq_init(struct nvmet_sq *sq)
810 {
811         int ret;
812
813         ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
814         if (ret) {
815                 pr_err("percpu_ref init failed!\n");
816                 return ret;
817         }
818         init_completion(&sq->free_done);
819         init_completion(&sq->confirm_done);
820
821         return 0;
822 }
823 EXPORT_SYMBOL_GPL(nvmet_sq_init);
824
825 static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
826                 struct nvmet_ns *ns)
827 {
828         enum nvme_ana_state state = port->ana_state[ns->anagrpid];
829
830         if (unlikely(state == NVME_ANA_INACCESSIBLE))
831                 return NVME_SC_ANA_INACCESSIBLE;
832         if (unlikely(state == NVME_ANA_PERSISTENT_LOSS))
833                 return NVME_SC_ANA_PERSISTENT_LOSS;
834         if (unlikely(state == NVME_ANA_CHANGE))
835                 return NVME_SC_ANA_TRANSITION;
836         return 0;
837 }
838
839 static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
840 {
841         if (unlikely(req->ns->readonly)) {
842                 switch (req->cmd->common.opcode) {
843                 case nvme_cmd_read:
844                 case nvme_cmd_flush:
845                         break;
846                 default:
847                         return NVME_SC_NS_WRITE_PROTECTED;
848                 }
849         }
850
851         return 0;
852 }
853
854 static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
855 {
856         u16 ret;
857
858         ret = nvmet_check_ctrl_status(req);
859         if (unlikely(ret))
860                 return ret;
861
862         if (nvmet_req_passthru_ctrl(req))
863                 return nvmet_parse_passthru_io_cmd(req);
864
865         ret = nvmet_req_find_ns(req);
866         if (unlikely(ret))
867                 return ret;
868
869         ret = nvmet_check_ana_state(req->port, req->ns);
870         if (unlikely(ret)) {
871                 req->error_loc = offsetof(struct nvme_common_command, nsid);
872                 return ret;
873         }
874         ret = nvmet_io_cmd_check_access(req);
875         if (unlikely(ret)) {
876                 req->error_loc = offsetof(struct nvme_common_command, nsid);
877                 return ret;
878         }
879
880         if (req->ns->file)
881                 return nvmet_file_parse_io_cmd(req);
882
883         return nvmet_bdev_parse_io_cmd(req);
884 }
885
886 bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
887                 struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
888 {
889         u8 flags = req->cmd->common.flags;
890         u16 status;
891
892         req->cq = cq;
893         req->sq = sq;
894         req->ops = ops;
895         req->sg = NULL;
896         req->metadata_sg = NULL;
897         req->sg_cnt = 0;
898         req->metadata_sg_cnt = 0;
899         req->transfer_len = 0;
900         req->metadata_len = 0;
901         req->cqe->status = 0;
902         req->cqe->sq_head = 0;
903         req->ns = NULL;
904         req->error_loc = NVMET_NO_ERROR_LOC;
905         req->error_slba = 0;
906
907         /* no support for fused commands yet */
908         if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
909                 req->error_loc = offsetof(struct nvme_common_command, flags);
910                 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
911                 goto fail;
912         }
913
914         /*
915          * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
916          * contains an address of a single contiguous physical buffer that is
917          * byte aligned.
918          */
919         if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
920                 req->error_loc = offsetof(struct nvme_common_command, flags);
921                 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
922                 goto fail;
923         }
924
925         if (unlikely(!req->sq->ctrl))
926                 /* will return an error for any non-connect command: */
927                 status = nvmet_parse_connect_cmd(req);
928         else if (likely(req->sq->qid != 0))
929                 status = nvmet_parse_io_cmd(req);
930         else
931                 status = nvmet_parse_admin_cmd(req);
932
933         if (status)
934                 goto fail;
935
936         trace_nvmet_req_init(req, req->cmd);
937
938         if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
939                 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
940                 goto fail;
941         }
942
943         if (sq->ctrl)
944                 sq->ctrl->cmd_seen = true;
945
946         return true;
947
948 fail:
949         __nvmet_req_complete(req, status);
950         return false;
951 }
952 EXPORT_SYMBOL_GPL(nvmet_req_init);
953
954 void nvmet_req_uninit(struct nvmet_req *req)
955 {
956         percpu_ref_put(&req->sq->ref);
957         if (req->ns)
958                 nvmet_put_namespace(req->ns);
959 }
960 EXPORT_SYMBOL_GPL(nvmet_req_uninit);
961
962 bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
963 {
964         if (unlikely(len != req->transfer_len)) {
965                 req->error_loc = offsetof(struct nvme_common_command, dptr);
966                 nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
967                 return false;
968         }
969
970         return true;
971 }
972 EXPORT_SYMBOL_GPL(nvmet_check_transfer_len);
973
974 bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
975 {
976         if (unlikely(data_len > req->transfer_len)) {
977                 req->error_loc = offsetof(struct nvme_common_command, dptr);
978                 nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
979                 return false;
980         }
981
982         return true;
983 }
984
985 static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
986 {
987         return req->transfer_len - req->metadata_len;
988 }
989
990 static int nvmet_req_alloc_p2pmem_sgls(struct nvmet_req *req)
991 {
992         req->sg = pci_p2pmem_alloc_sgl(req->p2p_dev, &req->sg_cnt,
993                         nvmet_data_transfer_len(req));
994         if (!req->sg)
995                 goto out_err;
996
997         if (req->metadata_len) {
998                 req->metadata_sg = pci_p2pmem_alloc_sgl(req->p2p_dev,
999                                 &req->metadata_sg_cnt, req->metadata_len);
1000                 if (!req->metadata_sg)
1001                         goto out_free_sg;
1002         }
1003         return 0;
1004 out_free_sg:
1005         pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
1006 out_err:
1007         return -ENOMEM;
1008 }
1009
1010 static bool nvmet_req_find_p2p_dev(struct nvmet_req *req)
1011 {
1012         if (!IS_ENABLED(CONFIG_PCI_P2PDMA))
1013                 return false;
1014
1015         if (req->sq->ctrl && req->sq->qid && req->ns) {
1016                 req->p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
1017                                                  req->ns->nsid);
1018                 if (req->p2p_dev)
1019                         return true;
1020         }
1021
1022         req->p2p_dev = NULL;
1023         return false;
1024 }
1025
1026 int nvmet_req_alloc_sgls(struct nvmet_req *req)
1027 {
1028         if (nvmet_req_find_p2p_dev(req) && !nvmet_req_alloc_p2pmem_sgls(req))
1029                 return 0;
1030
1031         req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
1032                             &req->sg_cnt);
1033         if (unlikely(!req->sg))
1034                 goto out;
1035
1036         if (req->metadata_len) {
1037                 req->metadata_sg = sgl_alloc(req->metadata_len, GFP_KERNEL,
1038                                              &req->metadata_sg_cnt);
1039                 if (unlikely(!req->metadata_sg))
1040                         goto out_free;
1041         }
1042
1043         return 0;
1044 out_free:
1045         sgl_free(req->sg);
1046 out:
1047         return -ENOMEM;
1048 }
1049 EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgls);
1050
1051 void nvmet_req_free_sgls(struct nvmet_req *req)
1052 {
1053         if (req->p2p_dev) {
1054                 pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
1055                 if (req->metadata_sg)
1056                         pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
1057         } else {
1058                 sgl_free(req->sg);
1059                 if (req->metadata_sg)
1060                         sgl_free(req->metadata_sg);
1061         }
1062
1063         req->sg = NULL;
1064         req->metadata_sg = NULL;
1065         req->sg_cnt = 0;
1066         req->metadata_sg_cnt = 0;
1067 }
1068 EXPORT_SYMBOL_GPL(nvmet_req_free_sgls);
1069
1070 static inline bool nvmet_cc_en(u32 cc)
1071 {
1072         return (cc >> NVME_CC_EN_SHIFT) & 0x1;
1073 }
1074
1075 static inline u8 nvmet_cc_css(u32 cc)
1076 {
1077         return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
1078 }
1079
1080 static inline u8 nvmet_cc_mps(u32 cc)
1081 {
1082         return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
1083 }
1084
1085 static inline u8 nvmet_cc_ams(u32 cc)
1086 {
1087         return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
1088 }
1089
1090 static inline u8 nvmet_cc_shn(u32 cc)
1091 {
1092         return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
1093 }
1094
1095 static inline u8 nvmet_cc_iosqes(u32 cc)
1096 {
1097         return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
1098 }
1099
1100 static inline u8 nvmet_cc_iocqes(u32 cc)
1101 {
1102         return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
1103 }
1104
1105 static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
1106 {
1107         lockdep_assert_held(&ctrl->lock);
1108
1109         /*
1110          * Only I/O controllers should verify iosqes,iocqes.
1111          * Strictly speaking, the spec says a discovery controller
1112          * should verify iosqes,iocqes are zeroed, however that
1113          * would break backwards compatibility, so don't enforce it.
1114          */
1115         if (ctrl->subsys->type != NVME_NQN_DISC &&
1116             (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
1117              nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
1118                 ctrl->csts = NVME_CSTS_CFS;
1119                 return;
1120         }
1121
1122         if (nvmet_cc_mps(ctrl->cc) != 0 ||
1123             nvmet_cc_ams(ctrl->cc) != 0 ||
1124             nvmet_cc_css(ctrl->cc) != 0) {
1125                 ctrl->csts = NVME_CSTS_CFS;
1126                 return;
1127         }
1128
1129         ctrl->csts = NVME_CSTS_RDY;
1130
1131         /*
1132          * Controllers that are not yet enabled should not really enforce the
1133          * keep alive timeout, but we still want to track a timeout and cleanup
1134          * in case a host died before it enabled the controller.  Hence, simply
1135          * reset the keep alive timer when the controller is enabled.
1136          */
1137         if (ctrl->kato)
1138                 mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
1139 }
1140
1141 static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
1142 {
1143         lockdep_assert_held(&ctrl->lock);
1144
1145         /* XXX: tear down queues? */
1146         ctrl->csts &= ~NVME_CSTS_RDY;
1147         ctrl->cc = 0;
1148 }
1149
1150 void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
1151 {
1152         u32 old;
1153
1154         mutex_lock(&ctrl->lock);
1155         old = ctrl->cc;
1156         ctrl->cc = new;
1157
1158         if (nvmet_cc_en(new) && !nvmet_cc_en(old))
1159                 nvmet_start_ctrl(ctrl);
1160         if (!nvmet_cc_en(new) && nvmet_cc_en(old))
1161                 nvmet_clear_ctrl(ctrl);
1162         if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
1163                 nvmet_clear_ctrl(ctrl);
1164                 ctrl->csts |= NVME_CSTS_SHST_CMPLT;
1165         }
1166         if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
1167                 ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
1168         mutex_unlock(&ctrl->lock);
1169 }
1170
1171 static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
1172 {
1173         /* command sets supported: NVMe command set: */
1174         ctrl->cap = (1ULL << 37);
1175         /* CC.EN timeout in 500msec units: */
1176         ctrl->cap |= (15ULL << 24);
1177         /* maximum queue entries supported: */
1178         ctrl->cap |= NVMET_QUEUE_SIZE - 1;
1179 }
1180
1181 struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
1182                                        const char *hostnqn, u16 cntlid,
1183                                        struct nvmet_req *req)
1184 {
1185         struct nvmet_ctrl *ctrl = NULL;
1186         struct nvmet_subsys *subsys;
1187
1188         subsys = nvmet_find_get_subsys(req->port, subsysnqn);
1189         if (!subsys) {
1190                 pr_warn("connect request for invalid subsystem %s!\n",
1191                         subsysnqn);
1192                 req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
1193                 goto out;
1194         }
1195
1196         mutex_lock(&subsys->lock);
1197         list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
1198                 if (ctrl->cntlid == cntlid) {
1199                         if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
1200                                 pr_warn("hostnqn mismatch.\n");
1201                                 continue;
1202                         }
1203                         if (!kref_get_unless_zero(&ctrl->ref))
1204                                 continue;
1205
1206                         /* ctrl found */
1207                         goto found;
1208                 }
1209         }
1210
1211         ctrl = NULL; /* ctrl not found */
1212         pr_warn("could not find controller %d for subsys %s / host %s\n",
1213                 cntlid, subsysnqn, hostnqn);
1214         req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
1215
1216 found:
1217         mutex_unlock(&subsys->lock);
1218         nvmet_subsys_put(subsys);
1219 out:
1220         return ctrl;
1221 }
1222
1223 u16 nvmet_check_ctrl_status(struct nvmet_req *req)
1224 {
1225         if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
1226                 pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
1227                        req->cmd->common.opcode, req->sq->qid);
1228                 return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
1229         }
1230
1231         if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
1232                 pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
1233                        req->cmd->common.opcode, req->sq->qid);
1234                 return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
1235         }
1236         return 0;
1237 }
1238
1239 bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
1240 {
1241         struct nvmet_host_link *p;
1242
1243         lockdep_assert_held(&nvmet_config_sem);
1244
1245         if (subsys->allow_any_host)
1246                 return true;
1247
1248         if (subsys->type == NVME_NQN_DISC) /* allow all access to disc subsys */
1249                 return true;
1250
1251         list_for_each_entry(p, &subsys->hosts, entry) {
1252                 if (!strcmp(nvmet_host_name(p->host), hostnqn))
1253                         return true;
1254         }
1255
1256         return false;
1257 }
1258
1259 /*
1260  * Note: ctrl->subsys->lock should be held when calling this function
1261  */
1262 static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
1263                 struct nvmet_req *req)
1264 {
1265         struct nvmet_ns *ns;
1266         unsigned long idx;
1267
1268         if (!req->p2p_client)
1269                 return;
1270
1271         ctrl->p2p_client = get_device(req->p2p_client);
1272
1273         xa_for_each(&ctrl->subsys->namespaces, idx, ns)
1274                 nvmet_p2pmem_ns_add_p2p(ctrl, ns);
1275 }
1276
1277 /*
1278  * Note: ctrl->subsys->lock should be held when calling this function
1279  */
1280 static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
1281 {
1282         struct radix_tree_iter iter;
1283         void __rcu **slot;
1284
1285         radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
1286                 pci_dev_put(radix_tree_deref_slot(slot));
1287
1288         put_device(ctrl->p2p_client);
1289 }
1290
1291 static void nvmet_fatal_error_handler(struct work_struct *work)
1292 {
1293         struct nvmet_ctrl *ctrl =
1294                         container_of(work, struct nvmet_ctrl, fatal_err_work);
1295
1296         pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
1297         ctrl->ops->delete_ctrl(ctrl);
1298 }
1299
1300 u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
1301                 struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
1302 {
1303         struct nvmet_subsys *subsys;
1304         struct nvmet_ctrl *ctrl;
1305         int ret;
1306         u16 status;
1307
1308         status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
1309         subsys = nvmet_find_get_subsys(req->port, subsysnqn);
1310         if (!subsys) {
1311                 pr_warn("connect request for invalid subsystem %s!\n",
1312                         subsysnqn);
1313                 req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
1314                 req->error_loc = offsetof(struct nvme_common_command, dptr);
1315                 goto out;
1316         }
1317
1318         down_read(&nvmet_config_sem);
1319         if (!nvmet_host_allowed(subsys, hostnqn)) {
1320                 pr_info("connect by host %s for subsystem %s not allowed\n",
1321                         hostnqn, subsysnqn);
1322                 req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
1323                 up_read(&nvmet_config_sem);
1324                 status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
1325                 req->error_loc = offsetof(struct nvme_common_command, dptr);
1326                 goto out_put_subsystem;
1327         }
1328         up_read(&nvmet_config_sem);
1329
1330         status = NVME_SC_INTERNAL;
1331         ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
1332         if (!ctrl)
1333                 goto out_put_subsystem;
1334         mutex_init(&ctrl->lock);
1335
1336         nvmet_init_cap(ctrl);
1337
1338         ctrl->port = req->port;
1339
1340         INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
1341         INIT_LIST_HEAD(&ctrl->async_events);
1342         INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
1343         INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
1344         INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
1345
1346         memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
1347         memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
1348
1349         kref_init(&ctrl->ref);
1350         ctrl->subsys = subsys;
1351         WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);
1352
1353         ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
1354                         sizeof(__le32), GFP_KERNEL);
1355         if (!ctrl->changed_ns_list)
1356                 goto out_free_ctrl;
1357
1358         ctrl->sqs = kcalloc(subsys->max_qid + 1,
1359                         sizeof(struct nvmet_sq *),
1360                         GFP_KERNEL);
1361         if (!ctrl->sqs)
1362                 goto out_free_changed_ns_list;
1363
1364         if (subsys->cntlid_min > subsys->cntlid_max)
1365                 goto out_free_sqs;
1366
1367         ret = ida_simple_get(&cntlid_ida,
1368                              subsys->cntlid_min, subsys->cntlid_max,
1369                              GFP_KERNEL);
1370         if (ret < 0) {
1371                 status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
1372                 goto out_free_sqs;
1373         }
1374         ctrl->cntlid = ret;
1375
1376         ctrl->ops = req->ops;
1377
1378         /*
1379          * Discovery controllers may use some arbitrary high value
1380          * in order to clean up stale discovery sessions
1381          */
1382         if ((ctrl->subsys->type == NVME_NQN_DISC) && !kato)
1383                 kato = NVMET_DISC_KATO_MS;
1384
1385         /* keep-alive timeout in seconds */
1386         ctrl->kato = DIV_ROUND_UP(kato, 1000);
1387
1388         ctrl->err_counter = 0;
1389         spin_lock_init(&ctrl->error_lock);
1390
1391         nvmet_start_keep_alive_timer(ctrl);
1392
1393         mutex_lock(&subsys->lock);
1394         list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
1395         nvmet_setup_p2p_ns_map(ctrl, req);
1396         mutex_unlock(&subsys->lock);
1397
1398         *ctrlp = ctrl;
1399         return 0;
1400
1401 out_free_sqs:
1402         kfree(ctrl->sqs);
1403 out_free_changed_ns_list:
1404         kfree(ctrl->changed_ns_list);
1405 out_free_ctrl:
1406         kfree(ctrl);
1407 out_put_subsystem:
1408         nvmet_subsys_put(subsys);
1409 out:
1410         return status;
1411 }
1412
1413 static void nvmet_ctrl_free(struct kref *ref)
1414 {
1415         struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
1416         struct nvmet_subsys *subsys = ctrl->subsys;
1417
1418         mutex_lock(&subsys->lock);
1419         nvmet_release_p2p_ns_map(ctrl);
1420         list_del(&ctrl->subsys_entry);
1421         mutex_unlock(&subsys->lock);
1422
1423         nvmet_stop_keep_alive_timer(ctrl);
1424
1425         flush_work(&ctrl->async_event_work);
1426         cancel_work_sync(&ctrl->fatal_err_work);
1427
1428         ida_simple_remove(&cntlid_ida, ctrl->cntlid);
1429
1430         nvmet_async_events_free(ctrl);
1431         kfree(ctrl->sqs);
1432         kfree(ctrl->changed_ns_list);
1433         kfree(ctrl);
1434
1435         nvmet_subsys_put(subsys);
1436 }
1437
1438 void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
1439 {
1440         kref_put(&ctrl->ref, nvmet_ctrl_free);
1441 }
1442
1443 void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
1444 {
1445         mutex_lock(&ctrl->lock);
1446         if (!(ctrl->csts & NVME_CSTS_CFS)) {
1447                 ctrl->csts |= NVME_CSTS_CFS;
1448                 schedule_work(&ctrl->fatal_err_work);
1449         }
1450         mutex_unlock(&ctrl->lock);
1451 }
1452 EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
1453
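/*
 * Look up a subsystem reachable through @port by NQN and take a reference
 * on it.  The well-known discovery NQN always resolves to the global
 * discovery subsystem, independent of the port's subsystem links.
 */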
1454 static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
1455                 const char *subsysnqn)
1456 {
1457         struct nvmet_subsys_link *p;
1458
1459         if (!port)
1460                 return NULL;
1461
1462         if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
1463                 if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
1464                         return NULL;
1465                 return nvmet_disc_subsys;
1466         }
1467
1468         down_read(&nvmet_config_sem);
1469         list_for_each_entry(p, &port->subsystems, entry) {
1470                 if (!strncmp(p->subsys->subsysnqn, subsysnqn,
1471                                 NVMF_NQN_SIZE)) {
1472                         if (!kref_get_unless_zero(&p->subsys->ref))
1473                                 break;
1474                         up_read(&nvmet_config_sem);
1475                         return p->subsys;
1476                 }
1477         }
1478         up_read(&nvmet_config_sem);
1479         return NULL;
1480 }
1481
1482 struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
1483                 enum nvme_subsys_type type)
1484 {
1485         struct nvmet_subsys *subsys;
1486         char serial[NVMET_SN_MAX_SIZE / 2];
1487         int ret;
1488
1489         subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
1490         if (!subsys)
1491                 return ERR_PTR(-ENOMEM);
1492
1493         subsys->ver = NVMET_DEFAULT_VS;
1494         /* generate a random serial number as our controllers are ephemeral: */
1495         get_random_bytes(&serial, sizeof(serial));
1496         bin2hex(subsys->serial, &serial, sizeof(serial));
1497
1498         subsys->model_number = kstrdup(NVMET_DEFAULT_CTRL_MODEL, GFP_KERNEL);
1499         if (!subsys->model_number) {
1500                 ret = -ENOMEM;
1501                 goto free_subsys;
1502         }
1503
1504         switch (type) {
1505         case NVME_NQN_NVME:
1506                 subsys->max_qid = NVMET_NR_QUEUES;
1507                 break;
1508         case NVME_NQN_DISC:
1509                 subsys->max_qid = 0;
1510                 break;
1511         default:
1512                 pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
1513                 ret = -EINVAL;
1514                 goto free_mn;
1515         }
1516         subsys->type = type;
1517         subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
1518                         GFP_KERNEL);
1519         if (!subsys->subsysnqn) {
1520                 ret = -ENOMEM;
1521                 goto free_mn;
1522         }
1523         subsys->cntlid_min = NVME_CNTLID_MIN;
1524         subsys->cntlid_max = NVME_CNTLID_MAX;
1525         kref_init(&subsys->ref);
1526
1527         mutex_init(&subsys->lock);
1528         xa_init(&subsys->namespaces);
1529         INIT_LIST_HEAD(&subsys->ctrls);
1530         INIT_LIST_HEAD(&subsys->hosts);
1531
1532         return subsys;
1533
1534 free_mn:
1535         kfree(subsys->model_number);
1536 free_subsys:
1537         kfree(subsys);
1538         return ERR_PTR(ret);
1539 }
1540
1541 static void nvmet_subsys_free(struct kref *ref)
1542 {
1543         struct nvmet_subsys *subsys =
1544                 container_of(ref, struct nvmet_subsys, ref);
1545
1546         WARN_ON_ONCE(!xa_empty(&subsys->namespaces));
1547
1548         xa_destroy(&subsys->namespaces);
1549         nvmet_passthru_subsys_free(subsys);
1550
1551         kfree(subsys->subsysnqn);
1552         kfree(subsys->model_number);
1553         kfree(subsys);
1554 }
1555
1556 void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
1557 {
1558         struct nvmet_ctrl *ctrl;
1559
1560         mutex_lock(&subsys->lock);
1561         list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
1562                 ctrl->ops->delete_ctrl(ctrl);
1563         mutex_unlock(&subsys->lock);
1564 }
1565
1566 void nvmet_subsys_put(struct nvmet_subsys *subsys)
1567 {
1568         kref_put(&subsys->ref, nvmet_subsys_free);
1569 }
1570
1571 static int __init nvmet_init(void)
1572 {
1573         int error;
1574
1575         nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;
1576
1577         buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
1578                         WQ_MEM_RECLAIM, 0);
1579         if (!buffered_io_wq) {
1580                 error = -ENOMEM;
1581                 goto out;
1582         }
1583
1584         error = nvmet_init_discovery();
1585         if (error)
1586                 goto out_free_work_queue;
1587
1588         error = nvmet_init_configfs();
1589         if (error)
1590                 goto out_exit_discovery;
1591         return 0;
1592
1593 out_exit_discovery:
1594         nvmet_exit_discovery();
1595 out_free_work_queue:
1596         destroy_workqueue(buffered_io_wq);
1597 out:
1598         return error;
1599 }
1600
1601 static void __exit nvmet_exit(void)
1602 {
1603         nvmet_exit_configfs();
1604         nvmet_exit_discovery();
1605         ida_destroy(&cntlid_ida);
1606         destroy_workqueue(buffered_io_wq);
1607
1608         BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
1609         BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
1610 }
1611
1612 module_init(nvmet_init);
1613 module_exit(nvmet_exit);
1614
1615 MODULE_LICENSE("GPL v2");