drivers/nvme/target/configfs.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Configfs interface for the NVMe target.
4  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
5  */
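/*
 * Editor's note: overview added for clarity; not part of the original header.
 *
 * This file implements the configfs tree through which user space configures
 * the NVMe target.  nvmet_init_configfs() at the bottom registers a configfs
 * subsystem named "nvmet" with three default groups:
 *
 *   subsystems/  - NVMe subsystems, each with namespaces/, allowed_hosts/
 *                  (and, when built in, passthru/)
 *   ports/       - transport ports carrying the addr_* attributes, each with
 *                  subsystems/, referrals/ and ana_groups/
 *   hosts/       - host NQNs that can be linked into allowed_hosts/
 *
 * With configfs mounted in the usual place the tree appears under
 * /sys/kernel/config/nvmet (path shown for illustration only).
 */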
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/slab.h>
10 #include <linux/stat.h>
11 #include <linux/ctype.h>
12 #include <linux/pci.h>
13 #include <linux/pci-p2pdma.h>
14
15 #include "nvmet.h"
16
17 static const struct config_item_type nvmet_host_type;
18 static const struct config_item_type nvmet_subsys_type;
19
20 static LIST_HEAD(nvmet_ports_list);
21 struct list_head *nvmet_ports = &nvmet_ports_list;
22
23 struct nvmet_type_name_map {
24         u8              type;
25         const char      *name;
26 };
27
28 static struct nvmet_type_name_map nvmet_transport[] = {
29         { NVMF_TRTYPE_RDMA,     "rdma" },
30         { NVMF_TRTYPE_FC,       "fc" },
31         { NVMF_TRTYPE_TCP,      "tcp" },
32         { NVMF_TRTYPE_LOOP,     "loop" },
33 };
34
35 static const struct nvmet_type_name_map nvmet_addr_family[] = {
36         { NVMF_ADDR_FAMILY_PCI,         "pcie" },
37         { NVMF_ADDR_FAMILY_IP4,         "ipv4" },
38         { NVMF_ADDR_FAMILY_IP6,         "ipv6" },
39         { NVMF_ADDR_FAMILY_IB,          "ib" },
40         { NVMF_ADDR_FAMILY_FC,          "fc" },
41         { NVMF_ADDR_FAMILY_LOOP,        "loop" },
42 };
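/*
 * These type/name maps translate the strings user space writes into the
 * addr_* attributes into the numeric NVMe-over-Fabrics constants stored in
 * the port's discovery address (e.g. writing "tcp" to addr_trtype ends up as
 * NVMF_TRTYPE_TCP in port->disc_addr.trtype, see nvmet_addr_trtype_store()).
 */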
43
44 static bool nvmet_is_port_enabled(struct nvmet_port *p, const char *caller)
45 {
46         if (p->enabled)
47                 pr_err("Disable port '%u' before changing attribute in %s\n",
48                        le16_to_cpu(p->disc_addr.portid), caller);
49         return p->enabled;
50 }
51
52 /*
53  * nvmet_port Generic ConfigFS definitions.
54  * Used in any place in the ConfigFS tree that refers to an address.
55  */
56 static ssize_t nvmet_addr_adrfam_show(struct config_item *item, char *page)
57 {
58         u8 adrfam = to_nvmet_port(item)->disc_addr.adrfam;
59         int i;
60
61         for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
62                 if (nvmet_addr_family[i].type == adrfam)
63                         return sprintf(page, "%s\n", nvmet_addr_family[i].name);
64         }
65
66         return sprintf(page, "\n");
67 }
68
69 static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
70                 const char *page, size_t count)
71 {
72         struct nvmet_port *port = to_nvmet_port(item);
73         int i;
74
75         if (nvmet_is_port_enabled(port, __func__))
76                 return -EACCES;
77
78         for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
79                 if (sysfs_streq(page, nvmet_addr_family[i].name))
80                         goto found;
81         }
82
83         pr_err("Invalid value '%s' for adrfam\n", page);
84         return -EINVAL;
85
86 found:
87         port->disc_addr.adrfam = nvmet_addr_family[i].type;
88         return count;
89 }
90
91 CONFIGFS_ATTR(nvmet_, addr_adrfam);
92
93 static ssize_t nvmet_addr_portid_show(struct config_item *item,
94                 char *page)
95 {
96         struct nvmet_port *port = to_nvmet_port(item);
97
98         return snprintf(page, PAGE_SIZE, "%d\n",
99                         le16_to_cpu(port->disc_addr.portid));
100 }
101
102 static ssize_t nvmet_addr_portid_store(struct config_item *item,
103                 const char *page, size_t count)
104 {
105         struct nvmet_port *port = to_nvmet_port(item);
106         u16 portid = 0;
107
108         if (kstrtou16(page, 0, &portid)) {
109                 pr_err("Invalid value '%s' for portid\n", page);
110                 return -EINVAL;
111         }
112
113         if (nvmet_is_port_enabled(port, __func__))
114                 return -EACCES;
115
116         port->disc_addr.portid = cpu_to_le16(portid);
117         return count;
118 }
119
120 CONFIGFS_ATTR(nvmet_, addr_portid);
121
122 static ssize_t nvmet_addr_traddr_show(struct config_item *item,
123                 char *page)
124 {
125         struct nvmet_port *port = to_nvmet_port(item);
126
127         return snprintf(page, PAGE_SIZE, "%s\n",
128                         port->disc_addr.traddr);
129 }
130
131 static ssize_t nvmet_addr_traddr_store(struct config_item *item,
132                 const char *page, size_t count)
133 {
134         struct nvmet_port *port = to_nvmet_port(item);
135
136         if (count > NVMF_TRADDR_SIZE) {
137                 pr_err("Invalid value '%s' for traddr\n", page);
138                 return -EINVAL;
139         }
140
141         if (nvmet_is_port_enabled(port, __func__))
142                 return -EACCES;
143
144         if (sscanf(page, "%s\n", port->disc_addr.traddr) != 1)
145                 return -EINVAL;
146         return count;
147 }
148
149 CONFIGFS_ATTR(nvmet_, addr_traddr);
150
151 static const struct nvmet_type_name_map nvmet_addr_treq[] = {
152         { NVMF_TREQ_NOT_SPECIFIED,      "not specified" },
153         { NVMF_TREQ_REQUIRED,           "required" },
154         { NVMF_TREQ_NOT_REQUIRED,       "not required" },
155 };
156
157 static ssize_t nvmet_addr_treq_show(struct config_item *item, char *page)
158 {
159         u8 treq = to_nvmet_port(item)->disc_addr.treq &
160                 NVME_TREQ_SECURE_CHANNEL_MASK;
161         int i;
162
163         for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
164                 if (treq == nvmet_addr_treq[i].type)
165                         return sprintf(page, "%s\n", nvmet_addr_treq[i].name);
166         }
167
168         return sprintf(page, "\n");
169 }
170
171 static ssize_t nvmet_addr_treq_store(struct config_item *item,
172                 const char *page, size_t count)
173 {
174         struct nvmet_port *port = to_nvmet_port(item);
175         u8 treq = port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK;
176         int i;
177
178         if (nvmet_is_port_enabled(port, __func__))
179                 return -EACCES;
180
181         for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
182                 if (sysfs_streq(page, nvmet_addr_treq[i].name))
183                         goto found;
184         }
185
186         pr_err("Invalid value '%s' for treq\n", page);
187         return -EINVAL;
188
189 found:
190         treq |= nvmet_addr_treq[i].type;
191         port->disc_addr.treq = treq;
192         return count;
193 }
194
195 CONFIGFS_ATTR(nvmet_, addr_treq);
196
197 static ssize_t nvmet_addr_trsvcid_show(struct config_item *item,
198                 char *page)
199 {
200         struct nvmet_port *port = to_nvmet_port(item);
201
202         return snprintf(page, PAGE_SIZE, "%s\n",
203                         port->disc_addr.trsvcid);
204 }
205
206 static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
207                 const char *page, size_t count)
208 {
209         struct nvmet_port *port = to_nvmet_port(item);
210
211         if (count > NVMF_TRSVCID_SIZE) {
212                 pr_err("Invalid value '%s' for trsvcid\n", page);
213                 return -EINVAL;
214         }
215         if (nvmet_is_port_enabled(port, __func__))
216                 return -EACCES;
217
218         if (sscanf(page, "%s\n", port->disc_addr.trsvcid) != 1)
219                 return -EINVAL;
220         return count;
221 }
222
223 CONFIGFS_ATTR(nvmet_, addr_trsvcid);
224
225 static ssize_t nvmet_param_inline_data_size_show(struct config_item *item,
226                 char *page)
227 {
228         struct nvmet_port *port = to_nvmet_port(item);
229
230         return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size);
231 }
232
233 static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
234                 const char *page, size_t count)
235 {
236         struct nvmet_port *port = to_nvmet_port(item);
237         int ret;
238
239         if (nvmet_is_port_enabled(port, __func__))
240                 return -EACCES;
241         ret = kstrtoint(page, 0, &port->inline_data_size);
242         if (ret) {
243                 pr_err("Invalid value '%s' for inline_data_size\n", page);
244                 return -EINVAL;
245         }
246         return count;
247 }
248
249 CONFIGFS_ATTR(nvmet_, param_inline_data_size);
250
251 #ifdef CONFIG_BLK_DEV_INTEGRITY
252 static ssize_t nvmet_param_pi_enable_show(struct config_item *item,
253                 char *page)
254 {
255         struct nvmet_port *port = to_nvmet_port(item);
256
257         return snprintf(page, PAGE_SIZE, "%d\n", port->pi_enable);
258 }
259
260 static ssize_t nvmet_param_pi_enable_store(struct config_item *item,
261                 const char *page, size_t count)
262 {
263         struct nvmet_port *port = to_nvmet_port(item);
264         bool val;
265
266         if (strtobool(page, &val))
267                 return -EINVAL;
268
269         if (nvmet_is_port_enabled(port, __func__))
270                 return -EACCES;
271
272         port->pi_enable = val;
273         return count;
274 }
275
276 CONFIGFS_ATTR(nvmet_, param_pi_enable);
277 #endif
278
279 static ssize_t nvmet_addr_trtype_show(struct config_item *item,
280                 char *page)
281 {
282         struct nvmet_port *port = to_nvmet_port(item);
283         int i;
284
285         for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
286                 if (port->disc_addr.trtype == nvmet_transport[i].type)
287                         return sprintf(page, "%s\n", nvmet_transport[i].name);
288         }
289
290         return sprintf(page, "\n");
291 }
292
293 static void nvmet_port_init_tsas_rdma(struct nvmet_port *port)
294 {
295         port->disc_addr.tsas.rdma.qptype = NVMF_RDMA_QPTYPE_CONNECTED;
296         port->disc_addr.tsas.rdma.prtype = NVMF_RDMA_PRTYPE_NOT_SPECIFIED;
297         port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM;
298 }
299
300 static ssize_t nvmet_addr_trtype_store(struct config_item *item,
301                 const char *page, size_t count)
302 {
303         struct nvmet_port *port = to_nvmet_port(item);
304         int i;
305
306         if (nvmet_is_port_enabled(port, __func__))
307                 return -EACCES;
308
309         for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
310                 if (sysfs_streq(page, nvmet_transport[i].name))
311                         goto found;
312         }
313
314         pr_err("Invalid value '%s' for trtype\n", page);
315         return -EINVAL;
316
317 found:
318         memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
319         port->disc_addr.trtype = nvmet_transport[i].type;
320         if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA)
321                 nvmet_port_init_tsas_rdma(port);
322         return count;
323 }
324
325 CONFIGFS_ATTR(nvmet_, addr_trtype);
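/*
 * Illustrative example (added for clarity, not taken from this file) of
 * setting up an NVMe/TCP port, assuming configfs is mounted at
 * /sys/kernel/config; the address and port ID are placeholders:
 *
 *   mkdir /sys/kernel/config/nvmet/ports/1
 *   echo tcp      > /sys/kernel/config/nvmet/ports/1/addr_trtype
 *   echo ipv4     > /sys/kernel/config/nvmet/ports/1/addr_adrfam
 *   echo 10.0.0.1 > /sys/kernel/config/nvmet/ports/1/addr_traddr
 *   echo 4420     > /sys/kernel/config/nvmet/ports/1/addr_trsvcid
 *
 * All addr_* attributes can only be changed while the port is disabled, as
 * enforced by nvmet_is_port_enabled() in the _store() helpers above.
 */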
326
327 /*
328  * Namespace structures & file operation functions below
329  */
330 static ssize_t nvmet_ns_device_path_show(struct config_item *item, char *page)
331 {
332         return sprintf(page, "%s\n", to_nvmet_ns(item)->device_path);
333 }
334
335 static ssize_t nvmet_ns_device_path_store(struct config_item *item,
336                 const char *page, size_t count)
337 {
338         struct nvmet_ns *ns = to_nvmet_ns(item);
339         struct nvmet_subsys *subsys = ns->subsys;
340         size_t len;
341         int ret;
342
343         mutex_lock(&subsys->lock);
344         ret = -EBUSY;
345         if (ns->enabled)
346                 goto out_unlock;
347
348         ret = -EINVAL;
349         len = strcspn(page, "\n");
350         if (!len)
351                 goto out_unlock;
352
353         kfree(ns->device_path);
354         ret = -ENOMEM;
355         ns->device_path = kmemdup_nul(page, len, GFP_KERNEL);
356         if (!ns->device_path)
357                 goto out_unlock;
358
359         mutex_unlock(&subsys->lock);
360         return count;
361
362 out_unlock:
363         mutex_unlock(&subsys->lock);
364         return ret;
365 }
366
367 CONFIGFS_ATTR(nvmet_ns_, device_path);
368
369 #ifdef CONFIG_PCI_P2PDMA
370 static ssize_t nvmet_ns_p2pmem_show(struct config_item *item, char *page)
371 {
372         struct nvmet_ns *ns = to_nvmet_ns(item);
373
374         return pci_p2pdma_enable_show(page, ns->p2p_dev, ns->use_p2pmem);
375 }
376
377 static ssize_t nvmet_ns_p2pmem_store(struct config_item *item,
378                 const char *page, size_t count)
379 {
380         struct nvmet_ns *ns = to_nvmet_ns(item);
381         struct pci_dev *p2p_dev = NULL;
382         bool use_p2pmem;
383         int ret = count;
384         int error;
385
386         mutex_lock(&ns->subsys->lock);
387         if (ns->enabled) {
388                 ret = -EBUSY;
389                 goto out_unlock;
390         }
391
392         error = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pmem);
393         if (error) {
394                 ret = error;
395                 goto out_unlock;
396         }
397
398         ns->use_p2pmem = use_p2pmem;
399         pci_dev_put(ns->p2p_dev);
400         ns->p2p_dev = p2p_dev;
401
402 out_unlock:
403         mutex_unlock(&ns->subsys->lock);
404
405         return ret;
406 }
407
408 CONFIGFS_ATTR(nvmet_ns_, p2pmem);
409 #endif /* CONFIG_PCI_P2PDMA */
410
411 static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page)
412 {
413         return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid);
414 }
415
416 static ssize_t nvmet_ns_device_uuid_store(struct config_item *item,
417                                           const char *page, size_t count)
418 {
419         struct nvmet_ns *ns = to_nvmet_ns(item);
420         struct nvmet_subsys *subsys = ns->subsys;
421         int ret = 0;
422
423         mutex_lock(&subsys->lock);
424         if (ns->enabled) {
425                 ret = -EBUSY;
426                 goto out_unlock;
427         }
428
429         if (uuid_parse(page, &ns->uuid))
430                 ret = -EINVAL;
431
432 out_unlock:
433         mutex_unlock(&subsys->lock);
434         return ret ? ret : count;
435 }
436
437 CONFIGFS_ATTR(nvmet_ns_, device_uuid);
438
439 static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page)
440 {
441         return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid);
442 }
443
444 static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
445                 const char *page, size_t count)
446 {
447         struct nvmet_ns *ns = to_nvmet_ns(item);
448         struct nvmet_subsys *subsys = ns->subsys;
449         u8 nguid[16];
450         const char *p = page;
451         int i;
452         int ret = 0;
453
454         mutex_lock(&subsys->lock);
455         if (ns->enabled) {
456                 ret = -EBUSY;
457                 goto out_unlock;
458         }
459
460         for (i = 0; i < 16; i++) {
461                 if (p + 2 > page + count) {
462                         ret = -EINVAL;
463                         goto out_unlock;
464                 }
465                 if (!isxdigit(p[0]) || !isxdigit(p[1])) {
466                         ret = -EINVAL;
467                         goto out_unlock;
468                 }
469
470                 nguid[i] = (hex_to_bin(p[0]) << 4) | hex_to_bin(p[1]);
471                 p += 2;
472
473                 if (*p == '-' || *p == ':')
474                         p++;
475         }
476
477         memcpy(&ns->nguid, nguid, sizeof(nguid));
478 out_unlock:
479         mutex_unlock(&subsys->lock);
480         return ret ? ret : count;
481 }
482
483 CONFIGFS_ATTR(nvmet_ns_, device_nguid);
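/* device_nguid accepts 32 hex digits, optionally grouped with '-' or ':'. */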
484
485 static ssize_t nvmet_ns_ana_grpid_show(struct config_item *item, char *page)
486 {
487         return sprintf(page, "%u\n", to_nvmet_ns(item)->anagrpid);
488 }
489
490 static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item,
491                 const char *page, size_t count)
492 {
493         struct nvmet_ns *ns = to_nvmet_ns(item);
494         u32 oldgrpid, newgrpid;
495         int ret;
496
497         ret = kstrtou32(page, 0, &newgrpid);
498         if (ret)
499                 return ret;
500
501         if (newgrpid < 1 || newgrpid > NVMET_MAX_ANAGRPS)
502                 return -EINVAL;
503
504         down_write(&nvmet_ana_sem);
505         oldgrpid = ns->anagrpid;
506         nvmet_ana_group_enabled[newgrpid]++;
507         ns->anagrpid = newgrpid;
508         nvmet_ana_group_enabled[oldgrpid]--;
509         nvmet_ana_chgcnt++;
510         up_write(&nvmet_ana_sem);
511
512         nvmet_send_ana_event(ns->subsys, NULL);
513         return count;
514 }
515
516 CONFIGFS_ATTR(nvmet_ns_, ana_grpid);
517
518 static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
519 {
520         return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled);
521 }
522
523 static ssize_t nvmet_ns_enable_store(struct config_item *item,
524                 const char *page, size_t count)
525 {
526         struct nvmet_ns *ns = to_nvmet_ns(item);
527         bool enable;
528         int ret = 0;
529
530         if (strtobool(page, &enable))
531                 return -EINVAL;
532
533         if (enable)
534                 ret = nvmet_ns_enable(ns);
535         else
536                 nvmet_ns_disable(ns);
537
538         return ret ? ret : count;
539 }
540
541 CONFIGFS_ATTR(nvmet_ns_, enable);
542
543 static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page)
544 {
545         return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io);
546 }
547
548 static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
549                 const char *page, size_t count)
550 {
551         struct nvmet_ns *ns = to_nvmet_ns(item);
552         bool val;
553
554         if (strtobool(page, &val))
555                 return -EINVAL;
556
557         mutex_lock(&ns->subsys->lock);
558         if (ns->enabled) {
559                 pr_err("disable ns before setting buffered_io value.\n");
560                 mutex_unlock(&ns->subsys->lock);
561                 return -EINVAL;
562         }
563
564         ns->buffered_io = val;
565         mutex_unlock(&ns->subsys->lock);
566         return count;
567 }
568
569 CONFIGFS_ATTR(nvmet_ns_, buffered_io);
570
571 static ssize_t nvmet_ns_revalidate_size_store(struct config_item *item,
572                 const char *page, size_t count)
573 {
574         struct nvmet_ns *ns = to_nvmet_ns(item);
575         bool val;
576
577         if (strtobool(page, &val))
578                 return -EINVAL;
579
580         if (!val)
581                 return -EINVAL;
582
583         mutex_lock(&ns->subsys->lock);
584         if (!ns->enabled) {
585                 pr_err("enable ns before revalidate.\n");
586                 mutex_unlock(&ns->subsys->lock);
587                 return -EINVAL;
588         }
589         nvmet_ns_revalidate(ns);
590         mutex_unlock(&ns->subsys->lock);
591         return count;
592 }
593
594 CONFIGFS_ATTR_WO(nvmet_ns_, revalidate_size);
595
596 static struct configfs_attribute *nvmet_ns_attrs[] = {
597         &nvmet_ns_attr_device_path,
598         &nvmet_ns_attr_device_nguid,
599         &nvmet_ns_attr_device_uuid,
600         &nvmet_ns_attr_ana_grpid,
601         &nvmet_ns_attr_enable,
602         &nvmet_ns_attr_buffered_io,
603         &nvmet_ns_attr_revalidate_size,
604 #ifdef CONFIG_PCI_P2PDMA
605         &nvmet_ns_attr_p2pmem,
606 #endif
607         NULL,
608 };
609
610 static void nvmet_ns_release(struct config_item *item)
611 {
612         struct nvmet_ns *ns = to_nvmet_ns(item);
613
614         nvmet_ns_free(ns);
615 }
616
617 static struct configfs_item_operations nvmet_ns_item_ops = {
618         .release                = nvmet_ns_release,
619 };
620
621 static const struct config_item_type nvmet_ns_type = {
622         .ct_item_ops            = &nvmet_ns_item_ops,
623         .ct_attrs               = nvmet_ns_attrs,
624         .ct_owner               = THIS_MODULE,
625 };
626
627 static struct config_group *nvmet_ns_make(struct config_group *group,
628                 const char *name)
629 {
630         struct nvmet_subsys *subsys = namespaces_to_subsys(&group->cg_item);
631         struct nvmet_ns *ns;
632         int ret;
633         u32 nsid;
634
635         ret = kstrtou32(name, 0, &nsid);
636         if (ret)
637                 goto out;
638
639         ret = -EINVAL;
640         if (nsid == 0 || nsid == NVME_NSID_ALL) {
641                 pr_err("invalid nsid %#x\n", nsid);
642                 goto out;
643         }
644
645         ret = -ENOMEM;
646         ns = nvmet_ns_alloc(subsys, nsid);
647         if (!ns)
648                 goto out;
649         config_group_init_type_name(&ns->group, name, &nvmet_ns_type);
650
651         pr_info("adding nsid %d to subsystem %s\n", nsid, subsys->subsysnqn);
652
653         return &ns->group;
654 out:
655         return ERR_PTR(ret);
656 }
657
658 static struct configfs_group_operations nvmet_namespaces_group_ops = {
659         .make_group             = nvmet_ns_make,
660 };
661
662 static const struct config_item_type nvmet_namespaces_type = {
663         .ct_group_ops           = &nvmet_namespaces_group_ops,
664         .ct_owner               = THIS_MODULE,
665 };
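/*
 * Illustrative example of exporting a block device as namespace 1 of a
 * subsystem ("testnqn" and the device path are placeholders; configfs is
 * assumed to be mounted at /sys/kernel/config):
 *
 *   cd /sys/kernel/config/nvmet/subsystems/testnqn/namespaces
 *   mkdir 1
 *   echo /dev/nvme0n1 > 1/device_path
 *   echo 1 > 1/enable
 *
 * device_path, device_uuid, device_nguid, buffered_io and p2pmem can only be
 * changed while the namespace is disabled; see the checks in the _store()
 * helpers above.
 */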
666
667 #ifdef CONFIG_NVME_TARGET_PASSTHRU
668
669 static ssize_t nvmet_passthru_device_path_show(struct config_item *item,
670                 char *page)
671 {
672         struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
673
674         return snprintf(page, PAGE_SIZE, "%s\n", subsys->passthru_ctrl_path);
675 }
676
677 static ssize_t nvmet_passthru_device_path_store(struct config_item *item,
678                 const char *page, size_t count)
679 {
680         struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
681         size_t len;
682         int ret;
683
684         mutex_lock(&subsys->lock);
685
686         ret = -EBUSY;
687         if (subsys->passthru_ctrl)
688                 goto out_unlock;
689
690         ret = -EINVAL;
691         len = strcspn(page, "\n");
692         if (!len)
693                 goto out_unlock;
694
695         kfree(subsys->passthru_ctrl_path);
696         ret = -ENOMEM;
697         subsys->passthru_ctrl_path = kstrndup(page, len, GFP_KERNEL);
698         if (!subsys->passthru_ctrl_path)
699                 goto out_unlock;
700
701         mutex_unlock(&subsys->lock);
702
703         return count;
704 out_unlock:
705         mutex_unlock(&subsys->lock);
706         return ret;
707 }
708 CONFIGFS_ATTR(nvmet_passthru_, device_path);
709
710 static ssize_t nvmet_passthru_enable_show(struct config_item *item,
711                 char *page)
712 {
713         struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
714
715         return sprintf(page, "%d\n", subsys->passthru_ctrl ? 1 : 0);
716 }
717
718 static ssize_t nvmet_passthru_enable_store(struct config_item *item,
719                 const char *page, size_t count)
720 {
721         struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
722         bool enable;
723         int ret = 0;
724
725         if (strtobool(page, &enable))
726                 return -EINVAL;
727
728         if (enable)
729                 ret = nvmet_passthru_ctrl_enable(subsys);
730         else
731                 nvmet_passthru_ctrl_disable(subsys);
732
733         return ret ? ret : count;
734 }
735 CONFIGFS_ATTR(nvmet_passthru_, enable);
736
737 static ssize_t nvmet_passthru_admin_timeout_show(struct config_item *item,
738                 char *page)
739 {
740         return sprintf(page, "%u\n", to_subsys(item->ci_parent)->admin_timeout);
741 }
742
743 static ssize_t nvmet_passthru_admin_timeout_store(struct config_item *item,
744                 const char *page, size_t count)
745 {
746         struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
747         unsigned int timeout;
748
749         if (kstrtouint(page, 0, &timeout))
750                 return -EINVAL;
751         subsys->admin_timeout = timeout;
752         return count;
753 }
754 CONFIGFS_ATTR(nvmet_passthru_, admin_timeout);
755
756 static ssize_t nvmet_passthru_io_timeout_show(struct config_item *item,
757                 char *page)
758 {
759         return sprintf(page, "%u\n", to_subsys(item->ci_parent)->io_timeout);
760 }
761
762 static ssize_t nvmet_passthru_io_timeout_store(struct config_item *item,
763                 const char *page, size_t count)
764 {
765         struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
766         unsigned int timeout;
767
768         if (kstrtouint(page, 0, &timeout))
769                 return -EINVAL;
770         subsys->io_timeout = timeout;
771         return count;
772 }
773 CONFIGFS_ATTR(nvmet_passthru_, io_timeout);
774
775 static struct configfs_attribute *nvmet_passthru_attrs[] = {
776         &nvmet_passthru_attr_device_path,
777         &nvmet_passthru_attr_enable,
778         &nvmet_passthru_attr_admin_timeout,
779         &nvmet_passthru_attr_io_timeout,
780         NULL,
781 };
782
783 static const struct config_item_type nvmet_passthru_type = {
784         .ct_attrs               = nvmet_passthru_attrs,
785         .ct_owner               = THIS_MODULE,
786 };
787
788 static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
789 {
790         config_group_init_type_name(&subsys->passthru_group,
791                                     "passthru", &nvmet_passthru_type);
792         configfs_add_default_group(&subsys->passthru_group,
793                                    &subsys->group);
794 }
795
796 #else /* CONFIG_NVME_TARGET_PASSTHRU */
797
798 static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
799 {
800 }
801
802 #endif /* CONFIG_NVME_TARGET_PASSTHRU */
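/*
 * Illustrative passthru usage (CONFIG_NVME_TARGET_PASSTHRU only): write the
 * path of an existing NVMe controller character device (e.g. /dev/nvme0, a
 * placeholder) to <subsys>/passthru/device_path, then write 1 to
 * <subsys>/passthru/enable to export that controller through the target.
 */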
803
804 static int nvmet_port_subsys_allow_link(struct config_item *parent,
805                 struct config_item *target)
806 {
807         struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
808         struct nvmet_subsys *subsys;
809         struct nvmet_subsys_link *link, *p;
810         int ret;
811
812         if (target->ci_type != &nvmet_subsys_type) {
813                 pr_err("can only link subsystems into the subsystems dir!\n");
814                 return -EINVAL;
815         }
816         subsys = to_subsys(target);
817         link = kmalloc(sizeof(*link), GFP_KERNEL);
818         if (!link)
819                 return -ENOMEM;
820         link->subsys = subsys;
821
822         down_write(&nvmet_config_sem);
823         ret = -EEXIST;
824         list_for_each_entry(p, &port->subsystems, entry) {
825                 if (p->subsys == subsys)
826                         goto out_free_link;
827         }
828
829         if (list_empty(&port->subsystems)) {
830                 ret = nvmet_enable_port(port);
831                 if (ret)
832                         goto out_free_link;
833         }
834
835         list_add_tail(&link->entry, &port->subsystems);
836         nvmet_port_disc_changed(port, subsys);
837
838         up_write(&nvmet_config_sem);
839         return 0;
840
841 out_free_link:
842         up_write(&nvmet_config_sem);
843         kfree(link);
844         return ret;
845 }
846
847 static void nvmet_port_subsys_drop_link(struct config_item *parent,
848                 struct config_item *target)
849 {
850         struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
851         struct nvmet_subsys *subsys = to_subsys(target);
852         struct nvmet_subsys_link *p;
853
854         down_write(&nvmet_config_sem);
855         list_for_each_entry(p, &port->subsystems, entry) {
856                 if (p->subsys == subsys)
857                         goto found;
858         }
859         up_write(&nvmet_config_sem);
860         return;
861
862 found:
863         list_del(&p->entry);
864         nvmet_port_del_ctrls(port, subsys);
865         nvmet_port_disc_changed(port, subsys);
866
867         if (list_empty(&port->subsystems))
868                 nvmet_disable_port(port);
869         up_write(&nvmet_config_sem);
870         kfree(p);
871 }
872
873 static struct configfs_item_operations nvmet_port_subsys_item_ops = {
874         .allow_link             = nvmet_port_subsys_allow_link,
875         .drop_link              = nvmet_port_subsys_drop_link,
876 };
877
878 static const struct config_item_type nvmet_port_subsys_type = {
879         .ct_item_ops            = &nvmet_port_subsys_item_ops,
880         .ct_owner               = THIS_MODULE,
881 };
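/*
 * A subsystem is exported on a port by symlinking it into the port's
 * subsystems/ directory (illustrative paths, "testnqn" is a placeholder):
 *
 *   ln -s /sys/kernel/config/nvmet/subsystems/testnqn \
 *         /sys/kernel/config/nvmet/ports/1/subsystems/testnqn
 *
 * The first link enables the port via nvmet_enable_port(); removing the last
 * link disables it again, see nvmet_port_subsys_drop_link().
 */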
882
883 static int nvmet_allowed_hosts_allow_link(struct config_item *parent,
884                 struct config_item *target)
885 {
886         struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
887         struct nvmet_host *host;
888         struct nvmet_host_link *link, *p;
889         int ret;
890
891         if (target->ci_type != &nvmet_host_type) {
892                 pr_err("can only link hosts into the allowed_hosts directory!\n");
893                 return -EINVAL;
894         }
895
896         host = to_host(target);
897         link = kmalloc(sizeof(*link), GFP_KERNEL);
898         if (!link)
899                 return -ENOMEM;
900         link->host = host;
901
902         down_write(&nvmet_config_sem);
903         ret = -EINVAL;
904         if (subsys->allow_any_host) {
905                 pr_err("can't add hosts when allow_any_host is set!\n");
906                 goto out_free_link;
907         }
908
909         ret = -EEXIST;
910         list_for_each_entry(p, &subsys->hosts, entry) {
911                 if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
912                         goto out_free_link;
913         }
914         list_add_tail(&link->entry, &subsys->hosts);
915         nvmet_subsys_disc_changed(subsys, host);
916
917         up_write(&nvmet_config_sem);
918         return 0;
919 out_free_link:
920         up_write(&nvmet_config_sem);
921         kfree(link);
922         return ret;
923 }
924
925 static void nvmet_allowed_hosts_drop_link(struct config_item *parent,
926                 struct config_item *target)
927 {
928         struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
929         struct nvmet_host *host = to_host(target);
930         struct nvmet_host_link *p;
931
932         down_write(&nvmet_config_sem);
933         list_for_each_entry(p, &subsys->hosts, entry) {
934                 if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
935                         goto found;
936         }
937         up_write(&nvmet_config_sem);
938         return;
939
940 found:
941         list_del(&p->entry);
942         nvmet_subsys_disc_changed(subsys, host);
943
944         up_write(&nvmet_config_sem);
945         kfree(p);
946 }
947
948 static struct configfs_item_operations nvmet_allowed_hosts_item_ops = {
949         .allow_link             = nvmet_allowed_hosts_allow_link,
950         .drop_link              = nvmet_allowed_hosts_drop_link,
951 };
952
953 static const struct config_item_type nvmet_allowed_hosts_type = {
954         .ct_item_ops            = &nvmet_allowed_hosts_item_ops,
955         .ct_owner               = THIS_MODULE,
956 };
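/*
 * Host-based access control: unless attr_allow_any_host is set on the
 * subsystem, a host NQN has to be created under hosts/ and symlinked into
 * the subsystem's allowed_hosts/ directory (illustrative, "hostnqn" and
 * "testnqn" are placeholders):
 *
 *   mkdir /sys/kernel/config/nvmet/hosts/hostnqn
 *   ln -s /sys/kernel/config/nvmet/hosts/hostnqn \
 *         /sys/kernel/config/nvmet/subsystems/testnqn/allowed_hosts/hostnqn
 */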
957
958 static ssize_t nvmet_subsys_attr_allow_any_host_show(struct config_item *item,
959                 char *page)
960 {
961         return snprintf(page, PAGE_SIZE, "%d\n",
962                 to_subsys(item)->allow_any_host);
963 }
964
965 static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item,
966                 const char *page, size_t count)
967 {
968         struct nvmet_subsys *subsys = to_subsys(item);
969         bool allow_any_host;
970         int ret = 0;
971
972         if (strtobool(page, &allow_any_host))
973                 return -EINVAL;
974
975         down_write(&nvmet_config_sem);
976         if (allow_any_host && !list_empty(&subsys->hosts)) {
977                 pr_err("Can't set allow_any_host when explicit hosts are set!\n");
978                 ret = -EINVAL;
979                 goto out_unlock;
980         }
981
982         if (subsys->allow_any_host != allow_any_host) {
983                 subsys->allow_any_host = allow_any_host;
984                 nvmet_subsys_disc_changed(subsys, NULL);
985         }
986
987 out_unlock:
988         up_write(&nvmet_config_sem);
989         return ret ? ret : count;
990 }
991
992 CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host);
993
994 static ssize_t nvmet_subsys_attr_version_show(struct config_item *item,
995                                               char *page)
996 {
997         struct nvmet_subsys *subsys = to_subsys(item);
998
999         if (NVME_TERTIARY(subsys->ver))
1000                 return snprintf(page, PAGE_SIZE, "%llu.%llu.%llu\n",
1001                                 NVME_MAJOR(subsys->ver),
1002                                 NVME_MINOR(subsys->ver),
1003                                 NVME_TERTIARY(subsys->ver));
1004
1005         return snprintf(page, PAGE_SIZE, "%llu.%llu\n",
1006                         NVME_MAJOR(subsys->ver),
1007                         NVME_MINOR(subsys->ver));
1008 }
1009
1010 static ssize_t
1011 nvmet_subsys_attr_version_store_locked(struct nvmet_subsys *subsys,
1012                 const char *page, size_t count)
1013 {
1014         int major, minor, tertiary = 0;
1015         int ret;
1016
1017         if (subsys->subsys_discovered) {
1018                 if (NVME_TERTIARY(subsys->ver))
1019                         pr_err("Can't set version number. %llu.%llu.%llu is already assigned\n",
1020                                NVME_MAJOR(subsys->ver),
1021                                NVME_MINOR(subsys->ver),
1022                                NVME_TERTIARY(subsys->ver));
1023                 else
1024                         pr_err("Can't set version number. %llu.%llu is already assigned\n",
1025                                NVME_MAJOR(subsys->ver),
1026                                NVME_MINOR(subsys->ver));
1027                 return -EINVAL;
1028         }
1029
1030         /* passthru subsystems use the underlying controller's version */
1031         if (nvmet_is_passthru_subsys(subsys))
1032                 return -EINVAL;
1033
1034         ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
1035         if (ret != 2 && ret != 3)
1036                 return -EINVAL;
1037
1038         subsys->ver = NVME_VS(major, minor, tertiary);
1039
1040         return count;
1041 }
1042
1043 static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
1044                                                const char *page, size_t count)
1045 {
1046         struct nvmet_subsys *subsys = to_subsys(item);
1047         ssize_t ret;
1048
1049         down_write(&nvmet_config_sem);
1050         mutex_lock(&subsys->lock);
1051         ret = nvmet_subsys_attr_version_store_locked(subsys, page, count);
1052         mutex_unlock(&subsys->lock);
1053         up_write(&nvmet_config_sem);
1054
1055         return ret;
1056 }
1057 CONFIGFS_ATTR(nvmet_subsys_, attr_version);
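/* attr_version accepts "major.minor" or "major.minor.tertiary", e.g. "1.3". */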
1058
1059 /* See Section 1.5 of NVMe 1.4 */
1060 static bool nvmet_is_ascii(const char c)
1061 {
1062         return c >= 0x20 && c <= 0x7e;
1063 }
1064
1065 static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
1066                                              char *page)
1067 {
1068         struct nvmet_subsys *subsys = to_subsys(item);
1069
1070         return snprintf(page, PAGE_SIZE, "%*s\n",
1071                         NVMET_SN_MAX_SIZE, subsys->serial);
1072 }
1073
1074 static ssize_t
1075 nvmet_subsys_attr_serial_store_locked(struct nvmet_subsys *subsys,
1076                 const char *page, size_t count)
1077 {
1078         int pos, len = strcspn(page, "\n");
1079
1080         if (subsys->subsys_discovered) {
1081                 pr_err("Can't set serial number. %s is already assigned\n",
1082                        subsys->serial);
1083                 return -EINVAL;
1084         }
1085
1086         if (!len || len > NVMET_SN_MAX_SIZE) {
1087                 pr_err("Serial Number cannot be empty or exceed %d bytes\n",
1088                        NVMET_SN_MAX_SIZE);
1089                 return -EINVAL;
1090         }
1091
1092         for (pos = 0; pos < len; pos++) {
1093                 if (!nvmet_is_ascii(page[pos])) {
1094                         pr_err("Serial Number must contain only ASCII characters\n");
1095                         return -EINVAL;
1096                 }
1097         }
1098
1099         memcpy_and_pad(subsys->serial, NVMET_SN_MAX_SIZE, page, len, ' ');
1100
1101         return count;
1102 }
1103
1104 static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
1105                                               const char *page, size_t count)
1106 {
1107         struct nvmet_subsys *subsys = to_subsys(item);
1108         ssize_t ret;
1109
1110         down_write(&nvmet_config_sem);
1111         mutex_lock(&subsys->lock);
1112         ret = nvmet_subsys_attr_serial_store_locked(subsys, page, count);
1113         mutex_unlock(&subsys->lock);
1114         up_write(&nvmet_config_sem);
1115
1116         return ret;
1117 }
1118 CONFIGFS_ATTR(nvmet_subsys_, attr_serial);
1119
1120 static ssize_t nvmet_subsys_attr_cntlid_min_show(struct config_item *item,
1121                                                  char *page)
1122 {
1123         return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_min);
1124 }
1125
1126 static ssize_t nvmet_subsys_attr_cntlid_min_store(struct config_item *item,
1127                                                   const char *page, size_t cnt)
1128 {
1129         u16 cntlid_min;
1130
1131         if (sscanf(page, "%hu\n", &cntlid_min) != 1)
1132                 return -EINVAL;
1133
1134         if (cntlid_min == 0)
1135                 return -EINVAL;
1136
1137         down_write(&nvmet_config_sem);
1138         if (cntlid_min >= to_subsys(item)->cntlid_max)
1139                 goto out_unlock;
1140         to_subsys(item)->cntlid_min = cntlid_min;
1141         up_write(&nvmet_config_sem);
1142         return cnt;
1143
1144 out_unlock:
1145         up_write(&nvmet_config_sem);
1146         return -EINVAL;
1147 }
1148 CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_min);
1149
1150 static ssize_t nvmet_subsys_attr_cntlid_max_show(struct config_item *item,
1151                                                  char *page)
1152 {
1153         return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_max);
1154 }
1155
1156 static ssize_t nvmet_subsys_attr_cntlid_max_store(struct config_item *item,
1157                                                   const char *page, size_t cnt)
1158 {
1159         u16 cntlid_max;
1160
1161         if (sscanf(page, "%hu\n", &cntlid_max) != 1)
1162                 return -EINVAL;
1163
1164         if (cntlid_max == 0)
1165                 return -EINVAL;
1166
1167         down_write(&nvmet_config_sem);
1168         if (cntlid_max <= to_subsys(item)->cntlid_min)
1169                 goto out_unlock;
1170         to_subsys(item)->cntlid_max = cntlid_max;
1171         up_write(&nvmet_config_sem);
1172         return cnt;
1173
1174 out_unlock:
1175         up_write(&nvmet_config_sem);
1176         return -EINVAL;
1177 }
1178 CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_max);
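/*
 * cntlid_min and cntlid_max bound the controller IDs handed out for this
 * subsystem; both must be non-zero and cntlid_min must stay strictly below
 * cntlid_max, as enforced by the two _store() helpers above.
 */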
1179
1180 static ssize_t nvmet_subsys_attr_model_show(struct config_item *item,
1181                                             char *page)
1182 {
1183         struct nvmet_subsys *subsys = to_subsys(item);
1184
1185         return snprintf(page, PAGE_SIZE, "%s\n", subsys->model_number);
1186 }
1187
1188 static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
1189                 const char *page, size_t count)
1190 {
1191         int pos = 0, len;
1192
1193         if (subsys->subsys_discovered) {
1194                 pr_err("Can't set model number. %s is already assigned\n",
1195                        subsys->model_number);
1196                 return -EINVAL;
1197         }
1198
1199         len = strcspn(page, "\n");
1200         if (!len)
1201                 return -EINVAL;
1202
1203         if (len > NVMET_MN_MAX_SIZE) {
1204                 pr_err("Model number size cannot exceed %d bytes\n",
1205                        NVMET_MN_MAX_SIZE);
1206                 return -EINVAL;
1207         }
1208
1209         for (pos = 0; pos < len; pos++) {
1210                 if (!nvmet_is_ascii(page[pos]))
1211                         return -EINVAL;
1212         }
1213
1214         subsys->model_number = kmemdup_nul(page, len, GFP_KERNEL);
1215         if (!subsys->model_number)
1216                 return -ENOMEM;
1217         return count;
1218 }
1219
1220 static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
1221                                              const char *page, size_t count)
1222 {
1223         struct nvmet_subsys *subsys = to_subsys(item);
1224         ssize_t ret;
1225
1226         down_write(&nvmet_config_sem);
1227         mutex_lock(&subsys->lock);
1228         ret = nvmet_subsys_attr_model_store_locked(subsys, page, count);
1229         mutex_unlock(&subsys->lock);
1230         up_write(&nvmet_config_sem);
1231
1232         return ret;
1233 }
1234 CONFIGFS_ATTR(nvmet_subsys_, attr_model);
1235
1236 #ifdef CONFIG_BLK_DEV_INTEGRITY
1237 static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item,
1238                                                 char *page)
1239 {
1240         return snprintf(page, PAGE_SIZE, "%d\n", to_subsys(item)->pi_support);
1241 }
1242
1243 static ssize_t nvmet_subsys_attr_pi_enable_store(struct config_item *item,
1244                                                  const char *page, size_t count)
1245 {
1246         struct nvmet_subsys *subsys = to_subsys(item);
1247         bool pi_enable;
1248
1249         if (strtobool(page, &pi_enable))
1250                 return -EINVAL;
1251
1252         subsys->pi_support = pi_enable;
1253         return count;
1254 }
1255 CONFIGFS_ATTR(nvmet_subsys_, attr_pi_enable);
1256 #endif
1257
1258 static struct configfs_attribute *nvmet_subsys_attrs[] = {
1259         &nvmet_subsys_attr_attr_allow_any_host,
1260         &nvmet_subsys_attr_attr_version,
1261         &nvmet_subsys_attr_attr_serial,
1262         &nvmet_subsys_attr_attr_cntlid_min,
1263         &nvmet_subsys_attr_attr_cntlid_max,
1264         &nvmet_subsys_attr_attr_model,
1265 #ifdef CONFIG_BLK_DEV_INTEGRITY
1266         &nvmet_subsys_attr_attr_pi_enable,
1267 #endif
1268         NULL,
1269 };
1270
1271 /*
1272  * Subsystem structures & folder operation functions below
1273  */
1274 static void nvmet_subsys_release(struct config_item *item)
1275 {
1276         struct nvmet_subsys *subsys = to_subsys(item);
1277
1278         nvmet_subsys_del_ctrls(subsys);
1279         nvmet_subsys_put(subsys);
1280 }
1281
1282 static struct configfs_item_operations nvmet_subsys_item_ops = {
1283         .release                = nvmet_subsys_release,
1284 };
1285
1286 static const struct config_item_type nvmet_subsys_type = {
1287         .ct_item_ops            = &nvmet_subsys_item_ops,
1288         .ct_attrs               = nvmet_subsys_attrs,
1289         .ct_owner               = THIS_MODULE,
1290 };
1291
1292 static struct config_group *nvmet_subsys_make(struct config_group *group,
1293                 const char *name)
1294 {
1295         struct nvmet_subsys *subsys;
1296
1297         if (sysfs_streq(name, NVME_DISC_SUBSYS_NAME)) {
1298                 pr_err("can't create discovery subsystem through configfs\n");
1299                 return ERR_PTR(-EINVAL);
1300         }
1301
1302         subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
1303         if (IS_ERR(subsys))
1304                 return ERR_CAST(subsys);
1305
1306         config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type);
1307
1308         config_group_init_type_name(&subsys->namespaces_group,
1309                         "namespaces", &nvmet_namespaces_type);
1310         configfs_add_default_group(&subsys->namespaces_group, &subsys->group);
1311
1312         config_group_init_type_name(&subsys->allowed_hosts_group,
1313                         "allowed_hosts", &nvmet_allowed_hosts_type);
1314         configfs_add_default_group(&subsys->allowed_hosts_group,
1315                         &subsys->group);
1316
1317         nvmet_add_passthru_group(subsys);
1318
1319         return &subsys->group;
1320 }
1321
1322 static struct configfs_group_operations nvmet_subsystems_group_ops = {
1323         .make_group             = nvmet_subsys_make,
1324 };
1325
1326 static const struct config_item_type nvmet_subsystems_type = {
1327         .ct_group_ops           = &nvmet_subsystems_group_ops,
1328         .ct_owner               = THIS_MODULE,
1329 };
1330
1331 static ssize_t nvmet_referral_enable_show(struct config_item *item,
1332                 char *page)
1333 {
1334         return snprintf(page, PAGE_SIZE, "%d\n", to_nvmet_port(item)->enabled);
1335 }
1336
1337 static ssize_t nvmet_referral_enable_store(struct config_item *item,
1338                 const char *page, size_t count)
1339 {
1340         struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
1341         struct nvmet_port *port = to_nvmet_port(item);
1342         bool enable;
1343
1344         if (strtobool(page, &enable))
1345                 goto inval;
1346
1347         if (enable)
1348                 nvmet_referral_enable(parent, port);
1349         else
1350                 nvmet_referral_disable(parent, port);
1351
1352         return count;
1353 inval:
1354         pr_err("Invalid value '%s' for enable\n", page);
1355         return -EINVAL;
1356 }
1357
1358 CONFIGFS_ATTR(nvmet_referral_, enable);
1359
1360 /*
1361  * Discovery Service subsystem definitions
1362  */
1363 static struct configfs_attribute *nvmet_referral_attrs[] = {
1364         &nvmet_attr_addr_adrfam,
1365         &nvmet_attr_addr_portid,
1366         &nvmet_attr_addr_treq,
1367         &nvmet_attr_addr_traddr,
1368         &nvmet_attr_addr_trsvcid,
1369         &nvmet_attr_addr_trtype,
1370         &nvmet_referral_attr_enable,
1371         NULL,
1372 };
1373
1374 static void nvmet_referral_notify(struct config_group *group,
1375                 struct config_item *item)
1376 {
1377         struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
1378         struct nvmet_port *port = to_nvmet_port(item);
1379
1380         nvmet_referral_disable(parent, port);
1381 }
1382
1383 static void nvmet_referral_release(struct config_item *item)
1384 {
1385         struct nvmet_port *port = to_nvmet_port(item);
1386
1387         kfree(port);
1388 }
1389
1390 static struct configfs_item_operations nvmet_referral_item_ops = {
1391         .release        = nvmet_referral_release,
1392 };
1393
1394 static const struct config_item_type nvmet_referral_type = {
1395         .ct_owner       = THIS_MODULE,
1396         .ct_attrs       = nvmet_referral_attrs,
1397         .ct_item_ops    = &nvmet_referral_item_ops,
1398 };
1399
1400 static struct config_group *nvmet_referral_make(
1401                 struct config_group *group, const char *name)
1402 {
1403         struct nvmet_port *port;
1404
1405         port = kzalloc(sizeof(*port), GFP_KERNEL);
1406         if (!port)
1407                 return ERR_PTR(-ENOMEM);
1408
1409         INIT_LIST_HEAD(&port->entry);
1410         config_group_init_type_name(&port->group, name, &nvmet_referral_type);
1411
1412         return &port->group;
1413 }
1414
1415 static struct configfs_group_operations nvmet_referral_group_ops = {
1416         .make_group             = nvmet_referral_make,
1417         .disconnect_notify      = nvmet_referral_notify,
1418 };
1419
1420 static const struct config_item_type nvmet_referrals_type = {
1421         .ct_owner       = THIS_MODULE,
1422         .ct_group_ops   = &nvmet_referral_group_ops,
1423 };
1424
1425 static struct nvmet_type_name_map nvmet_ana_state[] = {
1426         { NVME_ANA_OPTIMIZED,           "optimized" },
1427         { NVME_ANA_NONOPTIMIZED,        "non-optimized" },
1428         { NVME_ANA_INACCESSIBLE,        "inaccessible" },
1429         { NVME_ANA_PERSISTENT_LOSS,     "persistent-loss" },
1430         { NVME_ANA_CHANGE,              "change" },
1431 };
1432
1433 static ssize_t nvmet_ana_group_ana_state_show(struct config_item *item,
1434                 char *page)
1435 {
1436         struct nvmet_ana_group *grp = to_ana_group(item);
1437         enum nvme_ana_state state = grp->port->ana_state[grp->grpid];
1438         int i;
1439
1440         for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
1441                 if (state == nvmet_ana_state[i].type)
1442                         return sprintf(page, "%s\n", nvmet_ana_state[i].name);
1443         }
1444
1445         return sprintf(page, "\n");
1446 }
1447
1448 static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item,
1449                 const char *page, size_t count)
1450 {
1451         struct nvmet_ana_group *grp = to_ana_group(item);
1452         enum nvme_ana_state *ana_state = grp->port->ana_state;
1453         int i;
1454
1455         for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
1456                 if (sysfs_streq(page, nvmet_ana_state[i].name))
1457                         goto found;
1458         }
1459
1460         pr_err("Invalid value '%s' for ana_state\n", page);
1461         return -EINVAL;
1462
1463 found:
1464         down_write(&nvmet_ana_sem);
1465         ana_state[grp->grpid] = (enum nvme_ana_state) nvmet_ana_state[i].type;
1466         nvmet_ana_chgcnt++;
1467         up_write(&nvmet_ana_sem);
1468         nvmet_port_send_ana_event(grp->port);
1469         return count;
1470 }
1471
1472 CONFIGFS_ATTR(nvmet_ana_group_, ana_state);
1473
1474 static struct configfs_attribute *nvmet_ana_group_attrs[] = {
1475         &nvmet_ana_group_attr_ana_state,
1476         NULL,
1477 };
1478
1479 static void nvmet_ana_group_release(struct config_item *item)
1480 {
1481         struct nvmet_ana_group *grp = to_ana_group(item);
1482
1483         if (grp == &grp->port->ana_default_group)
1484                 return;
1485
1486         down_write(&nvmet_ana_sem);
1487         grp->port->ana_state[grp->grpid] = NVME_ANA_INACCESSIBLE;
1488         nvmet_ana_group_enabled[grp->grpid]--;
1489         up_write(&nvmet_ana_sem);
1490
1491         nvmet_port_send_ana_event(grp->port);
1492         kfree(grp);
1493 }
1494
1495 static struct configfs_item_operations nvmet_ana_group_item_ops = {
1496         .release                = nvmet_ana_group_release,
1497 };
1498
1499 static const struct config_item_type nvmet_ana_group_type = {
1500         .ct_item_ops            = &nvmet_ana_group_item_ops,
1501         .ct_attrs               = nvmet_ana_group_attrs,
1502         .ct_owner               = THIS_MODULE,
1503 };
1504
1505 static struct config_group *nvmet_ana_groups_make_group(
1506                 struct config_group *group, const char *name)
1507 {
1508         struct nvmet_port *port = ana_groups_to_port(&group->cg_item);
1509         struct nvmet_ana_group *grp;
1510         u32 grpid;
1511         int ret;
1512
1513         ret = kstrtou32(name, 0, &grpid);
1514         if (ret)
1515                 goto out;
1516
1517         ret = -EINVAL;
1518         if (grpid <= 1 || grpid > NVMET_MAX_ANAGRPS)
1519                 goto out;
1520
1521         ret = -ENOMEM;
1522         grp = kzalloc(sizeof(*grp), GFP_KERNEL);
1523         if (!grp)
1524                 goto out;
1525         grp->port = port;
1526         grp->grpid = grpid;
1527
1528         down_write(&nvmet_ana_sem);
1529         nvmet_ana_group_enabled[grpid]++;
1530         up_write(&nvmet_ana_sem);
1531
1532         nvmet_port_send_ana_event(grp->port);
1533
1534         config_group_init_type_name(&grp->group, name, &nvmet_ana_group_type);
1535         return &grp->group;
1536 out:
1537         return ERR_PTR(ret);
1538 }
1539
1540 static struct configfs_group_operations nvmet_ana_groups_group_ops = {
1541         .make_group             = nvmet_ana_groups_make_group,
1542 };
1543
1544 static const struct config_item_type nvmet_ana_groups_type = {
1545         .ct_group_ops           = &nvmet_ana_groups_group_ops,
1546         .ct_owner               = THIS_MODULE,
1547 };
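/*
 * Illustrative ANA usage: group 1 always exists as the default (see
 * nvmet_ports_make() below); additional groups are created as numbered
 * directories under a port's ana_groups/ and namespaces are assigned via
 * their ana_grpid attribute ("testnqn" is a placeholder):
 *
 *   mkdir /sys/kernel/config/nvmet/ports/1/ana_groups/2
 *   echo inaccessible > /sys/kernel/config/nvmet/ports/1/ana_groups/2/ana_state
 *   echo 2 > /sys/kernel/config/nvmet/subsystems/testnqn/namespaces/1/ana_grpid
 */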
1548
1549 /*
1550  * Ports definitions.
1551  */
1552 static void nvmet_port_release(struct config_item *item)
1553 {
1554         struct nvmet_port *port = to_nvmet_port(item);
1555
1556         list_del(&port->global_entry);
1557
1558         kfree(port->ana_state);
1559         kfree(port);
1560 }
1561
1562 static struct configfs_attribute *nvmet_port_attrs[] = {
1563         &nvmet_attr_addr_adrfam,
1564         &nvmet_attr_addr_treq,
1565         &nvmet_attr_addr_traddr,
1566         &nvmet_attr_addr_trsvcid,
1567         &nvmet_attr_addr_trtype,
1568         &nvmet_attr_param_inline_data_size,
1569 #ifdef CONFIG_BLK_DEV_INTEGRITY
1570         &nvmet_attr_param_pi_enable,
1571 #endif
1572         NULL,
1573 };
1574
1575 static struct configfs_item_operations nvmet_port_item_ops = {
1576         .release                = nvmet_port_release,
1577 };
1578
1579 static const struct config_item_type nvmet_port_type = {
1580         .ct_attrs               = nvmet_port_attrs,
1581         .ct_item_ops            = &nvmet_port_item_ops,
1582         .ct_owner               = THIS_MODULE,
1583 };
1584
1585 static struct config_group *nvmet_ports_make(struct config_group *group,
1586                 const char *name)
1587 {
1588         struct nvmet_port *port;
1589         u16 portid;
1590         u32 i;
1591
1592         if (kstrtou16(name, 0, &portid))
1593                 return ERR_PTR(-EINVAL);
1594
1595         port = kzalloc(sizeof(*port), GFP_KERNEL);
1596         if (!port)
1597                 return ERR_PTR(-ENOMEM);
1598
1599         port->ana_state = kcalloc(NVMET_MAX_ANAGRPS + 1,
1600                         sizeof(*port->ana_state), GFP_KERNEL);
1601         if (!port->ana_state) {
1602                 kfree(port);
1603                 return ERR_PTR(-ENOMEM);
1604         }
1605
1606         for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) {
1607                 if (i == NVMET_DEFAULT_ANA_GRPID)
1608                         port->ana_state[1] = NVME_ANA_OPTIMIZED;
1609                 else
1610                         port->ana_state[i] = NVME_ANA_INACCESSIBLE;
1611         }
1612
1613         list_add(&port->global_entry, &nvmet_ports_list);
1614
1615         INIT_LIST_HEAD(&port->entry);
1616         INIT_LIST_HEAD(&port->subsystems);
1617         INIT_LIST_HEAD(&port->referrals);
1618         port->inline_data_size = -1;    /* < 0 == let the transport choose */
1619
1620         port->disc_addr.portid = cpu_to_le16(portid);
1621         port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX;
1622         port->disc_addr.treq = NVMF_TREQ_DISABLE_SQFLOW;
1623         config_group_init_type_name(&port->group, name, &nvmet_port_type);
1624
1625         config_group_init_type_name(&port->subsys_group,
1626                         "subsystems", &nvmet_port_subsys_type);
1627         configfs_add_default_group(&port->subsys_group, &port->group);
1628
1629         config_group_init_type_name(&port->referrals_group,
1630                         "referrals", &nvmet_referrals_type);
1631         configfs_add_default_group(&port->referrals_group, &port->group);
1632
1633         config_group_init_type_name(&port->ana_groups_group,
1634                         "ana_groups", &nvmet_ana_groups_type);
1635         configfs_add_default_group(&port->ana_groups_group, &port->group);
1636
1637         port->ana_default_group.port = port;
1638         port->ana_default_group.grpid = NVMET_DEFAULT_ANA_GRPID;
1639         config_group_init_type_name(&port->ana_default_group.group,
1640                         __stringify(NVMET_DEFAULT_ANA_GRPID),
1641                         &nvmet_ana_group_type);
1642         configfs_add_default_group(&port->ana_default_group.group,
1643                         &port->ana_groups_group);
1644
1645         return &port->group;
1646 }
1647
1648 static struct configfs_group_operations nvmet_ports_group_ops = {
1649         .make_group             = nvmet_ports_make,
1650 };
1651
1652 static const struct config_item_type nvmet_ports_type = {
1653         .ct_group_ops           = &nvmet_ports_group_ops,
1654         .ct_owner               = THIS_MODULE,
1655 };
1656
1657 static struct config_group nvmet_subsystems_group;
1658 static struct config_group nvmet_ports_group;
1659
1660 static void nvmet_host_release(struct config_item *item)
1661 {
1662         struct nvmet_host *host = to_host(item);
1663
1664         kfree(host);
1665 }
1666
1667 static struct configfs_item_operations nvmet_host_item_ops = {
1668         .release                = nvmet_host_release,
1669 };
1670
1671 static const struct config_item_type nvmet_host_type = {
1672         .ct_item_ops            = &nvmet_host_item_ops,
1673         .ct_owner               = THIS_MODULE,
1674 };
1675
1676 static struct config_group *nvmet_hosts_make_group(struct config_group *group,
1677                 const char *name)
1678 {
1679         struct nvmet_host *host;
1680
1681         host = kzalloc(sizeof(*host), GFP_KERNEL);
1682         if (!host)
1683                 return ERR_PTR(-ENOMEM);
1684
1685         config_group_init_type_name(&host->group, name, &nvmet_host_type);
1686
1687         return &host->group;
1688 }
1689
1690 static struct configfs_group_operations nvmet_hosts_group_ops = {
1691         .make_group             = nvmet_hosts_make_group,
1692 };
1693
1694 static const struct config_item_type nvmet_hosts_type = {
1695         .ct_group_ops           = &nvmet_hosts_group_ops,
1696         .ct_owner               = THIS_MODULE,
1697 };
1698
1699 static struct config_group nvmet_hosts_group;
1700
1701 static const struct config_item_type nvmet_root_type = {
1702         .ct_owner               = THIS_MODULE,
1703 };
1704
1705 static struct configfs_subsystem nvmet_configfs_subsystem = {
1706         .su_group = {
1707                 .cg_item = {
1708                         .ci_namebuf     = "nvmet",
1709                         .ci_type        = &nvmet_root_type,
1710                 },
1711         },
1712 };
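/*
 * The "nvmet" configfs subsystem registered below shows up as the nvmet/
 * directory of the configfs mount; user space still has to mount configfs
 * itself, e.g. (illustrative): mount -t configfs none /sys/kernel/config
 */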
1713
1714 int __init nvmet_init_configfs(void)
1715 {
1716         int ret;
1717
1718         config_group_init(&nvmet_configfs_subsystem.su_group);
1719         mutex_init(&nvmet_configfs_subsystem.su_mutex);
1720
1721         config_group_init_type_name(&nvmet_subsystems_group,
1722                         "subsystems", &nvmet_subsystems_type);
1723         configfs_add_default_group(&nvmet_subsystems_group,
1724                         &nvmet_configfs_subsystem.su_group);
1725
1726         config_group_init_type_name(&nvmet_ports_group,
1727                         "ports", &nvmet_ports_type);
1728         configfs_add_default_group(&nvmet_ports_group,
1729                         &nvmet_configfs_subsystem.su_group);
1730
1731         config_group_init_type_name(&nvmet_hosts_group,
1732                         "hosts", &nvmet_hosts_type);
1733         configfs_add_default_group(&nvmet_hosts_group,
1734                         &nvmet_configfs_subsystem.su_group);
1735
1736         ret = configfs_register_subsystem(&nvmet_configfs_subsystem);
1737         if (ret) {
1738                 pr_err("configfs_register_subsystem: %d\n", ret);
1739                 return ret;
1740         }
1741
1742         return 0;
1743 }
1744
1745 void __exit nvmet_exit_configfs(void)
1746 {
1747         configfs_unregister_subsystem(&nvmet_configfs_subsystem);
1748 }