drivers/nvme/target/configfs.c (linux-2.6-microblaze.git)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Configfs interface for the NVMe target.
4  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
5  */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/slab.h>
10 #include <linux/stat.h>
11 #include <linux/ctype.h>
12 #include <linux/pci.h>
13 #include <linux/pci-p2pdma.h>
14
15 #include "nvmet.h"
16
17 static const struct config_item_type nvmet_host_type;
18 static const struct config_item_type nvmet_subsys_type;
19
20 static LIST_HEAD(nvmet_ports_list);
21 struct list_head *nvmet_ports = &nvmet_ports_list;
22
23 struct nvmet_type_name_map {
24         u8              type;
25         const char      *name;
26 };
27
28 static struct nvmet_type_name_map nvmet_transport[] = {
29         { NVMF_TRTYPE_RDMA,     "rdma" },
30         { NVMF_TRTYPE_FC,       "fc" },
31         { NVMF_TRTYPE_TCP,      "tcp" },
32         { NVMF_TRTYPE_LOOP,     "loop" },
33 };
34
35 static const struct nvmet_type_name_map nvmet_addr_family[] = {
36         { NVMF_ADDR_FAMILY_PCI,         "pcie" },
37         { NVMF_ADDR_FAMILY_IP4,         "ipv4" },
38         { NVMF_ADDR_FAMILY_IP6,         "ipv6" },
39         { NVMF_ADDR_FAMILY_IB,          "ib" },
40         { NVMF_ADDR_FAMILY_FC,          "fc" },
41         { NVMF_ADDR_FAMILY_LOOP,        "loop" },
42 };
43
44 static bool nvmet_is_port_enabled(struct nvmet_port *p, const char *caller)
45 {
46         if (p->enabled)
47                 pr_err("Disable port '%u' before changing attribute in %s\n",
48                                 le16_to_cpu(p->disc_addr.portid), caller);
49         return p->enabled;
50 }
51
52 /*
53  * nvmet_port Generic ConfigFS definitions.
54  * Used in any place in the ConfigFS tree that refers to an address.
55  */
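/*
 * Illustrative example of driving these address attributes from userspace
 * (a minimal sketch; assumes configfs is mounted at the conventional
 * /sys/kernel/config and that a port directory named "1" already exists;
 * the address values are placeholders):
 *
 *   cd /sys/kernel/config/nvmet/ports/1
 *   echo tcp          > addr_trtype
 *   echo ipv4         > addr_adrfam
 *   echo 192.168.0.10 > addr_traddr
 *   echo 4420         > addr_trsvcid
 *
 * All of these stores fail with -EACCES while the port is enabled, see
 * nvmet_is_port_enabled() above.
 */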
56 static ssize_t nvmet_addr_adrfam_show(struct config_item *item, char *page)
57 {
58         u8 adrfam = to_nvmet_port(item)->disc_addr.adrfam;
59         int i;
60
61         for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
62                 if (nvmet_addr_family[i].type == adrfam)
63                         return sprintf(page, "%s\n", nvmet_addr_family[i].name);
64         }
65
66         return sprintf(page, "\n");
67 }
68
69 static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
70                 const char *page, size_t count)
71 {
72         struct nvmet_port *port = to_nvmet_port(item);
73         int i;
74
75         if (nvmet_is_port_enabled(port, __func__))
76                 return -EACCES;
77
78         for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
79                 if (sysfs_streq(page, nvmet_addr_family[i].name))
80                         goto found;
81         }
82
83         pr_err("Invalid value '%s' for adrfam\n", page);
84         return -EINVAL;
85
86 found:
87         port->disc_addr.adrfam = nvmet_addr_family[i].type;
88         return count;
89 }
90
91 CONFIGFS_ATTR(nvmet_, addr_adrfam);
92
93 static ssize_t nvmet_addr_portid_show(struct config_item *item,
94                 char *page)
95 {
96         struct nvmet_port *port = to_nvmet_port(item);
97
98         return snprintf(page, PAGE_SIZE, "%d\n",
99                         le16_to_cpu(port->disc_addr.portid));
100 }
101
102 static ssize_t nvmet_addr_portid_store(struct config_item *item,
103                 const char *page, size_t count)
104 {
105         struct nvmet_port *port = to_nvmet_port(item);
106         u16 portid = 0;
107
108         if (kstrtou16(page, 0, &portid)) {
109                 pr_err("Invalid value '%s' for portid\n", page);
110                 return -EINVAL;
111         }
112
113         if (nvmet_is_port_enabled(port, __func__))
114                 return -EACCES;
115
116         port->disc_addr.portid = cpu_to_le16(portid);
117         return count;
118 }
119
120 CONFIGFS_ATTR(nvmet_, addr_portid);
121
122 static ssize_t nvmet_addr_traddr_show(struct config_item *item,
123                 char *page)
124 {
125         struct nvmet_port *port = to_nvmet_port(item);
126
127         return snprintf(page, PAGE_SIZE, "%s\n",
128                         port->disc_addr.traddr);
129 }
130
131 static ssize_t nvmet_addr_traddr_store(struct config_item *item,
132                 const char *page, size_t count)
133 {
134         struct nvmet_port *port = to_nvmet_port(item);
135
136         if (count > NVMF_TRADDR_SIZE) {
137                 pr_err("Invalid value '%s' for traddr\n", page);
138                 return -EINVAL;
139         }
140
141         if (nvmet_is_port_enabled(port, __func__))
142                 return -EACCES;
143
144         if (sscanf(page, "%s\n", port->disc_addr.traddr) != 1)
145                 return -EINVAL;
146         return count;
147 }
148
149 CONFIGFS_ATTR(nvmet_, addr_traddr);
150
151 static const struct nvmet_type_name_map nvmet_addr_treq[] = {
152         { NVMF_TREQ_NOT_SPECIFIED,      "not specified" },
153         { NVMF_TREQ_REQUIRED,           "required" },
154         { NVMF_TREQ_NOT_REQUIRED,       "not required" },
155 };
156
157 static ssize_t nvmet_addr_treq_show(struct config_item *item, char *page)
158 {
159         u8 treq = to_nvmet_port(item)->disc_addr.treq &
160                 NVME_TREQ_SECURE_CHANNEL_MASK;
161         int i;
162
163         for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
164                 if (treq == nvmet_addr_treq[i].type)
165                         return sprintf(page, "%s\n", nvmet_addr_treq[i].name);
166         }
167
168         return sprintf(page, "\n");
169 }
170
171 static ssize_t nvmet_addr_treq_store(struct config_item *item,
172                 const char *page, size_t count)
173 {
174         struct nvmet_port *port = to_nvmet_port(item);
175         u8 treq = port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK;
176         int i;
177
178         if (nvmet_is_port_enabled(port, __func__))
179                 return -EACCES;
180
181         for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
182                 if (sysfs_streq(page, nvmet_addr_treq[i].name))
183                         goto found;
184         }
185
186         pr_err("Invalid value '%s' for treq\n", page);
187         return -EINVAL;
188
189 found:
190         treq |= nvmet_addr_treq[i].type;
191         port->disc_addr.treq = treq;
192         return count;
193 }
194
195 CONFIGFS_ATTR(nvmet_, addr_treq);
196
197 static ssize_t nvmet_addr_trsvcid_show(struct config_item *item,
198                 char *page)
199 {
200         struct nvmet_port *port = to_nvmet_port(item);
201
202         return snprintf(page, PAGE_SIZE, "%s\n",
203                         port->disc_addr.trsvcid);
204 }
205
206 static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
207                 const char *page, size_t count)
208 {
209         struct nvmet_port *port = to_nvmet_port(item);
210
211         if (count > NVMF_TRSVCID_SIZE) {
212                 pr_err("Invalid value '%s' for trsvcid\n", page);
213                 return -EINVAL;
214         }
215         if (nvmet_is_port_enabled(port, __func__))
216                 return -EACCES;
217
218         if (sscanf(page, "%s\n", port->disc_addr.trsvcid) != 1)
219                 return -EINVAL;
220         return count;
221 }
222
223 CONFIGFS_ATTR(nvmet_, addr_trsvcid);
224
225 static ssize_t nvmet_param_inline_data_size_show(struct config_item *item,
226                 char *page)
227 {
228         struct nvmet_port *port = to_nvmet_port(item);
229
230         return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size);
231 }
232
233 static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
234                 const char *page, size_t count)
235 {
236         struct nvmet_port *port = to_nvmet_port(item);
237         int ret;
238
239         if (nvmet_is_port_enabled(port, __func__))
240                 return -EACCES;
241         ret = kstrtoint(page, 0, &port->inline_data_size);
242         if (ret) {
243                 pr_err("Invalid value '%s' for inline_data_size\n", page);
244                 return -EINVAL;
245         }
246         return count;
247 }
248
249 CONFIGFS_ATTR(nvmet_, param_inline_data_size);
250
251 #ifdef CONFIG_BLK_DEV_INTEGRITY
252 static ssize_t nvmet_param_pi_enable_show(struct config_item *item,
253                 char *page)
254 {
255         struct nvmet_port *port = to_nvmet_port(item);
256
257         return snprintf(page, PAGE_SIZE, "%d\n", port->pi_enable);
258 }
259
260 static ssize_t nvmet_param_pi_enable_store(struct config_item *item,
261                 const char *page, size_t count)
262 {
263         struct nvmet_port *port = to_nvmet_port(item);
264         bool val;
265
266         if (strtobool(page, &val))
267                 return -EINVAL;
268
269         if (port->enabled) {
270                 pr_err("Disable port before setting pi_enable value.\n");
271                 return -EACCES;
272         }
273
274         port->pi_enable = val;
275         return count;
276 }
277
278 CONFIGFS_ATTR(nvmet_, param_pi_enable);
279 #endif
280
281 static ssize_t nvmet_addr_trtype_show(struct config_item *item,
282                 char *page)
283 {
284         struct nvmet_port *port = to_nvmet_port(item);
285         int i;
286
287         for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
288                 if (port->disc_addr.trtype == nvmet_transport[i].type)
289                         return sprintf(page, "%s\n", nvmet_transport[i].name);
290         }
291
292         return sprintf(page, "\n");
293 }
294
295 static void nvmet_port_init_tsas_rdma(struct nvmet_port *port)
296 {
297         port->disc_addr.tsas.rdma.qptype = NVMF_RDMA_QPTYPE_CONNECTED;
298         port->disc_addr.tsas.rdma.prtype = NVMF_RDMA_PRTYPE_NOT_SPECIFIED;
299         port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM;
300 }
301
302 static ssize_t nvmet_addr_trtype_store(struct config_item *item,
303                 const char *page, size_t count)
304 {
305         struct nvmet_port *port = to_nvmet_port(item);
306         int i;
307
308         if (nvmet_is_port_enabled(port, __func__))
309                 return -EACCES;
310
311         for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
312                 if (sysfs_streq(page, nvmet_transport[i].name))
313                         goto found;
314         }
315
316         pr_err("Invalid value '%s' for trtype\n", page);
317         return -EINVAL;
318
319 found:
320         memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
321         port->disc_addr.trtype = nvmet_transport[i].type;
322         if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA)
323                 nvmet_port_init_tsas_rdma(port);
324         return count;
325 }
326
327 CONFIGFS_ATTR(nvmet_, addr_trtype);
328
329 /*
330  * Namespace structures & file operation functions below
331  */
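/*
 * Illustrative example of namespace configuration (a minimal sketch;
 * assumes configfs is mounted at /sys/kernel/config, a subsystem named
 * "testnqn" already exists, and /dev/nvme0n1 is a valid backing device
 * on the target):
 *
 *   cd /sys/kernel/config/nvmet/subsystems/testnqn/namespaces
 *   mkdir 1
 *   echo /dev/nvme0n1 > 1/device_path
 *   echo 1            > 1/enable
 *
 * device_path and the identifier attributes below can only be changed
 * while the namespace is disabled; otherwise the stores return -EBUSY.
 */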
332 static ssize_t nvmet_ns_device_path_show(struct config_item *item, char *page)
333 {
334         return sprintf(page, "%s\n", to_nvmet_ns(item)->device_path);
335 }
336
337 static ssize_t nvmet_ns_device_path_store(struct config_item *item,
338                 const char *page, size_t count)
339 {
340         struct nvmet_ns *ns = to_nvmet_ns(item);
341         struct nvmet_subsys *subsys = ns->subsys;
342         size_t len;
343         int ret;
344
345         mutex_lock(&subsys->lock);
346         ret = -EBUSY;
347         if (ns->enabled)
348                 goto out_unlock;
349
350         ret = -EINVAL;
351         len = strcspn(page, "\n");
352         if (!len)
353                 goto out_unlock;
354
355         kfree(ns->device_path);
356         ret = -ENOMEM;
357         ns->device_path = kmemdup_nul(page, len, GFP_KERNEL);
358         if (!ns->device_path)
359                 goto out_unlock;
360
361         mutex_unlock(&subsys->lock);
362         return count;
363
364 out_unlock:
365         mutex_unlock(&subsys->lock);
366         return ret;
367 }
368
369 CONFIGFS_ATTR(nvmet_ns_, device_path);
370
371 #ifdef CONFIG_PCI_P2PDMA
372 static ssize_t nvmet_ns_p2pmem_show(struct config_item *item, char *page)
373 {
374         struct nvmet_ns *ns = to_nvmet_ns(item);
375
376         return pci_p2pdma_enable_show(page, ns->p2p_dev, ns->use_p2pmem);
377 }
378
379 static ssize_t nvmet_ns_p2pmem_store(struct config_item *item,
380                 const char *page, size_t count)
381 {
382         struct nvmet_ns *ns = to_nvmet_ns(item);
383         struct pci_dev *p2p_dev = NULL;
384         bool use_p2pmem;
385         int ret = count;
386         int error;
387
388         mutex_lock(&ns->subsys->lock);
389         if (ns->enabled) {
390                 ret = -EBUSY;
391                 goto out_unlock;
392         }
393
394         error = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pmem);
395         if (error) {
396                 ret = error;
397                 goto out_unlock;
398         }
399
400         ns->use_p2pmem = use_p2pmem;
401         pci_dev_put(ns->p2p_dev);
402         ns->p2p_dev = p2p_dev;
403
404 out_unlock:
405         mutex_unlock(&ns->subsys->lock);
406
407         return ret;
408 }
409
410 CONFIGFS_ATTR(nvmet_ns_, p2pmem);
411 #endif /* CONFIG_PCI_P2PDMA */
412
413 static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page)
414 {
415         return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid);
416 }
417
418 static ssize_t nvmet_ns_device_uuid_store(struct config_item *item,
419                                           const char *page, size_t count)
420 {
421         struct nvmet_ns *ns = to_nvmet_ns(item);
422         struct nvmet_subsys *subsys = ns->subsys;
423         int ret = 0;
424
425         mutex_lock(&subsys->lock);
426         if (ns->enabled) {
427                 ret = -EBUSY;
428                 goto out_unlock;
429         }
430
431         if (uuid_parse(page, &ns->uuid))
432                 ret = -EINVAL;
433
434 out_unlock:
435         mutex_unlock(&subsys->lock);
436         return ret ? ret : count;
437 }
438
439 CONFIGFS_ATTR(nvmet_ns_, device_uuid);
440
441 static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page)
442 {
443         return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid);
444 }
445
446 static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
447                 const char *page, size_t count)
448 {
449         struct nvmet_ns *ns = to_nvmet_ns(item);
450         struct nvmet_subsys *subsys = ns->subsys;
451         u8 nguid[16];
452         const char *p = page;
453         int i;
454         int ret = 0;
455
456         mutex_lock(&subsys->lock);
457         if (ns->enabled) {
458                 ret = -EBUSY;
459                 goto out_unlock;
460         }
461
462         for (i = 0; i < 16; i++) {
463                 if (p + 2 > page + count) {
464                         ret = -EINVAL;
465                         goto out_unlock;
466                 }
467                 if (!isxdigit(p[0]) || !isxdigit(p[1])) {
468                         ret = -EINVAL;
469                         goto out_unlock;
470                 }
471
472                 nguid[i] = (hex_to_bin(p[0]) << 4) | hex_to_bin(p[1]);
473                 p += 2;
474
475                 if (*p == '-' || *p == ':')
476                         p++;
477         }
478
479         memcpy(&ns->nguid, nguid, sizeof(nguid));
480 out_unlock:
481         mutex_unlock(&subsys->lock);
482         return ret ? ret : count;
483 }
484
485 CONFIGFS_ATTR(nvmet_ns_, device_nguid);
486
487 static ssize_t nvmet_ns_ana_grpid_show(struct config_item *item, char *page)
488 {
489         return sprintf(page, "%u\n", to_nvmet_ns(item)->anagrpid);
490 }
491
492 static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item,
493                 const char *page, size_t count)
494 {
495         struct nvmet_ns *ns = to_nvmet_ns(item);
496         u32 oldgrpid, newgrpid;
497         int ret;
498
499         ret = kstrtou32(page, 0, &newgrpid);
500         if (ret)
501                 return ret;
502
503         if (newgrpid < 1 || newgrpid > NVMET_MAX_ANAGRPS)
504                 return -EINVAL;
505
506         down_write(&nvmet_ana_sem);
507         oldgrpid = ns->anagrpid;
508         nvmet_ana_group_enabled[newgrpid]++;
509         ns->anagrpid = newgrpid;
510         nvmet_ana_group_enabled[oldgrpid]--;
511         nvmet_ana_chgcnt++;
512         up_write(&nvmet_ana_sem);
513
514         nvmet_send_ana_event(ns->subsys, NULL);
515         return count;
516 }
517
518 CONFIGFS_ATTR(nvmet_ns_, ana_grpid);
519
520 static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
521 {
522         return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled);
523 }
524
525 static ssize_t nvmet_ns_enable_store(struct config_item *item,
526                 const char *page, size_t count)
527 {
528         struct nvmet_ns *ns = to_nvmet_ns(item);
529         bool enable;
530         int ret = 0;
531
532         if (strtobool(page, &enable))
533                 return -EINVAL;
534
535         if (enable)
536                 ret = nvmet_ns_enable(ns);
537         else
538                 nvmet_ns_disable(ns);
539
540         return ret ? ret : count;
541 }
542
543 CONFIGFS_ATTR(nvmet_ns_, enable);
544
545 static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page)
546 {
547         return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io);
548 }
549
550 static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
551                 const char *page, size_t count)
552 {
553         struct nvmet_ns *ns = to_nvmet_ns(item);
554         bool val;
555
556         if (strtobool(page, &val))
557                 return -EINVAL;
558
559         mutex_lock(&ns->subsys->lock);
560         if (ns->enabled) {
561                 pr_err("disable ns before setting buffered_io value.\n");
562                 mutex_unlock(&ns->subsys->lock);
563                 return -EINVAL;
564         }
565
566         ns->buffered_io = val;
567         mutex_unlock(&ns->subsys->lock);
568         return count;
569 }
570
571 CONFIGFS_ATTR(nvmet_ns_, buffered_io);
572
573 static ssize_t nvmet_ns_revalidate_size_store(struct config_item *item,
574                 const char *page, size_t count)
575 {
576         struct nvmet_ns *ns = to_nvmet_ns(item);
577         bool val;
578
579         if (strtobool(page, &val))
580                 return -EINVAL;
581
582         if (!val)
583                 return -EINVAL;
584
585         mutex_lock(&ns->subsys->lock);
586         if (!ns->enabled) {
587                 pr_err("enable ns before revalidate.\n");
588                 mutex_unlock(&ns->subsys->lock);
589                 return -EINVAL;
590         }
591         nvmet_ns_revalidate(ns);
592         mutex_unlock(&ns->subsys->lock);
593         return count;
594 }
595
596 CONFIGFS_ATTR_WO(nvmet_ns_, revalidate_size);
597
598 static struct configfs_attribute *nvmet_ns_attrs[] = {
599         &nvmet_ns_attr_device_path,
600         &nvmet_ns_attr_device_nguid,
601         &nvmet_ns_attr_device_uuid,
602         &nvmet_ns_attr_ana_grpid,
603         &nvmet_ns_attr_enable,
604         &nvmet_ns_attr_buffered_io,
605         &nvmet_ns_attr_revalidate_size,
606 #ifdef CONFIG_PCI_P2PDMA
607         &nvmet_ns_attr_p2pmem,
608 #endif
609         NULL,
610 };
611
612 static void nvmet_ns_release(struct config_item *item)
613 {
614         struct nvmet_ns *ns = to_nvmet_ns(item);
615
616         nvmet_ns_free(ns);
617 }
618
619 static struct configfs_item_operations nvmet_ns_item_ops = {
620         .release                = nvmet_ns_release,
621 };
622
623 static const struct config_item_type nvmet_ns_type = {
624         .ct_item_ops            = &nvmet_ns_item_ops,
625         .ct_attrs               = nvmet_ns_attrs,
626         .ct_owner               = THIS_MODULE,
627 };
628
629 static struct config_group *nvmet_ns_make(struct config_group *group,
630                 const char *name)
631 {
632         struct nvmet_subsys *subsys = namespaces_to_subsys(&group->cg_item);
633         struct nvmet_ns *ns;
634         int ret;
635         u32 nsid;
636
637         ret = kstrtou32(name, 0, &nsid);
638         if (ret)
639                 goto out;
640
641         ret = -EINVAL;
642         if (nsid == 0 || nsid == NVME_NSID_ALL) {
643                 pr_err("invalid nsid %#x\n", nsid);
644                 goto out;
645         }
646
647         ret = -ENOMEM;
648         ns = nvmet_ns_alloc(subsys, nsid);
649         if (!ns)
650                 goto out;
651         config_group_init_type_name(&ns->group, name, &nvmet_ns_type);
652
653         pr_info("adding nsid %d to subsystem %s\n", nsid, subsys->subsysnqn);
654
655         return &ns->group;
656 out:
657         return ERR_PTR(ret);
658 }
659
660 static struct configfs_group_operations nvmet_namespaces_group_ops = {
661         .make_group             = nvmet_ns_make,
662 };
663
664 static const struct config_item_type nvmet_namespaces_type = {
665         .ct_group_ops           = &nvmet_namespaces_group_ops,
666         .ct_owner               = THIS_MODULE,
667 };
668
669 #ifdef CONFIG_NVME_TARGET_PASSTHRU
670
671 static ssize_t nvmet_passthru_device_path_show(struct config_item *item,
672                 char *page)
673 {
674         struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
675
676         return snprintf(page, PAGE_SIZE, "%s\n", subsys->passthru_ctrl_path);
677 }
678
679 static ssize_t nvmet_passthru_device_path_store(struct config_item *item,
680                 const char *page, size_t count)
681 {
682         struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
683         size_t len;
684         int ret;
685
686         mutex_lock(&subsys->lock);
687
688         ret = -EBUSY;
689         if (subsys->passthru_ctrl)
690                 goto out_unlock;
691
692         ret = -EINVAL;
693         len = strcspn(page, "\n");
694         if (!len)
695                 goto out_unlock;
696
697         kfree(subsys->passthru_ctrl_path);
698         ret = -ENOMEM;
699         subsys->passthru_ctrl_path = kstrndup(page, len, GFP_KERNEL);
700         if (!subsys->passthru_ctrl_path)
701                 goto out_unlock;
702
703         mutex_unlock(&subsys->lock);
704
705         return count;
706 out_unlock:
707         mutex_unlock(&subsys->lock);
708         return ret;
709 }
710 CONFIGFS_ATTR(nvmet_passthru_, device_path);
711
712 static ssize_t nvmet_passthru_enable_show(struct config_item *item,
713                 char *page)
714 {
715         struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
716
717         return sprintf(page, "%d\n", subsys->passthru_ctrl ? 1 : 0);
718 }
719
720 static ssize_t nvmet_passthru_enable_store(struct config_item *item,
721                 const char *page, size_t count)
722 {
723         struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
724         bool enable;
725         int ret = 0;
726
727         if (strtobool(page, &enable))
728                 return -EINVAL;
729
730         if (enable)
731                 ret = nvmet_passthru_ctrl_enable(subsys);
732         else
733                 nvmet_passthru_ctrl_disable(subsys);
734
735         return ret ? ret : count;
736 }
737 CONFIGFS_ATTR(nvmet_passthru_, enable);
738
739 static ssize_t nvmet_passthru_admin_timeout_show(struct config_item *item,
740                 char *page)
741 {
742         return sprintf(page, "%u\n", to_subsys(item->ci_parent)->admin_timeout);
743 }
744
745 static ssize_t nvmet_passthru_admin_timeout_store(struct config_item *item,
746                 const char *page, size_t count)
747 {
748         struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
749         unsigned int timeout;
750
751         if (kstrtouint(page, 0, &timeout))
752                 return -EINVAL;
753         subsys->admin_timeout = timeout;
754         return count;
755 }
756 CONFIGFS_ATTR(nvmet_passthru_, admin_timeout);
757
758 static ssize_t nvmet_passthru_io_timeout_show(struct config_item *item,
759                 char *page)
760 {
761         return sprintf(page, "%u\n", to_subsys(item->ci_parent)->io_timeout);
762 }
763
764 static ssize_t nvmet_passthru_io_timeout_store(struct config_item *item,
765                 const char *page, size_t count)
766 {
767         struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
768         unsigned int timeout;
769
770         if (kstrtouint(page, 0, &timeout))
771                 return -EINVAL;
772         subsys->io_timeout = timeout;
773         return count;
774 }
775 CONFIGFS_ATTR(nvmet_passthru_, io_timeout);
776
777 static struct configfs_attribute *nvmet_passthru_attrs[] = {
778         &nvmet_passthru_attr_device_path,
779         &nvmet_passthru_attr_enable,
780         &nvmet_passthru_attr_admin_timeout,
781         &nvmet_passthru_attr_io_timeout,
782         NULL,
783 };
784
785 static const struct config_item_type nvmet_passthru_type = {
786         .ct_attrs               = nvmet_passthru_attrs,
787         .ct_owner               = THIS_MODULE,
788 };
789
790 static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
791 {
792         config_group_init_type_name(&subsys->passthru_group,
793                                     "passthru", &nvmet_passthru_type);
794         configfs_add_default_group(&subsys->passthru_group,
795                                    &subsys->group);
796 }
797
798 #else /* CONFIG_NVME_TARGET_PASSTHRU */
799
800 static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
801 {
802 }
803
804 #endif /* CONFIG_NVME_TARGET_PASSTHRU */
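/*
 * Illustrative example of the passthru group (a minimal sketch; assumes
 * CONFIG_NVME_TARGET_PASSTHRU is set, configfs is mounted at
 * /sys/kernel/config, and /dev/nvme1 is the character device of the
 * controller to be passed through):
 *
 *   cd /sys/kernel/config/nvmet/subsystems/testnqn/passthru
 *   echo /dev/nvme1 > device_path
 *   echo 1          > enable
 *
 * device_path must be set before enabling; once enabled, changing it
 * returns -EBUSY until passthru is disabled again.
 */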
805
806 static int nvmet_port_subsys_allow_link(struct config_item *parent,
807                 struct config_item *target)
808 {
809         struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
810         struct nvmet_subsys *subsys;
811         struct nvmet_subsys_link *link, *p;
812         int ret;
813
814         if (target->ci_type != &nvmet_subsys_type) {
815                 pr_err("can only link subsystems into the subsystems dir!\n");
816                 return -EINVAL;
817         }
818         subsys = to_subsys(target);
819         link = kmalloc(sizeof(*link), GFP_KERNEL);
820         if (!link)
821                 return -ENOMEM;
822         link->subsys = subsys;
823
824         down_write(&nvmet_config_sem);
825         ret = -EEXIST;
826         list_for_each_entry(p, &port->subsystems, entry) {
827                 if (p->subsys == subsys)
828                         goto out_free_link;
829         }
830
831         if (list_empty(&port->subsystems)) {
832                 ret = nvmet_enable_port(port);
833                 if (ret)
834                         goto out_free_link;
835         }
836
837         list_add_tail(&link->entry, &port->subsystems);
838         nvmet_port_disc_changed(port, subsys);
839
840         up_write(&nvmet_config_sem);
841         return 0;
842
843 out_free_link:
844         up_write(&nvmet_config_sem);
845         kfree(link);
846         return ret;
847 }
848
849 static void nvmet_port_subsys_drop_link(struct config_item *parent,
850                 struct config_item *target)
851 {
852         struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
853         struct nvmet_subsys *subsys = to_subsys(target);
854         struct nvmet_subsys_link *p;
855
856         down_write(&nvmet_config_sem);
857         list_for_each_entry(p, &port->subsystems, entry) {
858                 if (p->subsys == subsys)
859                         goto found;
860         }
861         up_write(&nvmet_config_sem);
862         return;
863
864 found:
865         list_del(&p->entry);
866         nvmet_port_del_ctrls(port, subsys);
867         nvmet_port_disc_changed(port, subsys);
868
869         if (list_empty(&port->subsystems))
870                 nvmet_disable_port(port);
871         up_write(&nvmet_config_sem);
872         kfree(p);
873 }
874
875 static struct configfs_item_operations nvmet_port_subsys_item_ops = {
876         .allow_link             = nvmet_port_subsys_allow_link,
877         .drop_link              = nvmet_port_subsys_drop_link,
878 };
879
880 static const struct config_item_type nvmet_port_subsys_type = {
881         .ct_item_ops            = &nvmet_port_subsys_item_ops,
882         .ct_owner               = THIS_MODULE,
883 };
884
885 static int nvmet_allowed_hosts_allow_link(struct config_item *parent,
886                 struct config_item *target)
887 {
888         struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
889         struct nvmet_host *host;
890         struct nvmet_host_link *link, *p;
891         int ret;
892
893         if (target->ci_type != &nvmet_host_type) {
894                 pr_err("can only link hosts into the allowed_hosts directory!\n");
895                 return -EINVAL;
896         }
897
898         host = to_host(target);
899         link = kmalloc(sizeof(*link), GFP_KERNEL);
900         if (!link)
901                 return -ENOMEM;
902         link->host = host;
903
904         down_write(&nvmet_config_sem);
905         ret = -EINVAL;
906         if (subsys->allow_any_host) {
907                 pr_err("can't add hosts when allow_any_host is set!\n");
908                 goto out_free_link;
909         }
910
911         ret = -EEXIST;
912         list_for_each_entry(p, &subsys->hosts, entry) {
913                 if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
914                         goto out_free_link;
915         }
916         list_add_tail(&link->entry, &subsys->hosts);
917         nvmet_subsys_disc_changed(subsys, host);
918
919         up_write(&nvmet_config_sem);
920         return 0;
921 out_free_link:
922         up_write(&nvmet_config_sem);
923         kfree(link);
924         return ret;
925 }
926
927 static void nvmet_allowed_hosts_drop_link(struct config_item *parent,
928                 struct config_item *target)
929 {
930         struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
931         struct nvmet_host *host = to_host(target);
932         struct nvmet_host_link *p;
933
934         down_write(&nvmet_config_sem);
935         list_for_each_entry(p, &subsys->hosts, entry) {
936                 if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
937                         goto found;
938         }
939         up_write(&nvmet_config_sem);
940         return;
941
942 found:
943         list_del(&p->entry);
944         nvmet_subsys_disc_changed(subsys, host);
945
946         up_write(&nvmet_config_sem);
947         kfree(p);
948 }
949
950 static struct configfs_item_operations nvmet_allowed_hosts_item_ops = {
951         .allow_link             = nvmet_allowed_hosts_allow_link,
952         .drop_link              = nvmet_allowed_hosts_drop_link,
953 };
954
955 static const struct config_item_type nvmet_allowed_hosts_type = {
956         .ct_item_ops            = &nvmet_allowed_hosts_item_ops,
957         .ct_owner               = THIS_MODULE,
958 };
959
960 static ssize_t nvmet_subsys_attr_allow_any_host_show(struct config_item *item,
961                 char *page)
962 {
963         return snprintf(page, PAGE_SIZE, "%d\n",
964                 to_subsys(item)->allow_any_host);
965 }
966
967 static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item,
968                 const char *page, size_t count)
969 {
970         struct nvmet_subsys *subsys = to_subsys(item);
971         bool allow_any_host;
972         int ret = 0;
973
974         if (strtobool(page, &allow_any_host))
975                 return -EINVAL;
976
977         down_write(&nvmet_config_sem);
978         if (allow_any_host && !list_empty(&subsys->hosts)) {
979                 pr_err("Can't set allow_any_host when explicit hosts are set!\n");
980                 ret = -EINVAL;
981                 goto out_unlock;
982         }
983
984         if (subsys->allow_any_host != allow_any_host) {
985                 subsys->allow_any_host = allow_any_host;
986                 nvmet_subsys_disc_changed(subsys, NULL);
987         }
988
989 out_unlock:
990         up_write(&nvmet_config_sem);
991         return ret ? ret : count;
992 }
993
994 CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host);
995
996 static ssize_t nvmet_subsys_attr_version_show(struct config_item *item,
997                                               char *page)
998 {
999         struct nvmet_subsys *subsys = to_subsys(item);
1000
1001         if (NVME_TERTIARY(subsys->ver))
1002                 return snprintf(page, PAGE_SIZE, "%llu.%llu.%llu\n",
1003                                 NVME_MAJOR(subsys->ver),
1004                                 NVME_MINOR(subsys->ver),
1005                                 NVME_TERTIARY(subsys->ver));
1006
1007         return snprintf(page, PAGE_SIZE, "%llu.%llu\n",
1008                         NVME_MAJOR(subsys->ver),
1009                         NVME_MINOR(subsys->ver));
1010 }
1011
1012 static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
1013                                                const char *page, size_t count)
1014 {
1015         struct nvmet_subsys *subsys = to_subsys(item);
1016         int major, minor, tertiary = 0;
1017         int ret;
1018
1019         /* passthru subsystems use the underlying controller's version */
1020         if (nvmet_passthru_ctrl(subsys))
1021                 return -EINVAL;
1022
1023         ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
1024         if (ret != 2 && ret != 3)
1025                 return -EINVAL;
1026
1027         down_write(&nvmet_config_sem);
1028         subsys->ver = NVME_VS(major, minor, tertiary);
1029         up_write(&nvmet_config_sem);
1030
1031         return count;
1032 }
1033 CONFIGFS_ATTR(nvmet_subsys_, attr_version);
1034
1035 static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
1036                                              char *page)
1037 {
1038         struct nvmet_subsys *subsys = to_subsys(item);
1039
1040         return snprintf(page, PAGE_SIZE, "%llx\n", subsys->serial);
1041 }
1042
1043 static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
1044                                               const char *page, size_t count)
1045 {
1046         u64 serial;
1047
1048         if (sscanf(page, "%llx\n", &serial) != 1)
1049                 return -EINVAL;
1050
1051         down_write(&nvmet_config_sem);
1052         to_subsys(item)->serial = serial;
1053         up_write(&nvmet_config_sem);
1054
1055         return count;
1056 }
1057 CONFIGFS_ATTR(nvmet_subsys_, attr_serial);
1058
1059 static ssize_t nvmet_subsys_attr_cntlid_min_show(struct config_item *item,
1060                                                  char *page)
1061 {
1062         return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_min);
1063 }
1064
1065 static ssize_t nvmet_subsys_attr_cntlid_min_store(struct config_item *item,
1066                                                   const char *page, size_t cnt)
1067 {
1068         u16 cntlid_min;
1069
1070         if (sscanf(page, "%hu\n", &cntlid_min) != 1)
1071                 return -EINVAL;
1072
1073         if (cntlid_min == 0)
1074                 return -EINVAL;
1075
1076         down_write(&nvmet_config_sem);
1077         if (cntlid_min >= to_subsys(item)->cntlid_max)
1078                 goto out_unlock;
1079         to_subsys(item)->cntlid_min = cntlid_min;
1080         up_write(&nvmet_config_sem);
1081         return cnt;
1082
1083 out_unlock:
1084         up_write(&nvmet_config_sem);
1085         return -EINVAL;
1086 }
1087 CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_min);
1088
1089 static ssize_t nvmet_subsys_attr_cntlid_max_show(struct config_item *item,
1090                                                  char *page)
1091 {
1092         return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_max);
1093 }
1094
1095 static ssize_t nvmet_subsys_attr_cntlid_max_store(struct config_item *item,
1096                                                   const char *page, size_t cnt)
1097 {
1098         u16 cntlid_max;
1099
1100         if (sscanf(page, "%hu\n", &cntlid_max) != 1)
1101                 return -EINVAL;
1102
1103         if (cntlid_max == 0)
1104                 return -EINVAL;
1105
1106         down_write(&nvmet_config_sem);
1107         if (cntlid_max <= to_subsys(item)->cntlid_min)
1108                 goto out_unlock;
1109         to_subsys(item)->cntlid_max = cntlid_max;
1110         up_write(&nvmet_config_sem);
1111         return cnt;
1112
1113 out_unlock:
1114         up_write(&nvmet_config_sem);
1115         return -EINVAL;
1116 }
1117 CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_max);
1118
1119 static ssize_t nvmet_subsys_attr_model_show(struct config_item *item,
1120                                             char *page)
1121 {
1122         struct nvmet_subsys *subsys = to_subsys(item);
1123         struct nvmet_subsys_model *subsys_model;
1124         char *model = NVMET_DEFAULT_CTRL_MODEL;
1125         int ret;
1126
1127         rcu_read_lock();
1128         subsys_model = rcu_dereference(subsys->model);
1129         if (subsys_model)
1130                 model = subsys_model->number;
1131         ret = snprintf(page, PAGE_SIZE, "%s\n", model);
1132         rcu_read_unlock();
1133
1134         return ret;
1135 }
1136
1137 /* See Section 1.5 of NVMe 1.4 */
1138 static bool nvmet_is_ascii(const char c)
1139 {
1140         return c >= 0x20 && c <= 0x7e;
1141 }
1142
1143 static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
1144                                              const char *page, size_t count)
1145 {
1146         struct nvmet_subsys *subsys = to_subsys(item);
1147         struct nvmet_subsys_model *new_model;
1148         char *new_model_number;
1149         int pos = 0, len;
1150
1151         len = strcspn(page, "\n");
1152         if (!len)
1153                 return -EINVAL;
1154
1155         for (pos = 0; pos < len; pos++) {
1156                 if (!nvmet_is_ascii(page[pos]))
1157                         return -EINVAL;
1158         }
1159
1160         new_model_number = kmemdup_nul(page, len, GFP_KERNEL);
1161         if (!new_model_number)
1162                 return -ENOMEM;
1163
1164         new_model = kzalloc(sizeof(*new_model) + len + 1, GFP_KERNEL);
1165         if (!new_model) {
1166                 kfree(new_model_number);
1167                 return -ENOMEM;
1168         }
1169         memcpy(new_model->number, new_model_number, len);
1170
1171         down_write(&nvmet_config_sem);
1172         mutex_lock(&subsys->lock);
1173         new_model = rcu_replace_pointer(subsys->model, new_model,
1174                                         mutex_is_locked(&subsys->lock));
1175         mutex_unlock(&subsys->lock);
1176         up_write(&nvmet_config_sem);
1177
1178         kfree_rcu(new_model, rcuhead);
1179         kfree(new_model_number);
1180
1181         return count;
1182 }
1183 CONFIGFS_ATTR(nvmet_subsys_, attr_model);
1184
1185 #ifdef CONFIG_BLK_DEV_INTEGRITY
1186 static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item,
1187                                                 char *page)
1188 {
1189         return snprintf(page, PAGE_SIZE, "%d\n", to_subsys(item)->pi_support);
1190 }
1191
1192 static ssize_t nvmet_subsys_attr_pi_enable_store(struct config_item *item,
1193                                                  const char *page, size_t count)
1194 {
1195         struct nvmet_subsys *subsys = to_subsys(item);
1196         bool pi_enable;
1197
1198         if (strtobool(page, &pi_enable))
1199                 return -EINVAL;
1200
1201         subsys->pi_support = pi_enable;
1202         return count;
1203 }
1204 CONFIGFS_ATTR(nvmet_subsys_, attr_pi_enable);
1205 #endif
1206
1207 static struct configfs_attribute *nvmet_subsys_attrs[] = {
1208         &nvmet_subsys_attr_attr_allow_any_host,
1209         &nvmet_subsys_attr_attr_version,
1210         &nvmet_subsys_attr_attr_serial,
1211         &nvmet_subsys_attr_attr_cntlid_min,
1212         &nvmet_subsys_attr_attr_cntlid_max,
1213         &nvmet_subsys_attr_attr_model,
1214 #ifdef CONFIG_BLK_DEV_INTEGRITY
1215         &nvmet_subsys_attr_attr_pi_enable,
1216 #endif
1217         NULL,
1218 };
1219
1220 /*
1221  * Subsystem structures & folder operation functions below
1222  */
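/*
 * Illustrative example of subsystem and host setup (a minimal sketch;
 * assumes configfs is mounted at /sys/kernel/config; the subsystem NQN
 * and host NQN are placeholders):
 *
 *   cd /sys/kernel/config/nvmet
 *   mkdir subsystems/testnqn
 *   echo 0 > subsystems/testnqn/attr_allow_any_host
 *   mkdir hosts/hostnqn-1
 *   ln -s /sys/kernel/config/nvmet/hosts/hostnqn-1 \
 *         subsystems/testnqn/allowed_hosts/hostnqn-1
 *
 * mkdir maps to nvmet_subsys_make() below; the symlink is handled by
 * nvmet_allowed_hosts_allow_link() above.
 */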
1223 static void nvmet_subsys_release(struct config_item *item)
1224 {
1225         struct nvmet_subsys *subsys = to_subsys(item);
1226
1227         nvmet_subsys_del_ctrls(subsys);
1228         nvmet_subsys_put(subsys);
1229 }
1230
1231 static struct configfs_item_operations nvmet_subsys_item_ops = {
1232         .release                = nvmet_subsys_release,
1233 };
1234
1235 static const struct config_item_type nvmet_subsys_type = {
1236         .ct_item_ops            = &nvmet_subsys_item_ops,
1237         .ct_attrs               = nvmet_subsys_attrs,
1238         .ct_owner               = THIS_MODULE,
1239 };
1240
1241 static struct config_group *nvmet_subsys_make(struct config_group *group,
1242                 const char *name)
1243 {
1244         struct nvmet_subsys *subsys;
1245
1246         if (sysfs_streq(name, NVME_DISC_SUBSYS_NAME)) {
1247                 pr_err("can't create discovery subsystem through configfs\n");
1248                 return ERR_PTR(-EINVAL);
1249         }
1250
1251         subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
1252         if (IS_ERR(subsys))
1253                 return ERR_CAST(subsys);
1254
1255         config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type);
1256
1257         config_group_init_type_name(&subsys->namespaces_group,
1258                         "namespaces", &nvmet_namespaces_type);
1259         configfs_add_default_group(&subsys->namespaces_group, &subsys->group);
1260
1261         config_group_init_type_name(&subsys->allowed_hosts_group,
1262                         "allowed_hosts", &nvmet_allowed_hosts_type);
1263         configfs_add_default_group(&subsys->allowed_hosts_group,
1264                         &subsys->group);
1265
1266         nvmet_add_passthru_group(subsys);
1267
1268         return &subsys->group;
1269 }
1270
1271 static struct configfs_group_operations nvmet_subsystems_group_ops = {
1272         .make_group             = nvmet_subsys_make,
1273 };
1274
1275 static const struct config_item_type nvmet_subsystems_type = {
1276         .ct_group_ops           = &nvmet_subsystems_group_ops,
1277         .ct_owner               = THIS_MODULE,
1278 };
1279
1280 static ssize_t nvmet_referral_enable_show(struct config_item *item,
1281                 char *page)
1282 {
1283         return snprintf(page, PAGE_SIZE, "%d\n", to_nvmet_port(item)->enabled);
1284 }
1285
1286 static ssize_t nvmet_referral_enable_store(struct config_item *item,
1287                 const char *page, size_t count)
1288 {
1289         struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
1290         struct nvmet_port *port = to_nvmet_port(item);
1291         bool enable;
1292
1293         if (strtobool(page, &enable))
1294                 goto inval;
1295
1296         if (enable)
1297                 nvmet_referral_enable(parent, port);
1298         else
1299                 nvmet_referral_disable(parent, port);
1300
1301         return count;
1302 inval:
1303         pr_err("Invalid value '%s' for enable\n", page);
1304         return -EINVAL;
1305 }
1306
1307 CONFIGFS_ATTR(nvmet_referral_, enable);
1308
1309 /*
1310  * Discovery Service subsystem definitions
1311  */
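/*
 * Illustrative example of a discovery referral (a minimal sketch; the
 * referral name and address values are placeholders):
 *
 *   cd /sys/kernel/config/nvmet/ports/1/referrals
 *   mkdir peer
 *   echo tcp          > peer/addr_trtype
 *   echo ipv4         > peer/addr_adrfam
 *   echo 192.168.0.20 > peer/addr_traddr
 *   echo 4420         > peer/addr_trsvcid
 *   echo 1            > peer/enable
 */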
1312 static struct configfs_attribute *nvmet_referral_attrs[] = {
1313         &nvmet_attr_addr_adrfam,
1314         &nvmet_attr_addr_portid,
1315         &nvmet_attr_addr_treq,
1316         &nvmet_attr_addr_traddr,
1317         &nvmet_attr_addr_trsvcid,
1318         &nvmet_attr_addr_trtype,
1319         &nvmet_referral_attr_enable,
1320         NULL,
1321 };
1322
1323 static void nvmet_referral_notify(struct config_group *group,
1324                 struct config_item *item)
1325 {
1326         struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
1327         struct nvmet_port *port = to_nvmet_port(item);
1328
1329         nvmet_referral_disable(parent, port);
1330 }
1331
1332 static void nvmet_referral_release(struct config_item *item)
1333 {
1334         struct nvmet_port *port = to_nvmet_port(item);
1335
1336         kfree(port);
1337 }
1338
1339 static struct configfs_item_operations nvmet_referral_item_ops = {
1340         .release        = nvmet_referral_release,
1341 };
1342
1343 static const struct config_item_type nvmet_referral_type = {
1344         .ct_owner       = THIS_MODULE,
1345         .ct_attrs       = nvmet_referral_attrs,
1346         .ct_item_ops    = &nvmet_referral_item_ops,
1347 };
1348
1349 static struct config_group *nvmet_referral_make(
1350                 struct config_group *group, const char *name)
1351 {
1352         struct nvmet_port *port;
1353
1354         port = kzalloc(sizeof(*port), GFP_KERNEL);
1355         if (!port)
1356                 return ERR_PTR(-ENOMEM);
1357
1358         INIT_LIST_HEAD(&port->entry);
1359         config_group_init_type_name(&port->group, name, &nvmet_referral_type);
1360
1361         return &port->group;
1362 }
1363
1364 static struct configfs_group_operations nvmet_referral_group_ops = {
1365         .make_group             = nvmet_referral_make,
1366         .disconnect_notify      = nvmet_referral_notify,
1367 };
1368
1369 static const struct config_item_type nvmet_referrals_type = {
1370         .ct_owner       = THIS_MODULE,
1371         .ct_group_ops   = &nvmet_referral_group_ops,
1372 };
1373
1374 static struct nvmet_type_name_map nvmet_ana_state[] = {
1375         { NVME_ANA_OPTIMIZED,           "optimized" },
1376         { NVME_ANA_NONOPTIMIZED,        "non-optimized" },
1377         { NVME_ANA_INACCESSIBLE,        "inaccessible" },
1378         { NVME_ANA_PERSISTENT_LOSS,     "persistent-loss" },
1379         { NVME_ANA_CHANGE,              "change" },
1380 };
1381
1382 static ssize_t nvmet_ana_group_ana_state_show(struct config_item *item,
1383                 char *page)
1384 {
1385         struct nvmet_ana_group *grp = to_ana_group(item);
1386         enum nvme_ana_state state = grp->port->ana_state[grp->grpid];
1387         int i;
1388
1389         for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
1390                 if (state == nvmet_ana_state[i].type)
1391                         return sprintf(page, "%s\n", nvmet_ana_state[i].name);
1392         }
1393
1394         return sprintf(page, "\n");
1395 }
1396
1397 static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item,
1398                 const char *page, size_t count)
1399 {
1400         struct nvmet_ana_group *grp = to_ana_group(item);
1401         enum nvme_ana_state *ana_state = grp->port->ana_state;
1402         int i;
1403
1404         for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
1405                 if (sysfs_streq(page, nvmet_ana_state[i].name))
1406                         goto found;
1407         }
1408
1409         pr_err("Invalid value '%s' for ana_state\n", page);
1410         return -EINVAL;
1411
1412 found:
1413         down_write(&nvmet_ana_sem);
1414         ana_state[grp->grpid] = (enum nvme_ana_state) nvmet_ana_state[i].type;
1415         nvmet_ana_chgcnt++;
1416         up_write(&nvmet_ana_sem);
1417         nvmet_port_send_ana_event(grp->port);
1418         return count;
1419 }
1420
1421 CONFIGFS_ATTR(nvmet_ana_group_, ana_state);
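/*
 * Illustrative example of ANA group handling (a minimal sketch; group 1
 * is the default group created with the port, so only group ids from 2
 * to NVMET_MAX_ANAGRPS can be created here; names are placeholders):
 *
 *   cd /sys/kernel/config/nvmet/ports/1/ana_groups
 *   mkdir 2
 *   echo non-optimized > 2/ana_state
 *   echo 2 > /sys/kernel/config/nvmet/subsystems/testnqn/namespaces/1/ana_grpid
 */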
1422
1423 static struct configfs_attribute *nvmet_ana_group_attrs[] = {
1424         &nvmet_ana_group_attr_ana_state,
1425         NULL,
1426 };
1427
1428 static void nvmet_ana_group_release(struct config_item *item)
1429 {
1430         struct nvmet_ana_group *grp = to_ana_group(item);
1431
1432         if (grp == &grp->port->ana_default_group)
1433                 return;
1434
1435         down_write(&nvmet_ana_sem);
1436         grp->port->ana_state[grp->grpid] = NVME_ANA_INACCESSIBLE;
1437         nvmet_ana_group_enabled[grp->grpid]--;
1438         up_write(&nvmet_ana_sem);
1439
1440         nvmet_port_send_ana_event(grp->port);
1441         kfree(grp);
1442 }
1443
1444 static struct configfs_item_operations nvmet_ana_group_item_ops = {
1445         .release                = nvmet_ana_group_release,
1446 };
1447
1448 static const struct config_item_type nvmet_ana_group_type = {
1449         .ct_item_ops            = &nvmet_ana_group_item_ops,
1450         .ct_attrs               = nvmet_ana_group_attrs,
1451         .ct_owner               = THIS_MODULE,
1452 };
1453
1454 static struct config_group *nvmet_ana_groups_make_group(
1455                 struct config_group *group, const char *name)
1456 {
1457         struct nvmet_port *port = ana_groups_to_port(&group->cg_item);
1458         struct nvmet_ana_group *grp;
1459         u32 grpid;
1460         int ret;
1461
1462         ret = kstrtou32(name, 0, &grpid);
1463         if (ret)
1464                 goto out;
1465
1466         ret = -EINVAL;
1467         if (grpid <= 1 || grpid > NVMET_MAX_ANAGRPS)
1468                 goto out;
1469
1470         ret = -ENOMEM;
1471         grp = kzalloc(sizeof(*grp), GFP_KERNEL);
1472         if (!grp)
1473                 goto out;
1474         grp->port = port;
1475         grp->grpid = grpid;
1476
1477         down_write(&nvmet_ana_sem);
1478         nvmet_ana_group_enabled[grpid]++;
1479         up_write(&nvmet_ana_sem);
1480
1481         nvmet_port_send_ana_event(grp->port);
1482
1483         config_group_init_type_name(&grp->group, name, &nvmet_ana_group_type);
1484         return &grp->group;
1485 out:
1486         return ERR_PTR(ret);
1487 }
1488
1489 static struct configfs_group_operations nvmet_ana_groups_group_ops = {
1490         .make_group             = nvmet_ana_groups_make_group,
1491 };
1492
1493 static const struct config_item_type nvmet_ana_groups_type = {
1494         .ct_group_ops           = &nvmet_ana_groups_group_ops,
1495         .ct_owner               = THIS_MODULE,
1496 };
1497
1498 /*
1499  * Ports definitions.
1500  */
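/*
 * Illustrative example of port creation (a minimal sketch; assumes
 * configfs is mounted at /sys/kernel/config; the port id and subsystem
 * NQN are placeholders):
 *
 *   cd /sys/kernel/config/nvmet/ports
 *   mkdir 1                 # the name must parse as a u16 port id
 *   (set the addr_* attributes as shown earlier)
 *   ln -s /sys/kernel/config/nvmet/subsystems/testnqn 1/subsystems/testnqn
 *
 * Linking the first subsystem enables the port via nvmet_enable_port();
 * removing the last link disables it again (see the subsys link ops above).
 */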
1501 static void nvmet_port_release(struct config_item *item)
1502 {
1503         struct nvmet_port *port = to_nvmet_port(item);
1504
1505         list_del(&port->global_entry);
1506
1507         kfree(port->ana_state);
1508         kfree(port);
1509 }
1510
1511 static struct configfs_attribute *nvmet_port_attrs[] = {
1512         &nvmet_attr_addr_adrfam,
1513         &nvmet_attr_addr_treq,
1514         &nvmet_attr_addr_traddr,
1515         &nvmet_attr_addr_trsvcid,
1516         &nvmet_attr_addr_trtype,
1517         &nvmet_attr_param_inline_data_size,
1518 #ifdef CONFIG_BLK_DEV_INTEGRITY
1519         &nvmet_attr_param_pi_enable,
1520 #endif
1521         NULL,
1522 };
1523
1524 static struct configfs_item_operations nvmet_port_item_ops = {
1525         .release                = nvmet_port_release,
1526 };
1527
1528 static const struct config_item_type nvmet_port_type = {
1529         .ct_attrs               = nvmet_port_attrs,
1530         .ct_item_ops            = &nvmet_port_item_ops,
1531         .ct_owner               = THIS_MODULE,
1532 };
1533
1534 static struct config_group *nvmet_ports_make(struct config_group *group,
1535                 const char *name)
1536 {
1537         struct nvmet_port *port;
1538         u16 portid;
1539         u32 i;
1540
1541         if (kstrtou16(name, 0, &portid))
1542                 return ERR_PTR(-EINVAL);
1543
1544         port = kzalloc(sizeof(*port), GFP_KERNEL);
1545         if (!port)
1546                 return ERR_PTR(-ENOMEM);
1547
1548         port->ana_state = kcalloc(NVMET_MAX_ANAGRPS + 1,
1549                         sizeof(*port->ana_state), GFP_KERNEL);
1550         if (!port->ana_state) {
1551                 kfree(port);
1552                 return ERR_PTR(-ENOMEM);
1553         }
1554
1555         for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) {
1556                 if (i == NVMET_DEFAULT_ANA_GRPID)
1557                         port->ana_state[i] = NVME_ANA_OPTIMIZED;
1558                 else
1559                         port->ana_state[i] = NVME_ANA_INACCESSIBLE;
1560         }
1561
1562         list_add(&port->global_entry, &nvmet_ports_list);
1563
1564         INIT_LIST_HEAD(&port->entry);
1565         INIT_LIST_HEAD(&port->subsystems);
1566         INIT_LIST_HEAD(&port->referrals);
1567         port->inline_data_size = -1;    /* < 0 == let the transport choose */
1568
1569         port->disc_addr.portid = cpu_to_le16(portid);
1570         port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX;
1571         port->disc_addr.treq = NVMF_TREQ_DISABLE_SQFLOW;
1572         config_group_init_type_name(&port->group, name, &nvmet_port_type);
1573
1574         config_group_init_type_name(&port->subsys_group,
1575                         "subsystems", &nvmet_port_subsys_type);
1576         configfs_add_default_group(&port->subsys_group, &port->group);
1577
1578         config_group_init_type_name(&port->referrals_group,
1579                         "referrals", &nvmet_referrals_type);
1580         configfs_add_default_group(&port->referrals_group, &port->group);
1581
1582         config_group_init_type_name(&port->ana_groups_group,
1583                         "ana_groups", &nvmet_ana_groups_type);
1584         configfs_add_default_group(&port->ana_groups_group, &port->group);
1585
1586         port->ana_default_group.port = port;
1587         port->ana_default_group.grpid = NVMET_DEFAULT_ANA_GRPID;
1588         config_group_init_type_name(&port->ana_default_group.group,
1589                         __stringify(NVMET_DEFAULT_ANA_GRPID),
1590                         &nvmet_ana_group_type);
1591         configfs_add_default_group(&port->ana_default_group.group,
1592                         &port->ana_groups_group);
1593
1594         return &port->group;
1595 }
1596
1597 static struct configfs_group_operations nvmet_ports_group_ops = {
1598         .make_group             = nvmet_ports_make,
1599 };
1600
1601 static const struct config_item_type nvmet_ports_type = {
1602         .ct_group_ops           = &nvmet_ports_group_ops,
1603         .ct_owner               = THIS_MODULE,
1604 };
1605
1606 static struct config_group nvmet_subsystems_group;
1607 static struct config_group nvmet_ports_group;
1608
1609 static void nvmet_host_release(struct config_item *item)
1610 {
1611         struct nvmet_host *host = to_host(item);
1612
1613         kfree(host);
1614 }
1615
1616 static struct configfs_item_operations nvmet_host_item_ops = {
1617         .release                = nvmet_host_release,
1618 };
1619
1620 static const struct config_item_type nvmet_host_type = {
1621         .ct_item_ops            = &nvmet_host_item_ops,
1622         .ct_owner               = THIS_MODULE,
1623 };
1624
1625 static struct config_group *nvmet_hosts_make_group(struct config_group *group,
1626                 const char *name)
1627 {
1628         struct nvmet_host *host;
1629
1630         host = kzalloc(sizeof(*host), GFP_KERNEL);
1631         if (!host)
1632                 return ERR_PTR(-ENOMEM);
1633
1634         config_group_init_type_name(&host->group, name, &nvmet_host_type);
1635
1636         return &host->group;
1637 }
1638
1639 static struct configfs_group_operations nvmet_hosts_group_ops = {
1640         .make_group             = nvmet_hosts_make_group,
1641 };
1642
1643 static const struct config_item_type nvmet_hosts_type = {
1644         .ct_group_ops           = &nvmet_hosts_group_ops,
1645         .ct_owner               = THIS_MODULE,
1646 };
1647
1648 static struct config_group nvmet_hosts_group;
1649
1650 static const struct config_item_type nvmet_root_type = {
1651         .ct_owner               = THIS_MODULE,
1652 };
1653
1654 static struct configfs_subsystem nvmet_configfs_subsystem = {
1655         .su_group = {
1656                 .cg_item = {
1657                         .ci_namebuf     = "nvmet",
1658                         .ci_type        = &nvmet_root_type,
1659                 },
1660         },
1661 };
1662
1663 int __init nvmet_init_configfs(void)
1664 {
1665         int ret;
1666
1667         config_group_init(&nvmet_configfs_subsystem.su_group);
1668         mutex_init(&nvmet_configfs_subsystem.su_mutex);
1669
1670         config_group_init_type_name(&nvmet_subsystems_group,
1671                         "subsystems", &nvmet_subsystems_type);
1672         configfs_add_default_group(&nvmet_subsystems_group,
1673                         &nvmet_configfs_subsystem.su_group);
1674
1675         config_group_init_type_name(&nvmet_ports_group,
1676                         "ports", &nvmet_ports_type);
1677         configfs_add_default_group(&nvmet_ports_group,
1678                         &nvmet_configfs_subsystem.su_group);
1679
1680         config_group_init_type_name(&nvmet_hosts_group,
1681                         "hosts", &nvmet_hosts_type);
1682         configfs_add_default_group(&nvmet_hosts_group,
1683                         &nvmet_configfs_subsystem.su_group);
1684
1685         ret = configfs_register_subsystem(&nvmet_configfs_subsystem);
1686         if (ret) {
1687                 pr_err("configfs_register_subsystem: %d\n", ret);
1688                 return ret;
1689         }
1690
1691         return 0;
1692 }
1693
1694 void __exit nvmet_exit_configfs(void)
1695 {
1696         configfs_unregister_subsystem(&nvmet_configfs_subsystem);
1697 }