1 // SPDX-License-Identifier: GPL-2.0-only
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
4 #include <linux/platform_device.h>
5 #include <linux/device.h>
6 #include <linux/module.h>
7 #include <linux/genalloc.h>
8 #include <linux/vmalloc.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/list_sort.h>
11 #include <linux/libnvdimm.h>
12 #include <linux/ndctl.h>
14 #include <linux/printk.h>
15 #include <linux/seq_buf.h>
17 #include "../watermark.h"
18 #include "nfit_test.h"
/* Upper bound on interleave mappings per region (sizes mappings[] below). */
26 NDTEST_MAX_MAPPING = 6,
/* DSM command mask advertised by every test DIMM: label config ops. */
29 #define NDTEST_SCM_DIMM_CMD_MASK \
30 ((1ul << ND_CMD_GET_CONFIG_SIZE) | \
31 (1ul << ND_CMD_GET_CONFIG_DATA) | \
32 (1ul << ND_CMD_SET_CONFIG_DATA) | \
/*
 * Pack an NFIT-style DIMM handle from topology coordinates:
 * node[27:16] | socket[15:12] | imc[11:8] | chan[7:4] | dimm[3:0].
 * NOTE(review): arguments are not parenthesized; safe for the literal
 * constants used in the tables below, unsafe for expression arguments.
 */
35 #define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm) \
36 (((node & 0xfff) << 16) | ((socket & 0xf) << 12) \
37 | ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))
/*
 * Module-wide test state: ndtest_lock guards the per-instance resource
 * lists; instances[] holds one ndtest_priv per emulated bus; the class
 * backs the test dimm sysfs devices; ndtest_pool hands out fake DMA space.
 */
39 static DEFINE_SPINLOCK(ndtest_lock);
40 static struct ndtest_priv *instances[NUM_INSTANCES];
41 static struct class *ndtest_dimm_class;
42 static struct gen_pool *ndtest_pool;
/*
 * Bus 0 test DIMMs: fixed handles spread over socket/iMC coordinates and
 * fixed label UUIDs so test results are reproducible across runs.
 */
44 static struct ndtest_dimm dimm_group1[] = {
47 .handle = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
48 .uuid_str = "1e5c75d2-b618-11ea-9aa3-507b9ddc0f72",
54 .handle = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
55 .uuid_str = "1c4d43ac-b618-11ea-be80-507b9ddc0f72",
61 .handle = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
62 .uuid_str = "a9f17ffc-b618-11ea-b36d-507b9ddc0f72",
68 .handle = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
69 .uuid_str = "b6b83b22-b618-11ea-8aae-507b9ddc0f72",
75 .handle = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
76 .uuid_str = "bf9baaee-b618-11ea-b181-507b9ddc0f72",
/* Bus 1 test DIMMs: a single module on node 1. */
82 static struct ndtest_dimm dimm_group2[] = {
85 .handle = NFIT_DIMM_HANDLE(1, 0, 0, 0, 0),
86 .uuid_str = "ca0817e2-b618-11ea-9db3-507b9ddc0f72",
/*
 * Per-region interleave maps for bus 0: each entry describes which DIMM
 * backs which span (start/size/position) of the region.
 */
92 static struct ndtest_mapping region0_mapping[] = {
107 static struct ndtest_mapping region1_mapping[] = {
134 static struct ndtest_mapping region2_mapping[] = {
143 static struct ndtest_mapping region3_mapping[] = {
151 static struct ndtest_mapping region4_mapping[] = {
159 static struct ndtest_mapping region5_mapping[] = {
/*
 * Bus 0 region table: two PMEM namespaces followed by four BLK namespaces,
 * each pointing at its interleave map above.
 */
167 static struct ndtest_region bus0_regions[] = {
169 .type = ND_DEVICE_NAMESPACE_PMEM,
170 .num_mappings = ARRAY_SIZE(region0_mapping),
171 .mapping = region0_mapping,
176 .type = ND_DEVICE_NAMESPACE_PMEM,
177 .num_mappings = ARRAY_SIZE(region1_mapping),
178 .mapping = region1_mapping,
179 .size = DIMM_SIZE * 2,
183 .type = ND_DEVICE_NAMESPACE_BLK,
184 .num_mappings = ARRAY_SIZE(region2_mapping),
185 .mapping = region2_mapping,
190 .type = ND_DEVICE_NAMESPACE_BLK,
191 .num_mappings = ARRAY_SIZE(region3_mapping),
192 .mapping = region3_mapping,
197 .type = ND_DEVICE_NAMESPACE_BLK,
198 .num_mappings = ARRAY_SIZE(region4_mapping),
199 .mapping = region4_mapping,
204 .type = ND_DEVICE_NAMESPACE_BLK,
205 .num_mappings = ARRAY_SIZE(region5_mapping),
206 .mapping = region5_mapping,
/* Bus 1: a single I/O namespace region. */
212 static struct ndtest_mapping region6_mapping[] = {
221 static struct ndtest_region bus1_regions[] = {
223 .type = ND_DEVICE_NAMESPACE_IO,
224 .num_mappings = ARRAY_SIZE(region6_mapping),
225 .mapping = region6_mapping,
/*
 * One config per emulated bus instance, tying a dimm group to its region
 * table.  dimm_start offsets bus 1's dimm ids past bus 0's group.
 */
231 static struct ndtest_config bus_configs[NUM_INSTANCES] = {
235 .dimm_count = ARRAY_SIZE(dimm_group1),
236 .dimms = dimm_group1,
237 .regions = bus0_regions,
238 .num_regions = ARRAY_SIZE(bus0_regions),
242 .dimm_start = ARRAY_SIZE(dimm_group1),
243 .dimm_count = ARRAY_SIZE(dimm_group2),
244 .dimms = dimm_group2,
245 .regions = bus1_regions,
246 .num_regions = ARRAY_SIZE(bus1_regions),
/* Map a generic struct device back to its containing ndtest_priv. */
250 static inline struct ndtest_priv *to_ndtest_priv(struct device *dev)
252 struct platform_device *pdev = to_platform_device(dev);
254 return container_of(pdev, struct ndtest_priv, pdev);
/*
 * ND_CMD_GET_CONFIG_DATA: copy a window of the dimm's label area into
 * hdr->out_buf.  The requested window is rejected when it runs past
 * LABEL_SIZE, and the copy length is clamped to the label area.
 * Returns buf_len - len, i.e. 0 on a full-length transfer.
 */
257 static int ndtest_config_get(struct ndtest_dimm *p, unsigned int buf_len,
258 struct nd_cmd_get_config_data_hdr *hdr)
262 if ((hdr->in_offset + hdr->in_length) > LABEL_SIZE)
266 len = min(hdr->in_length, LABEL_SIZE - hdr->in_offset);
267 memcpy(hdr->out_buf, p->label_area + hdr->in_offset, len);
269 return buf_len - len;
/*
 * ND_CMD_SET_CONFIG_DATA: mirror of ndtest_config_get() — copy
 * hdr->in_buf into the dimm's label area, bounds-checked and clamped
 * against LABEL_SIZE.  Returns buf_len - len (0 on full transfer).
 */
272 static int ndtest_config_set(struct ndtest_dimm *p, unsigned int buf_len,
273 struct nd_cmd_set_config_hdr *hdr)
277 if ((hdr->in_offset + hdr->in_length) > LABEL_SIZE)
280 len = min(hdr->in_length, LABEL_SIZE - hdr->in_offset);
281 memcpy(p->label_area + hdr->in_offset, hdr->in_buf, len);
283 return buf_len - len;
/*
 * ND_CMD_GET_CONFIG_SIZE: report the dimm's label config area size to
 * the caller-supplied result structure.
 */
286 static int ndtest_get_config_size(struct ndtest_dimm *dimm, unsigned int buf_len,
287 struct nd_cmd_get_config_size *size)
291 size->config_size = dimm->config_size;
/*
 * nvdimm bus ->ndctl() entry point: dispatch the label config commands
 * to the per-dimm helpers above, storing each helper's result in
 * *cmd_rc.  Before dispatch, commands selected via the fail_cmd sysfs
 * attribute are failed with fail_cmd_code (or -EIO when unset).
 */
296 static int ndtest_ctl(struct nvdimm_bus_descriptor *nd_desc,
297 struct nvdimm *nvdimm, unsigned int cmd, void *buf,
298 unsigned int buf_len, int *cmd_rc)
300 struct ndtest_dimm *dimm;
311 dimm = nvdimm_provider_data(nvdimm);
316 case ND_CMD_GET_CONFIG_SIZE:
317 *cmd_rc = ndtest_get_config_size(dimm, buf_len, buf);
319 case ND_CMD_GET_CONFIG_DATA:
320 *cmd_rc = ndtest_config_get(dimm, buf_len, buf);
322 case ND_CMD_SET_CONFIG_DATA:
323 *cmd_rc = ndtest_config_set(dimm, buf_len, buf);
329 /* Failures for a DIMM can be injected using fail_cmd and
330 * fail_cmd_code, see the device attributes below
332 if ((1 << cmd) & dimm->fail_cmd)
333 return dimm->fail_cmd_code ? dimm->fail_cmd_code : -EIO;
/*
 * BLK region I/O: emulate DIMM aperture access with plain memcpy against
 * the dimm's remapped mmio base, serialized by a region lane.  Reads
 * invalidate the pmem range after the copy so stale cachelines are not
 * returned on a subsequent access.
 */
338 static int ndtest_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
339 void *iobuf, u64 len, int rw)
341 struct ndtest_dimm *dimm = ndbr->blk_provider_data;
342 struct ndtest_blk_mmio *mmio = dimm->mmio;
343 struct nd_region *nd_region = &ndbr->nd_region;
349 lane = nd_region_acquire_lane(nd_region);
351 memcpy(mmio->base + dpa, iobuf, len);
353 memcpy(iobuf, mmio->base + dpa, len);
354 arch_invalidate_pmem(mmio->base + dpa, len);
357 nd_region_release_lane(nd_region, lane);
/*
 * BLK region enable hook: resolve the backing dimm, record it as the
 * region's provider data, and devm-map the dimm's memory so
 * ndtest_blk_do_io() can memcpy against mmio->base.
 */
362 static int ndtest_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
365 struct nd_blk_region *ndbr = to_nd_blk_region(dev);
366 struct nvdimm *nvdimm;
367 struct ndtest_dimm *dimm;
368 struct ndtest_blk_mmio *mmio;
370 nvdimm = nd_blk_region_to_dimm(ndbr);
371 dimm = nvdimm_provider_data(nvdimm);
373 nd_blk_region_set_provider_data(ndbr, dimm);
374 dimm->blk_region = to_nd_region(dev);
376 mmio = devm_kzalloc(dev, sizeof(struct ndtest_blk_mmio), GFP_KERNEL);
/*
 * NOTE(review): the memremap length is the literal 12 while mmio->size
 * is set to dimm->size below — this looks like it should map dimm->size
 * (or a named constant); confirm against the aperture users.
 */
380 mmio->base = (void __iomem *) devm_nvdimm_memremap(
381 dev, dimm->address, 12, nd_blk_memremap_flags(ndbr));
383 dev_err(dev, "%s failed to map blk dimm\n", nvdimm_name(nvdimm));
386 mmio->size = dimm->size;
387 mmio->base_offset = 0;
/*
 * Find the nfit_test_resource covering @addr across all bus instances.
 * An address matches either the resource's fake physical range
 * (res.start..end) or the vmalloc'd backing buffer's virtual range.
 * List walks are done under ndtest_lock.  Logs a warning when nothing
 * matches.
 */
394 static struct nfit_test_resource *ndtest_resource_lookup(resource_size_t addr)
398 for (i = 0; i < NUM_INSTANCES; i++) {
399 struct nfit_test_resource *n, *nfit_res = NULL;
400 struct ndtest_priv *t = instances[i];
404 spin_lock(&ndtest_lock);
405 list_for_each_entry(n, &t->resources, list) {
406 if (addr >= n->res.start && (addr < n->res.start
407 + resource_size(&n->res))) {
410 } else if (addr >= (unsigned long) n->buf
411 && (addr < (unsigned long) n->buf
412 + resource_size(&n->res))) {
417 spin_unlock(&ndtest_lock);
422 pr_warn("Failed to get resource\n");
/*
 * devm release action for ndtest_alloc_resource(): unlink the resource
 * from its instance list under ndtest_lock and, for DIMM_SIZE-or-larger
 * allocations, return the range to the gen_pool it came from.
 */
427 static void ndtest_release_resource(void *data)
429 struct nfit_test_resource *res = data;
431 spin_lock(&ndtest_lock);
432 list_del(&res->list);
433 spin_unlock(&ndtest_lock);
435 if (resource_size(&res->res) >= DIMM_SIZE)
436 gen_pool_free(ndtest_pool, res->res.start,
437 resource_size(&res->res));
/*
 * Allocate a tracked test resource of @size bytes for instance @p.
 * Large (>= DIMM_SIZE) allocations get an aligned fake-DMA range from
 * ndtest_pool; smaller ones reuse the backing buffer's virtual address
 * as the "physical" start.  The resource is registered on the instance
 * list and torn down via ndtest_release_resource() on device removal.
 */
442 static void *ndtest_alloc_resource(struct ndtest_priv *p, size_t size,
447 struct nfit_test_resource *res;
448 struct genpool_data_align data = {
452 res = kzalloc(sizeof(*res), GFP_KERNEL);
457 if (size >= DIMM_SIZE)
458 __dma = gen_pool_alloc_algo(ndtest_pool, size,
459 gen_pool_first_fit_align, &data);
461 __dma = (unsigned long) buf;
466 INIT_LIST_HEAD(&res->list);
467 res->dev = &p->pdev.dev;
469 res->res.start = __dma;
470 res->res.end = __dma + size - 1;
471 res->res.name = "NFIT";
472 spin_lock_init(&res->lock);
473 INIT_LIST_HEAD(&res->requests);
474 spin_lock(&ndtest_lock);
475 list_add(&res->list, &p->resources);
476 spin_unlock(&ndtest_lock);
/* devm_add_action() returning 0 means the release action is armed. */
481 if (!devm_add_action(&p->pdev.dev, ndtest_release_resource, res))
/* Error unwind: give a pool-backed range back before failing. */
485 if (__dma && size >= DIMM_SIZE)
486 gen_pool_free(ndtest_pool, __dma, size);
/*
 * Region sysfs attribute "range_index": reports the test region's SPA
 * range index, mirroring what an NFIT-backed region would expose.
 */
494 static ssize_t range_index_show(struct device *dev,
495 struct device_attribute *attr, char *buf)
497 struct nd_region *nd_region = to_nd_region(dev);
498 struct ndtest_region *region = nd_region_provider_data(nd_region);
500 return sprintf(buf, "%d\n", region->range_index);
502 static DEVICE_ATTR_RO(range_index);
504 static struct attribute *ndtest_region_attributes[] = {
505 &dev_attr_range_index.attr,
509 static const struct attribute_group ndtest_region_attribute_group = {
511 .attrs = ndtest_region_attributes,
514 static const struct attribute_group *ndtest_region_attribute_groups[] = {
515 &ndtest_region_attribute_group,
/*
 * Register one test region with libnvdimm.  Allocates backing space for
 * the region, derives an interleave-set cookie from the first mapped
 * dimm's label UUID, then creates either a single-dimm BLK region or a
 * multi-mapping PMEM region depending on region->type.
 */
519 static int ndtest_create_region(struct ndtest_priv *p,
520 struct ndtest_region *region)
522 struct nd_mapping_desc mappings[NDTEST_MAX_MAPPING];
523 struct nd_blk_region_desc ndbr_desc;
524 struct nd_interleave_set *nd_set;
525 struct nd_region_desc *ndr_desc;
527 int i, ndimm = region->mapping[0].dimm;
530 memset(&res, 0, sizeof(res));
531 memset(&mappings, 0, sizeof(mappings));
532 memset(&ndbr_desc, 0, sizeof(ndbr_desc));
/* The plain region descriptor is embedded in the BLK descriptor. */
533 ndr_desc = &ndbr_desc.ndr_desc;
535 if (!ndtest_alloc_resource(p, region->size, &res.start))
538 res.end = res.start + region->size - 1;
539 ndr_desc->mapping = mappings;
540 ndr_desc->res = &res;
541 ndr_desc->provider_data = region;
542 ndr_desc->attr_groups = ndtest_region_attribute_groups;
544 if (uuid_parse(p->config->dimms[ndimm].uuid_str, (uuid_t *)uuid)) {
545 pr_err("failed to parse UUID\n");
549 nd_set = devm_kzalloc(&p->pdev.dev, sizeof(*nd_set), GFP_KERNEL);
/* Interleave-set cookies come straight from the dimm UUID halves. */
553 nd_set->cookie1 = cpu_to_le64(uuid[0]);
554 nd_set->cookie2 = cpu_to_le64(uuid[1]);
555 nd_set->altcookie = nd_set->cookie1;
556 ndr_desc->nd_set = nd_set;
/* BLK: single mapping covering one DIMM, with our enable/do_io hooks. */
558 if (region->type == ND_DEVICE_NAMESPACE_BLK) {
559 mappings[0].start = 0;
560 mappings[0].size = DIMM_SIZE;
561 mappings[0].nvdimm = p->config->dimms[ndimm].nvdimm;
563 ndr_desc->mapping = &mappings[0];
564 ndr_desc->num_mappings = 1;
565 ndr_desc->num_lanes = 1;
566 ndbr_desc.enable = ndtest_blk_region_enable;
567 ndbr_desc.do_io = ndtest_blk_do_io;
568 region->region = nvdimm_blk_region_create(p->bus, ndr_desc);
/* PMEM: translate every table mapping into an nd_mapping_desc. */
573 for (i = 0; i < region->num_mappings; i++) {
574 ndimm = region->mapping[i].dimm;
575 mappings[i].start = region->mapping[i].start;
576 mappings[i].size = region->mapping[i].size;
577 mappings[i].position = region->mapping[i].position;
578 mappings[i].nvdimm = p->config->dimms[ndimm].nvdimm;
581 ndr_desc->num_mappings = region->num_mappings;
582 region->region = nvdimm_pmem_region_create(p->bus, ndr_desc);
585 if (!region->region) {
586 dev_err(&p->pdev.dev, "Error registering region %pR\n",
/* Create every region listed in the instance's bus config. */
594 static int ndtest_init_regions(struct ndtest_priv *p)
598 for (i = 0; i < p->config->num_regions; i++) {
599 ret = ndtest_create_region(p, &p->config->regions[i]);
/*
 * devm teardown action: unregister each dimm's sysfs device and clear
 * the pointer so the teardown is idempotent.
 */
607 static void put_dimms(void *data)
609 struct ndtest_priv *p = data;
612 for (i = 0; i < p->config->dimm_count; i++)
613 if (p->config->dimms[i].dev) {
614 device_unregister(p->config->dimms[i].dev);
615 p->config->dimms[i].dev = NULL;
/* Test-dimm sysfs "handle": the NFIT-style handle in hex. */
619 static ssize_t handle_show(struct device *dev, struct device_attribute *attr,
622 struct ndtest_dimm *dimm = dev_get_drvdata(dev);
624 return sprintf(buf, "%#x\n", dimm->handle);
626 static DEVICE_ATTR_RO(handle);
/*
 * Test-dimm sysfs "fail_cmd": bitmask of ND_CMD numbers that
 * ndtest_ctl() should fail for this dimm (fault injection).
 */
628 static ssize_t fail_cmd_show(struct device *dev, struct device_attribute *attr,
631 struct ndtest_dimm *dimm = dev_get_drvdata(dev);
633 return sprintf(buf, "%#x\n", dimm->fail_cmd);
636 static ssize_t fail_cmd_store(struct device *dev, struct device_attribute *attr,
637 const char *buf, size_t size)
639 struct ndtest_dimm *dimm = dev_get_drvdata(dev);
643 rc = kstrtol(buf, 0, &val);
647 dimm->fail_cmd = val;
651 static DEVICE_ATTR_RW(fail_cmd);
/*
 * Test-dimm sysfs "fail_cmd_code": the error code ndtest_ctl() returns
 * for injected failures (defaults to -EIO when left at 0).
 */
653 static ssize_t fail_cmd_code_show(struct device *dev, struct device_attribute *attr,
656 struct ndtest_dimm *dimm = dev_get_drvdata(dev);
658 return sprintf(buf, "%d\n", dimm->fail_cmd_code);
661 static ssize_t fail_cmd_code_store(struct device *dev, struct device_attribute *attr,
662 const char *buf, size_t size)
664 struct ndtest_dimm *dimm = dev_get_drvdata(dev);
668 rc = kstrtol(buf, 0, &val);
672 dimm->fail_cmd_code = val;
675 static DEVICE_ATTR_RW(fail_cmd_code);
/* Attribute set for the test-dimm class devices created in register(). */
677 static struct attribute *dimm_attributes[] = {
678 &dev_attr_handle.attr,
679 &dev_attr_fail_cmd.attr,
680 &dev_attr_fail_cmd_code.attr,
684 static struct attribute_group dimm_attribute_group = {
685 .attrs = dimm_attributes,
688 static const struct attribute_group *dimm_attribute_groups[] = {
689 &dimm_attribute_group,
/* nvdimm sysfs "phys_id": the dimm's physical id in hex. */
693 static ssize_t phys_id_show(struct device *dev,
694 struct device_attribute *attr, char *buf)
696 struct nvdimm *nvdimm = to_nvdimm(dev);
697 struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
699 return sprintf(buf, "%#x\n", dimm->physical_id);
701 static DEVICE_ATTR_RO(phys_id);
/* nvdimm sysfs "vendor": fixed fake vendor id for all test dimms. */
703 static ssize_t vendor_show(struct device *dev,
704 struct device_attribute *attr, char *buf)
706 return sprintf(buf, "0x1234567\n");
708 static DEVICE_ATTR_RO(vendor);
/*
 * nvdimm sysfs "id": synthesized unique id — fixed vendor/device fields
 * plus the bitwise complement of the dimm handle as the serial.
 */
710 static ssize_t id_show(struct device *dev,
711 struct device_attribute *attr, char *buf)
713 struct nvdimm *nvdimm = to_nvdimm(dev);
714 struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
716 return sprintf(buf, "%04x-%02x-%04x-%08x", 0xabcd,
717 0xa, 2016, ~(dimm->handle));
719 static DEVICE_ATTR_RO(id);
/*
 * nvdimm sysfs "handle": same value as the dimm-class attribute, but
 * needs a hand-rolled device_attribute since DEVICE_ATTR_RO(handle)
 * was already used for the dimm-class version above.
 */
721 static ssize_t nvdimm_handle_show(struct device *dev,
722 struct device_attribute *attr, char *buf)
724 struct nvdimm *nvdimm = to_nvdimm(dev);
725 struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
727 return sprintf(buf, "%#x\n", dimm->handle);
730 static struct device_attribute dev_attr_nvdimm_show_handle = {
731 .attr = { .name = "handle", .mode = 0444 },
732 .show = nvdimm_handle_show,
/* nvdimm sysfs "subsystem_vendor": constant 0 for the test devices. */
735 static ssize_t subsystem_vendor_show(struct device *dev,
736 struct device_attribute *attr, char *buf)
738 return sprintf(buf, "0x%04x\n", 0);
740 static DEVICE_ATTR_RO(subsystem_vendor);
/* nvdimm sysfs "dirty_shutdown": fixed count so tests can assert it. */
742 static ssize_t dirty_shutdown_show(struct device *dev,
743 struct device_attribute *attr, char *buf)
745 return sprintf(buf, "%d\n", 42);
747 static DEVICE_ATTR_RO(dirty_shutdown);
/* nvdimm sysfs "formats": number of label formats this dimm supports. */
749 static ssize_t formats_show(struct device *dev,
750 struct device_attribute *attr, char *buf)
752 struct nvdimm *nvdimm = to_nvdimm(dev);
753 struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
755 return sprintf(buf, "%d\n", dimm->num_formats);
757 static DEVICE_ATTR_RO(formats);
/*
 * nvdimm sysfs "format"/"format1": fixed format interface codes —
 * multi-format dimms report 0x201 with a secondary 0x301, single-format
 * dimms report 0x101 ("format1" is hidden for them, see is_visible).
 */
759 static ssize_t format_show(struct device *dev,
760 struct device_attribute *attr, char *buf)
762 struct nvdimm *nvdimm = to_nvdimm(dev);
763 struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
765 if (dimm->num_formats > 1)
766 return sprintf(buf, "0x201\n");
768 return sprintf(buf, "0x101\n");
770 static DEVICE_ATTR_RO(format);
772 static ssize_t format1_show(struct device *dev, struct device_attribute *attr,
775 return sprintf(buf, "0x301\n");
777 static DEVICE_ATTR_RO(format1);
/*
 * Attribute-group visibility hook: hide "format1" on dimms that only
 * support a single label format.
 */
779 static umode_t ndtest_nvdimm_attr_visible(struct kobject *kobj,
780 struct attribute *a, int n)
782 struct device *dev = container_of(kobj, struct device, kobj);
783 struct nvdimm *nvdimm = to_nvdimm(dev);
784 struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
786 if (a == &dev_attr_format1.attr && dimm->num_formats <= 1)
/* Full attribute set handed to nvdimm_create() for each test dimm. */
792 static struct attribute *ndtest_nvdimm_attributes[] = {
793 &dev_attr_nvdimm_show_handle.attr,
794 &dev_attr_vendor.attr,
796 &dev_attr_phys_id.attr,
797 &dev_attr_subsystem_vendor.attr,
798 &dev_attr_dirty_shutdown.attr,
799 &dev_attr_formats.attr,
800 &dev_attr_format.attr,
801 &dev_attr_format1.attr,
805 static const struct attribute_group ndtest_nvdimm_attribute_group = {
807 .attrs = ndtest_nvdimm_attributes,
808 .is_visible = ndtest_nvdimm_attr_visible,
811 static const struct attribute_group *ndtest_nvdimm_attribute_groups[] = {
812 &ndtest_nvdimm_attribute_group,
/*
 * Register one test dimm: create its nvdimm object on the bus (marking
 * multi-format dimms as aliasing/labeling capable) and a companion
 * class device carrying the fault-injection attributes.
 */
816 static int ndtest_dimm_register(struct ndtest_priv *priv,
817 struct ndtest_dimm *dimm, int id)
819 struct device *dev = &priv->pdev.dev;
820 unsigned long dimm_flags = dimm->flags;
822 if (dimm->num_formats > 1) {
823 set_bit(NDD_ALIASING, &dimm_flags);
824 set_bit(NDD_LABELING, &dimm_flags);
827 dimm->nvdimm = nvdimm_create(priv->bus, dimm,
828 ndtest_nvdimm_attribute_groups, dimm_flags,
829 NDTEST_SCM_DIMM_CMD_MASK, 0, NULL);
831 dev_err(dev, "Error creating DIMM object for %pOF\n", priv->dn);
835 dimm->dev = device_create_with_groups(ndtest_dimm_class,
837 0, dimm, dimm_attribute_groups,
840 pr_err("Could not create dimm device attributes\n");
/*
 * Initialize every dimm in the instance's config: assign a bus-global
 * id, allocate the label area (seeded with "label<id>"), allocate the
 * dimm's data and label backing resources, then register the dimm.
 */
847 static int ndtest_nvdimm_init(struct ndtest_priv *p)
849 struct ndtest_dimm *d;
853 for (i = 0; i < p->config->dimm_count; i++) {
854 d = &p->config->dimms[i];
855 d->id = id = p->config->dimm_start + i;
856 res = ndtest_alloc_resource(p, LABEL_SIZE, NULL);
861 sprintf(d->label_area, "label%d", id);
862 d->config_size = LABEL_SIZE;
864 if (!ndtest_alloc_resource(p, d->size,
868 if (!ndtest_alloc_resource(p, LABEL_SIZE,
872 if (!ndtest_alloc_resource(p, LABEL_SIZE,
876 d->address = p->dimm_dma[id];
878 ndtest_dimm_register(p, d, id);
/*
 * Bus sysfs "compatible": lets userspace tests identify this bus as the
 * nvdimm_test provider.
 */
884 static ssize_t compatible_show(struct device *dev,
885 struct device_attribute *attr, char *buf)
887 return sprintf(buf, "nvdimm_test");
889 static DEVICE_ATTR_RO(compatible);
891 static struct attribute *of_node_attributes[] = {
892 &dev_attr_compatible.attr,
896 static const struct attribute_group of_node_attribute_group = {
898 .attrs = of_node_attributes,
901 static const struct attribute_group *ndtest_attribute_groups[] = {
902 &of_node_attribute_group,
/*
 * Bind the instance to its static config (indexed by platform device
 * id), fill in the bus descriptor, and register the nvdimm bus.
 */
906 static int ndtest_bus_register(struct ndtest_priv *p)
908 p->config = &bus_configs[p->pdev.id];
910 p->bus_desc.ndctl = ndtest_ctl;
911 p->bus_desc.module = THIS_MODULE;
912 p->bus_desc.provider_name = NULL;
913 p->bus_desc.attr_groups = ndtest_attribute_groups;
915 p->bus = nvdimm_bus_register(&p->pdev.dev, &p->bus_desc);
917 dev_err(&p->pdev.dev, "Error creating nvdimm bus %pOF\n", p->dn);
/* Platform driver remove: tear down the instance's nvdimm bus. */
924 static int ndtest_remove(struct platform_device *pdev)
926 struct ndtest_priv *p = to_ndtest_priv(&pdev->dev);
928 nvdimm_bus_unregister(p->bus);
/*
 * Platform driver probe: register the nvdimm bus, allocate the per-dimm
 * DMA address tables, then bring up dimms and regions.  put_dimms() is
 * armed as a devm action so dimm devices are released on unbind.
 */
932 static int ndtest_probe(struct platform_device *pdev)
934 struct ndtest_priv *p;
937 p = to_ndtest_priv(&pdev->dev);
938 if (ndtest_bus_register(p))
/* NOTE(review): allocation failures of these tables appear unchecked
 * in the visible code — confirm error handling. */
941 p->dcr_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
942 sizeof(dma_addr_t), GFP_KERNEL);
943 p->label_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
944 sizeof(dma_addr_t), GFP_KERNEL);
945 p->dimm_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
946 sizeof(dma_addr_t), GFP_KERNEL);
948 rc = ndtest_nvdimm_init(p);
952 rc = ndtest_init_regions(p);
956 rc = devm_add_action_or_reset(&pdev->dev, put_dimms, p);
960 platform_set_drvdata(pdev, p);
965 pr_err("%s:%d Failed nvdimm init\n", __func__, __LINE__);
/* Platform device id table and driver glue for the ndtest instances. */
969 static const struct platform_device_id ndtest_id[] = {
974 static struct platform_driver ndtest_driver = {
975 .probe = ndtest_probe,
976 .remove = ndtest_remove,
978 .name = KBUILD_MODNAME,
980 .id_table = ndtest_id,
/* Device release callback: frees the kzalloc'd ndtest_priv instance. */
983 static void ndtest_release(struct device *dev)
985 struct ndtest_priv *p = to_ndtest_priv(dev);
/*
 * Module-exit/error-path teardown: unregister every platform device,
 * undo the nfit_test resource-lookup hook, and destroy the pool and
 * class (guarded, since init may have failed part-way).
 */
990 static void cleanup_devices(void)
994 for (i = 0; i < NUM_INSTANCES; i++)
996 platform_device_unregister(&instances[i]->pdev);
998 nfit_test_teardown();
1001 gen_pool_destroy(ndtest_pool);
1004 if (ndtest_dimm_class)
1005 class_destroy(ndtest_dimm_class);
/*
 * Module init: install the resource-lookup hook, create the dimm class
 * and a 4M-granular gen_pool seeded at 4G, then register one platform
 * device per bus instance and finally the platform driver.
 */
1008 static __init int ndtest_init(void)
1016 dax_pmem_core_test();
1017 #ifdef CONFIG_DEV_DAX_PMEM_COMPAT
1018 dax_pmem_compat_test();
1021 nfit_test_setup(ndtest_resource_lookup, NULL);
1023 ndtest_dimm_class = class_create(THIS_MODULE, "nfit_test_dimm");
1024 if (IS_ERR(ndtest_dimm_class)) {
1025 rc = PTR_ERR(ndtest_dimm_class);
/* Fake-DMA pool: 4M-aligned chunks starting at the 4G mark. */
1029 ndtest_pool = gen_pool_create(ilog2(SZ_4M), NUMA_NO_NODE);
1035 if (gen_pool_add(ndtest_pool, SZ_4G, SZ_4G, NUMA_NO_NODE)) {
1040 /* Each instance can be taken as a bus, which can have multiple dimms */
1041 for (i = 0; i < NUM_INSTANCES; i++) {
1042 struct ndtest_priv *priv;
1043 struct platform_device *pdev;
1045 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1051 INIT_LIST_HEAD(&priv->resources);
1053 pdev->name = KBUILD_MODNAME;
1055 pdev->dev.release = ndtest_release;
1056 rc = platform_device_register(pdev);
/* On registration failure the release callback frees priv. */
1058 put_device(&pdev->dev);
1061 get_device(&pdev->dev);
1063 instances[i] = priv;
1066 rc = platform_driver_register(&ndtest_driver);
1073 pr_err("Error registering platform device\n");
/* Module exit: remove devices/resources, then the platform driver. */
1079 static __exit void ndtest_exit(void)
1082 platform_driver_unregister(&ndtest_driver);
1085 module_init(ndtest_init);
1086 module_exit(ndtest_exit);
1087 MODULE_LICENSE("GPL");
1088 MODULE_AUTHOR("IBM Corporation");