1 // SPDX-License-Identifier: GPL-2.0-only
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
4 #include <linux/platform_device.h>
5 #include <linux/device.h>
6 #include <linux/module.h>
7 #include <linux/genalloc.h>
8 #include <linux/vmalloc.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/list_sort.h>
11 #include <linux/libnvdimm.h>
12 #include <linux/ndctl.h>
14 #include <linux/printk.h>
15 #include <linux/seq_buf.h>
17 #include "../watermark.h"
18 #include "nfit_test.h"
/* Maximum number of DIMM mappings a single test region may carry. */
26 NDTEST_MAX_MAPPING = 6,
/*
 * Label-configuration commands the simulated DIMMs implement; used as
 * the cmd_mask when the nvdimm objects are created.
 */
29 #define NDTEST_SCM_DIMM_CMD_MASK \
30 ((1ul << ND_CMD_GET_CONFIG_SIZE) | \
31 (1ul << ND_CMD_GET_CONFIG_DATA) | \
32 (1ul << ND_CMD_SET_CONFIG_DATA) | \
/*
 * Pack an NFIT-style DIMM handle from its topology coordinates
 * (node / socket / memory-controller / channel / dimm).
 */
35 #define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm) \
36 (((node & 0xfff) << 16) | ((socket & 0xf) << 12) \
37 | ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))
/* Protects the per-instance resource lists (see ndtest_alloc_resource). */
39 static DEFINE_SPINLOCK(ndtest_lock);
/* One ndtest_priv per simulated nvdimm bus (NUM_INSTANCES total). */
40 static struct ndtest_priv *instances[NUM_INSTANCES];
/* Class for the auxiliary per-DIMM sysfs devices (fault injection knobs). */
41 static struct class *ndtest_dimm_class;
/* Pool handing out fake DIMM "physical" ranges to ndtest_alloc_resource(). */
42 static struct gen_pool *ndtest_pool;
/*
 * DIMMs behind the first simulated bus: four DIMMs across two memory
 * controllers on socket 0, plus one DIMM on socket 1.  Each carries a
 * fixed UUID used to seed region interleave-set cookies.
 */
44 static struct ndtest_dimm dimm_group1[] = {
47 .handle = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
48 .uuid_str = "1e5c75d2-b618-11ea-9aa3-507b9ddc0f72",
54 .handle = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
55 .uuid_str = "1c4d43ac-b618-11ea-be80-507b9ddc0f72",
61 .handle = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
62 .uuid_str = "a9f17ffc-b618-11ea-b36d-507b9ddc0f72",
68 .handle = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
69 .uuid_str = "b6b83b22-b618-11ea-8aae-507b9ddc0f72",
75 .handle = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
76 .uuid_str = "bf9baaee-b618-11ea-b181-507b9ddc0f72",
/*
 * Single DIMM behind the second simulated bus, pre-loaded with a set of
 * PAPR health/error flags so health reporting paths can be exercised.
 */
82 static struct ndtest_dimm dimm_group2[] = {
85 .handle = NFIT_DIMM_HANDLE(1, 0, 0, 0, 0),
86 .uuid_str = "ca0817e2-b618-11ea-9db3-507b9ddc0f72",
89 .flags = PAPR_PMEM_UNARMED | PAPR_PMEM_EMPTY |
90 PAPR_PMEM_SAVE_FAILED | PAPR_PMEM_SHUTDOWN_DIRTY |
91 PAPR_PMEM_HEALTH_FATAL,
/*
 * Mapping tables describing how each bus 0 region lays out across the
 * DIMMs in dimm_group1 (referenced by bus0_regions below).
 */
95 static struct ndtest_mapping region0_mapping[] = {
110 static struct ndtest_mapping region1_mapping[] = {
137 static struct ndtest_mapping region2_mapping[] = {
146 static struct ndtest_mapping region3_mapping[] = {
154 static struct ndtest_mapping region4_mapping[] = {
162 static struct ndtest_mapping region5_mapping[] = {
/*
 * Regions exported by bus 0: two PMEM regions followed by four BLK
 * regions, each pointing at its mapping table above.
 */
170 static struct ndtest_region bus0_regions[] = {
172 .type = ND_DEVICE_NAMESPACE_PMEM,
173 .num_mappings = ARRAY_SIZE(region0_mapping),
174 .mapping = region0_mapping,
179 .type = ND_DEVICE_NAMESPACE_PMEM,
180 .num_mappings = ARRAY_SIZE(region1_mapping),
181 .mapping = region1_mapping,
/* Second PMEM region spans two DIMM-sized windows. */
182 .size = DIMM_SIZE * 2,
186 .type = ND_DEVICE_NAMESPACE_BLK,
187 .num_mappings = ARRAY_SIZE(region2_mapping),
188 .mapping = region2_mapping,
193 .type = ND_DEVICE_NAMESPACE_BLK,
194 .num_mappings = ARRAY_SIZE(region3_mapping),
195 .mapping = region3_mapping,
200 .type = ND_DEVICE_NAMESPACE_BLK,
201 .num_mappings = ARRAY_SIZE(region4_mapping),
202 .mapping = region4_mapping,
207 .type = ND_DEVICE_NAMESPACE_BLK,
208 .num_mappings = ARRAY_SIZE(region5_mapping),
209 .mapping = region5_mapping,
/* Mapping table for the single region exported by bus 1. */
215 static struct ndtest_mapping region6_mapping[] = {
/* Bus 1 exports a single IO-namespace region. */
224 static struct ndtest_region bus1_regions[] = {
226 .type = ND_DEVICE_NAMESPACE_IO,
227 .num_mappings = ARRAY_SIZE(region6_mapping),
228 .mapping = region6_mapping,
/*
 * Per-instance bus configuration: instance 0 gets dimm_group1 and the
 * bus0 regions, instance 1 gets dimm_group2 and the bus1 regions.
 * dimm_start keeps DIMM ids globally unique across the two buses.
 */
234 static struct ndtest_config bus_configs[NUM_INSTANCES] = {
238 .dimm_count = ARRAY_SIZE(dimm_group1),
239 .dimms = dimm_group1,
240 .regions = bus0_regions,
241 .num_regions = ARRAY_SIZE(bus0_regions),
245 .dimm_start = ARRAY_SIZE(dimm_group1),
246 .dimm_count = ARRAY_SIZE(dimm_group2),
247 .dimms = dimm_group2,
248 .regions = bus1_regions,
249 .num_regions = ARRAY_SIZE(bus1_regions),
/* Map a platform device's struct device back to its ndtest_priv container. */
253 static inline struct ndtest_priv *to_ndtest_priv(struct device *dev)
255 struct platform_device *pdev = to_platform_device(dev);
257 return container_of(pdev, struct ndtest_priv, pdev);
260 static int ndtest_config_get(struct ndtest_dimm *p, unsigned int buf_len,
261 struct nd_cmd_get_config_data_hdr *hdr)
265 if ((hdr->in_offset + hdr->in_length) > LABEL_SIZE)
269 len = min(hdr->in_length, LABEL_SIZE - hdr->in_offset);
270 memcpy(hdr->out_buf, p->label_area + hdr->in_offset, len);
272 return buf_len - len;
275 static int ndtest_config_set(struct ndtest_dimm *p, unsigned int buf_len,
276 struct nd_cmd_set_config_hdr *hdr)
280 if ((hdr->in_offset + hdr->in_length) > LABEL_SIZE)
283 len = min(hdr->in_length, LABEL_SIZE - hdr->in_offset);
284 memcpy(p->label_area + hdr->in_offset, hdr->in_buf, len);
286 return buf_len - len;
/*
 * ND_CMD_GET_CONFIG_SIZE backend: report the size of this DIMM's label
 * configuration area back to the nvdimm core.
 */
289 static int ndtest_get_config_size(struct ndtest_dimm *dimm, unsigned int buf_len,
290 struct nd_cmd_get_config_size *size)
294 size->config_size = dimm->config_size;
/*
 * Bus-level command handler wired into nvdimm_bus_descriptor.ndctl:
 * dispatches the label-configuration commands to the helpers above and
 * reports their result through *cmd_rc.
 */
299 static int ndtest_ctl(struct nvdimm_bus_descriptor *nd_desc,
300 struct nvdimm *nvdimm, unsigned int cmd, void *buf,
301 unsigned int buf_len, int *cmd_rc)
303 struct ndtest_dimm *dimm;
/* Per-DIMM context stashed as provider data at nvdimm_create() time. */
314 dimm = nvdimm_provider_data(nvdimm);
319 case ND_CMD_GET_CONFIG_SIZE:
320 *cmd_rc = ndtest_get_config_size(dimm, buf_len, buf);
322 case ND_CMD_GET_CONFIG_DATA:
323 *cmd_rc = ndtest_config_get(dimm, buf_len, buf);
325 case ND_CMD_SET_CONFIG_DATA:
326 *cmd_rc = ndtest_config_set(dimm, buf_len, buf);
332 /* Failures for a DIMM can be injected using fail_cmd and
333 * fail_cmd_code, see the device attributes below
335 if ((1 << cmd) & dimm->fail_cmd)
336 return dimm->fail_cmd_code ? dimm->fail_cmd_code : -EIO;
/*
 * nfit_test hook: translate a (fake) physical address back to the
 * nfit_test_resource covering it, searching every instance's resource
 * list.  Matches either the advertised resource range or the kernel
 * virtual address of the backing buffer.
 */
341 static struct nfit_test_resource *ndtest_resource_lookup(resource_size_t addr)
345 for (i = 0; i < NUM_INSTANCES; i++) {
346 struct nfit_test_resource *n, *nfit_res = NULL;
347 struct ndtest_priv *t = instances[i];
/* List is mutated by ndtest_alloc_resource()/ndtest_release_resource(). */
351 spin_lock(&ndtest_lock);
352 list_for_each_entry(n, &t->resources, list) {
353 if (addr >= n->res.start && (addr < n->res.start
354 + resource_size(&n->res))) {
357 } else if (addr >= (unsigned long) n->buf
358 && (addr < (unsigned long) n->buf
359 + resource_size(&n->res))) {
364 spin_unlock(&ndtest_lock);
369 pr_warn("Failed to get resource\n");
/*
 * devm action: unlink a test resource from its instance list and return
 * any gen_pool-backed range to ndtest_pool.
 */
374 static void ndtest_release_resource(void *data)
376 struct nfit_test_resource *res = data;
378 spin_lock(&ndtest_lock);
379 list_del(&res->list);
380 spin_unlock(&ndtest_lock);
/* Only DIMM-sized (and larger) ranges come from the gen_pool. */
382 if (resource_size(&res->res) >= DIMM_SIZE)
383 gen_pool_free(ndtest_pool, res->res.start,
384 resource_size(&res->res));
/*
 * Allocate a simulated resource of @size for instance @p: a backing
 * buffer plus a fake physical range.  DIMM-sized (and larger) requests
 * carve the physical range out of ndtest_pool; smaller requests reuse
 * the buffer's kernel address.  The resource is linked onto the
 * instance list for ndtest_resource_lookup() and torn down by the
 * ndtest_release_resource() devm action.
 */
389 static void *ndtest_alloc_resource(struct ndtest_priv *p, size_t size,
394 struct nfit_test_resource *res;
395 struct genpool_data_align data = {
399 res = kzalloc(sizeof(*res), GFP_KERNEL);
/* Large allocations get a distinct fake physical address from the pool. */
404 if (size >= DIMM_SIZE)
405 __dma = gen_pool_alloc_algo(ndtest_pool, size,
406 gen_pool_first_fit_align, &data);
408 __dma = (unsigned long) buf;
413 INIT_LIST_HEAD(&res->list);
414 res->dev = &p->pdev.dev;
416 res->res.start = __dma;
417 res->res.end = __dma + size - 1;
418 res->res.name = "NFIT";
419 spin_lock_init(&res->lock);
420 INIT_LIST_HEAD(&res->requests);
421 spin_lock(&ndtest_lock);
422 list_add(&res->list, &p->resources);
423 spin_unlock(&ndtest_lock);
/* devm_add_action() returns 0 on success; error path unwinds below. */
428 if (!devm_add_action(&p->pdev.dev, ndtest_release_resource, res))
432 if (__dma && size >= DIMM_SIZE)
433 gen_pool_free(ndtest_pool, __dma, size);
/* sysfs (region device): expose this test region's range index. */
441 static ssize_t range_index_show(struct device *dev,
442 struct device_attribute *attr, char *buf)
444 struct nd_region *nd_region = to_nd_region(dev);
445 struct ndtest_region *region = nd_region_provider_data(nd_region);
447 return sprintf(buf, "%d\n", region->range_index);
449 static DEVICE_ATTR_RO(range_index);
/* Extra sysfs attributes attached to every test region device. */
451 static struct attribute *ndtest_region_attributes[] = {
452 &dev_attr_range_index.attr,
456 static const struct attribute_group ndtest_region_attribute_group = {
458 .attrs = ndtest_region_attributes,
461 static const struct attribute_group *ndtest_region_attribute_groups[] = {
462 &ndtest_region_attribute_group,
/*
 * Build and register one libnvdimm region described by @region:
 * allocate a backing resource, derive the interleave-set cookies from
 * the lead DIMM's UUID, then create either a BLK region (single
 * mapping) or a PMEM region (all mappings) on the instance's bus.
 */
466 static int ndtest_create_region(struct ndtest_priv *p,
467 struct ndtest_region *region)
469 struct nd_mapping_desc mappings[NDTEST_MAX_MAPPING];
470 struct nd_region_desc *ndr_desc, _ndr_desc;
471 struct nd_interleave_set *nd_set;
473 int i, ndimm = region->mapping[0].dimm;
476 memset(&res, 0, sizeof(res));
477 memset(&mappings, 0, sizeof(mappings));
478 memset(&_ndr_desc, 0, sizeof(_ndr_desc));
479 ndr_desc = &_ndr_desc;
/* Fake physical range backing the whole region. */
481 if (!ndtest_alloc_resource(p, region->size, &res.start))
484 res.end = res.start + region->size - 1;
485 ndr_desc->mapping = mappings;
486 ndr_desc->res = &res;
487 ndr_desc->provider_data = region;
488 ndr_desc->attr_groups = ndtest_region_attribute_groups;
/* Interleave-set cookies are seeded from the lead DIMM's UUID. */
490 if (uuid_parse(p->config->dimms[ndimm].uuid_str, (uuid_t *)uuid)) {
491 pr_err("failed to parse UUID\n");
495 nd_set = devm_kzalloc(&p->pdev.dev, sizeof(*nd_set), GFP_KERNEL);
499 nd_set->cookie1 = cpu_to_le64(uuid[0]);
500 nd_set->cookie2 = cpu_to_le64(uuid[1]);
501 nd_set->altcookie = nd_set->cookie1;
502 ndr_desc->nd_set = nd_set;
/* BLK regions map one DIMM-sized window on a single DIMM, one lane. */
504 if (region->type == ND_DEVICE_NAMESPACE_BLK) {
505 mappings[0].start = 0;
506 mappings[0].size = DIMM_SIZE;
507 mappings[0].nvdimm = p->config->dimms[ndimm].nvdimm;
509 ndr_desc->mapping = &mappings[0];
510 ndr_desc->num_mappings = 1;
511 ndr_desc->num_lanes = 1;
512 ndbr_desc.enable = ndtest_blk_region_enable;
513 ndbr_desc.do_io = ndtest_blk_do_io;
514 region->region = nvdimm_blk_region_create(p->bus, ndr_desc);
/* PMEM regions copy every mapping from the static tables above. */
519 for (i = 0; i < region->num_mappings; i++) {
520 ndimm = region->mapping[i].dimm;
521 mappings[i].start = region->mapping[i].start;
522 mappings[i].size = region->mapping[i].size;
523 mappings[i].position = region->mapping[i].position;
524 mappings[i].nvdimm = p->config->dimms[ndimm].nvdimm;
527 ndr_desc->num_mappings = region->num_mappings;
528 region->region = nvdimm_pmem_region_create(p->bus, ndr_desc);
531 if (!region->region) {
532 dev_err(&p->pdev.dev, "Error registering region %pR\n",
/* Create every region listed in the instance's bus configuration. */
540 static int ndtest_init_regions(struct ndtest_priv *p)
544 for (i = 0; i < p->config->num_regions; i++) {
545 ret = ndtest_create_region(p, &p->config->regions[i]);
/*
 * devm action: tear down the auxiliary per-DIMM sysfs devices created
 * by ndtest_dimm_register().
 */
553 static void put_dimms(void *data)
555 struct ndtest_priv *p = data;
558 for (i = 0; i < p->config->dimm_count; i++)
559 if (p->config->dimms[i].dev) {
560 device_unregister(p->config->dimms[i].dev);
/* Clear the pointer so a second invocation is a no-op. */
561 p->config->dimms[i].dev = NULL;
/* sysfs (dimm test device): expose the packed NFIT-style DIMM handle. */
565 static ssize_t handle_show(struct device *dev, struct device_attribute *attr,
568 struct ndtest_dimm *dimm = dev_get_drvdata(dev);
570 return sprintf(buf, "%#x\n", dimm->handle);
572 static DEVICE_ATTR_RO(handle);
/* sysfs: read back the injected-failure command mask for this DIMM. */
574 static ssize_t fail_cmd_show(struct device *dev, struct device_attribute *attr,
577 struct ndtest_dimm *dimm = dev_get_drvdata(dev);
579 return sprintf(buf, "%#x\n", dimm->fail_cmd);
/*
 * sysfs: set a bitmask of ND_CMD_* numbers that ndtest_ctl() should
 * fail for this DIMM (see the fault-injection check there).
 */
582 static ssize_t fail_cmd_store(struct device *dev, struct device_attribute *attr,
583 const char *buf, size_t size)
585 struct ndtest_dimm *dimm = dev_get_drvdata(dev);
589 rc = kstrtol(buf, 0, &val);
593 dimm->fail_cmd = val;
597 static DEVICE_ATTR_RW(fail_cmd);
/* sysfs: read the error code injected for failed commands. */
599 static ssize_t fail_cmd_code_show(struct device *dev, struct device_attribute *attr,
602 struct ndtest_dimm *dimm = dev_get_drvdata(dev);
604 return sprintf(buf, "%d\n", dimm->fail_cmd_code);
/*
 * sysfs: choose the value ndtest_ctl() returns for injected failures
 * (zero means the default -EIO).
 */
607 static ssize_t fail_cmd_code_store(struct device *dev, struct device_attribute *attr,
608 const char *buf, size_t size)
610 struct ndtest_dimm *dimm = dev_get_drvdata(dev);
614 rc = kstrtol(buf, 0, &val);
618 dimm->fail_cmd_code = val;
621 static DEVICE_ATTR_RW(fail_cmd_code);
/* Attributes for the auxiliary per-DIMM test device (fault injection). */
623 static struct attribute *dimm_attributes[] = {
624 &dev_attr_handle.attr,
625 &dev_attr_fail_cmd.attr,
626 &dev_attr_fail_cmd_code.attr,
630 static struct attribute_group dimm_attribute_group = {
631 .attrs = dimm_attributes,
634 static const struct attribute_group *dimm_attribute_groups[] = {
635 &dimm_attribute_group,
/* sysfs (nvdimm device): physical id of the simulated DIMM. */
639 static ssize_t phys_id_show(struct device *dev,
640 struct device_attribute *attr, char *buf)
642 struct nvdimm *nvdimm = to_nvdimm(dev);
643 struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
645 return sprintf(buf, "%#x\n", dimm->physical_id);
647 static DEVICE_ATTR_RO(phys_id);
/* sysfs: fixed fake vendor id, identical for all test DIMMs. */
649 static ssize_t vendor_show(struct device *dev,
650 struct device_attribute *attr, char *buf)
652 return sprintf(buf, "0x1234567\n");
654 static DEVICE_ATTR_RO(vendor);
/*
 * sysfs: synthesize a per-DIMM id string from fixed fields plus the
 * bitwise-inverted handle (which makes it unique per DIMM).
 */
656 static ssize_t id_show(struct device *dev,
657 struct device_attribute *attr, char *buf)
659 struct nvdimm *nvdimm = to_nvdimm(dev);
660 struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
662 return sprintf(buf, "%04x-%02x-%04x-%08x", 0xabcd,
663 0xa, 2016, ~(dimm->handle));
665 static DEVICE_ATTR_RO(id);
/* sysfs: DIMM handle, exported on the nvdimm device under the name "handle". */
667 static ssize_t nvdimm_handle_show(struct device *dev,
668 struct device_attribute *attr, char *buf)
670 struct nvdimm *nvdimm = to_nvdimm(dev);
671 struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
673 return sprintf(buf, "%#x\n", dimm->handle);
/*
 * Hand-rolled attribute so the sysfs name "handle" can be reused here
 * without clashing with the dev_attr_handle symbol defined above.
 */
676 static struct device_attribute dev_attr_nvdimm_show_handle = {
677 .attr = { .name = "handle", .mode = 0444 },
678 .show = nvdimm_handle_show,
/* sysfs: fake subsystem vendor id (always zero). */
681 static ssize_t subsystem_vendor_show(struct device *dev,
682 struct device_attribute *attr, char *buf)
684 return sprintf(buf, "0x%04x\n", 0);
686 static DEVICE_ATTR_RO(subsystem_vendor);
/* sysfs: fixed dirty-shutdown count for testing (always 42). */
688 static ssize_t dirty_shutdown_show(struct device *dev,
689 struct device_attribute *attr, char *buf)
691 return sprintf(buf, "%d\n", 42);
693 static DEVICE_ATTR_RO(dirty_shutdown);
/* sysfs: number of label formats the simulated DIMM advertises. */
695 static ssize_t formats_show(struct device *dev,
696 struct device_attribute *attr, char *buf)
698 struct nvdimm *nvdimm = to_nvdimm(dev);
699 struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
701 return sprintf(buf, "%d\n", dimm->num_formats);
703 static DEVICE_ATTR_RO(formats);
/* sysfs: primary format code; multi-format DIMMs report 0x201, else 0x101. */
705 static ssize_t format_show(struct device *dev,
706 struct device_attribute *attr, char *buf)
708 struct nvdimm *nvdimm = to_nvdimm(dev);
709 struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
711 if (dimm->num_formats > 1)
712 return sprintf(buf, "0x201\n");
714 return sprintf(buf, "0x101\n");
716 static DEVICE_ATTR_RO(format);
/* sysfs: secondary format code (hidden on single-format DIMMs, see below). */
718 static ssize_t format1_show(struct device *dev, struct device_attribute *attr,
721 return sprintf(buf, "0x301\n");
723 static DEVICE_ATTR_RO(format1);
/*
 * Hide the "format1" attribute on DIMMs that advertise a single label
 * format; all other attributes keep their default mode.
 */
725 static umode_t ndtest_nvdimm_attr_visible(struct kobject *kobj,
726 struct attribute *a, int n)
728 struct device *dev = container_of(kobj, struct device, kobj);
729 struct nvdimm *nvdimm = to_nvdimm(dev);
730 struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
732 if (a == &dev_attr_format1.attr && dimm->num_formats <= 1)
/*
 * sysfs: render the DIMM's PAPR health flags as a space-separated list
 * of human-readable tokens, newline-terminated when non-empty.
 */
738 static ssize_t flags_show(struct device *dev,
739 struct device_attribute *attr, char *buf)
741 struct nvdimm *nvdimm = to_nvdimm(dev);
742 struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
/* seq_buf gives bounded, append-style formatting into the sysfs page. */
748 seq_buf_init(&s, buf, PAGE_SIZE);
749 if (flags & PAPR_PMEM_UNARMED_MASK)
750 seq_buf_printf(&s, "not_armed ");
752 if (flags & PAPR_PMEM_BAD_SHUTDOWN_MASK)
753 seq_buf_printf(&s, "flush_fail ");
755 if (flags & PAPR_PMEM_BAD_RESTORE_MASK)
756 seq_buf_printf(&s, "restore_fail ");
758 if (flags & PAPR_PMEM_SAVE_MASK)
759 seq_buf_printf(&s, "save_fail ");
761 if (flags & PAPR_PMEM_SMART_EVENT_MASK)
762 seq_buf_printf(&s, "smart_notify ");
/* Terminate with a newline only if at least one flag was printed. */
765 if (seq_buf_used(&s))
766 seq_buf_printf(&s, "\n");
768 return seq_buf_used(&s);
770 static DEVICE_ATTR_RO(flags);
/*
 * Attribute set attached to each created nvdimm; visibility of format1
 * is gated by ndtest_nvdimm_attr_visible().
 */
772 static struct attribute *ndtest_nvdimm_attributes[] = {
773 &dev_attr_nvdimm_show_handle.attr,
774 &dev_attr_vendor.attr,
776 &dev_attr_phys_id.attr,
777 &dev_attr_subsystem_vendor.attr,
778 &dev_attr_dirty_shutdown.attr,
779 &dev_attr_formats.attr,
780 &dev_attr_format.attr,
781 &dev_attr_format1.attr,
782 &dev_attr_flags.attr,
786 static const struct attribute_group ndtest_nvdimm_attribute_group = {
788 .attrs = ndtest_nvdimm_attributes,
789 .is_visible = ndtest_nvdimm_attr_visible,
792 static const struct attribute_group *ndtest_nvdimm_attribute_groups[] = {
793 &ndtest_nvdimm_attribute_group,
/*
 * Create the libnvdimm nvdimm object for @dimm (propagating the
 * LABELING/UNARMED flags) plus the auxiliary sysfs test device used for
 * fault injection.
 */
797 static int ndtest_dimm_register(struct ndtest_priv *priv,
798 struct ndtest_dimm *dimm, int id)
800 struct device *dev = &priv->pdev.dev;
801 unsigned long dimm_flags = dimm->flags;
/* Multi-format DIMMs are marked as supporting namespace labels. */
803 if (dimm->num_formats > 1)
804 set_bit(NDD_LABELING, &dimm_flags);
806 if (dimm->flags & PAPR_PMEM_UNARMED_MASK)
807 set_bit(NDD_UNARMED, &dimm_flags);
809 dimm->nvdimm = nvdimm_create(priv->bus, dimm,
810 ndtest_nvdimm_attribute_groups, dimm_flags,
811 NDTEST_SCM_DIMM_CMD_MASK, 0, NULL)；
813 dev_err(dev, "Error creating DIMM object for %pOF\n", priv->dn);
/* Companion device under ndtest_dimm_class carrying the fail_cmd* knobs. */
817 dimm->dev = device_create_with_groups(ndtest_dimm_class,
819 0, dimm, dimm_attribute_groups,
822 pr_err("Could not create dimm device attributes\n");
829 static int ndtest_nvdimm_init(struct ndtest_priv *p)
831 struct ndtest_dimm *d;
835 for (i = 0; i < p->config->dimm_count; i++) {
836 d = &p->config->dimms[i];
837 d->id = id = p->config->dimm_start + i;
838 res = ndtest_alloc_resource(p, LABEL_SIZE, NULL);
843 sprintf(d->label_area, "label%d", id);
844 d->config_size = LABEL_SIZE;
846 if (!ndtest_alloc_resource(p, d->size,
850 if (!ndtest_alloc_resource(p, LABEL_SIZE,
854 if (!ndtest_alloc_resource(p, LABEL_SIZE,
858 d->address = p->dimm_dma[id];
860 ndtest_dimm_register(p, d, id);
866 static ssize_t compatible_show(struct device *dev,
867 struct device_attribute *attr, char *buf)
869 return sprintf(buf, "nvdimm_test");
871 static DEVICE_ATTR_RO(compatible);
/* Fake OF-node attribute group exposed on the bus device. */
873 static struct attribute *of_node_attributes[] = {
874 &dev_attr_compatible.attr,
878 static const struct attribute_group of_node_attribute_group = {
880 .attrs = of_node_attributes,
883 static const struct attribute_group *ndtest_attribute_groups[] = {
884 &of_node_attribute_group,
/*
 * Register an nvdimm bus for this instance, selecting the instance's
 * bus configuration and wiring ndtest_ctl() as the command handler.
 */
888 static int ndtest_bus_register(struct ndtest_priv *p)
890 p->config = &bus_configs[p->pdev.id];
892 p->bus_desc.ndctl = ndtest_ctl;
893 p->bus_desc.module = THIS_MODULE;
/* NULL provider_name: libnvdimm falls back to a default for this bus. */
894 p->bus_desc.provider_name = NULL;
895 p->bus_desc.attr_groups = ndtest_attribute_groups;
897 p->bus = nvdimm_bus_register(&p->pdev.dev, &p->bus_desc);
899 dev_err(&p->pdev.dev, "Error creating nvdimm bus %pOF\n", p->dn);
/* platform_driver remove: tear down this instance's nvdimm bus. */
906 static int ndtest_remove(struct platform_device *pdev)
908 struct ndtest_priv *p = to_ndtest_priv(&pdev->dev);
910 nvdimm_bus_unregister(p->bus);
914 static int ndtest_probe(struct platform_device *pdev)
916 struct ndtest_priv *p;
919 p = to_ndtest_priv(&pdev->dev);
920 if (ndtest_bus_register(p))
923 p->dcr_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
924 sizeof(dma_addr_t), GFP_KERNEL);
925 p->label_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
926 sizeof(dma_addr_t), GFP_KERNEL);
927 p->dimm_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
928 sizeof(dma_addr_t), GFP_KERNEL);
930 rc = ndtest_nvdimm_init(p);
934 rc = ndtest_init_regions(p);
938 rc = devm_add_action_or_reset(&pdev->dev, put_dimms, p);
942 platform_set_drvdata(pdev, p);
947 pr_err("%s:%d Failed nvdimm init\n", __func__, __LINE__);
/* Device-id table and driver glue binding the test platform devices. */
951 static const struct platform_device_id ndtest_id[] = {
956 static struct platform_driver ndtest_driver = {
957 .probe = ndtest_probe,
958 .remove = ndtest_remove,
960 .name = KBUILD_MODNAME,
962 .id_table = ndtest_id,
/* Device release callback for the ndtest_priv allocated in ndtest_init(). */
965 static void ndtest_release(struct device *dev)
967 struct ndtest_priv *p = to_ndtest_priv(dev);
/*
 * Undo ndtest_init(): unregister every instance's platform device,
 * detach the nfit_test hooks, and destroy the gen_pool and dimm class.
 */
972 static void cleanup_devices(void)
976 for (i = 0; i < NUM_INSTANCES; i++)
978 platform_device_unregister(&instances[i]->pdev);
980 nfit_test_teardown();
983 gen_pool_destroy(ndtest_pool);
986 if (ndtest_dimm_class)
987 class_destroy(ndtest_dimm_class);
/*
 * Module init: install the nfit_test resource-lookup hook, create the
 * dimm class and the backing gen_pool, register one platform device
 * per simulated bus, then register the platform driver.
 */
990 static __init int ndtest_init(void)
999 nfit_test_setup(ndtest_resource_lookup, NULL);
1001 ndtest_dimm_class = class_create(THIS_MODULE, "nfit_test_dimm");
1002 if (IS_ERR(ndtest_dimm_class)) {
1003 rc = PTR_ERR(ndtest_dimm_class);
/* SZ_4M-aligned pool supplying the fake DIMM physical ranges. */
1007 ndtest_pool = gen_pool_create(ilog2(SZ_4M), NUMA_NO_NODE);
1013 if (gen_pool_add(ndtest_pool, SZ_4G, SZ_4G, NUMA_NO_NODE)) {
1018 /* Each instance can be taken as a bus, which can have multiple dimms */
1019 for (i = 0; i < NUM_INSTANCES; i++) {
1020 struct ndtest_priv *priv;
1021 struct platform_device *pdev;
1023 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1029 INIT_LIST_HEAD(&priv->resources);
1031 pdev->name = KBUILD_MODNAME;
/* ndtest_release() frees priv once the last reference is dropped. */
1033 pdev->dev.release = ndtest_release;
1034 rc = platform_device_register(pdev);
/* On registration failure, dropping the ref triggers the release path. */
1036 put_device(&pdev->dev);
/* Extra reference held on behalf of the instances[] table. */
1039 get_device(&pdev->dev);
1041 instances[i] = priv;
1044 rc = platform_driver_register(&ndtest_driver);
1051 pr_err("Error registering platform device\n");
/* Module exit: unregister the platform driver. */
1057 static __exit void ndtest_exit(void)
1060 platform_driver_unregister(&ndtest_driver);
/* Module registration boilerplate. */
1063 module_init(ndtest_init);
1064 module_exit(ndtest_exit);
1065 MODULE_LICENSE("GPL");
1066 MODULE_AUTHOR("IBM Corporation");