// SPDX-License-Identifier: GPL-2.0

#define pr_fmt(fmt) "papr-scm: " fmt

#include <linux/of.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/ndctl.h>
#include <linux/sched.h>
#include <linux/libnvdimm.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/seq_buf.h>
#include <linux/nd.h>

#include <asm/plpar_wrappers.h>
#include <asm/papr_pdsm.h>
#include <asm/mce.h>
#include <asm/unaligned.h>
#include <linux/perf_event.h>
#define BIND_ANY_ADDR (~0ul)

#define PAPR_SCM_DIMM_CMD_MASK \
	((1ul << ND_CMD_GET_CONFIG_SIZE) | \
	 (1ul << ND_CMD_GET_CONFIG_DATA) | \
	 (1ul << ND_CMD_SET_CONFIG_DATA) | \
	 (1ul << ND_CMD_CALL))
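/*
 * Note: each ND_CMD_* value above is a bit position, not a mask;
 * is_cmd_valid() below checks an incoming command against this mask with
 * test_bit(cmd, &cmd_mask).
 */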
/* DIMM health bitmap indicators */
/* SCM device is unable to persist memory contents */
#define PAPR_PMEM_UNARMED		(1ULL << (63 - 0))
/* SCM device failed to persist memory contents */
#define PAPR_PMEM_SHUTDOWN_DIRTY	(1ULL << (63 - 1))
/* SCM device contents are persisted from previous IPL */
#define PAPR_PMEM_SHUTDOWN_CLEAN	(1ULL << (63 - 2))
/* SCM device contents are not persisted from previous IPL */
#define PAPR_PMEM_EMPTY			(1ULL << (63 - 3))
/* SCM device memory life remaining is critically low */
#define PAPR_PMEM_HEALTH_CRITICAL	(1ULL << (63 - 4))
/* SCM device will be guarded off next IPL due to failure */
#define PAPR_PMEM_HEALTH_FATAL		(1ULL << (63 - 5))
/* SCM contents cannot persist due to current platform health status */
#define PAPR_PMEM_HEALTH_UNHEALTHY	(1ULL << (63 - 6))
/* SCM device is unable to persist memory contents in certain conditions */
#define PAPR_PMEM_HEALTH_NON_CRITICAL	(1ULL << (63 - 7))
/* SCM device is encrypted */
#define PAPR_PMEM_ENCRYPTED		(1ULL << (63 - 8))
/* SCM device has been scrubbed and locked */
#define PAPR_PMEM_SCRUBBED_AND_LOCKED	(1ULL << (63 - 9))
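/*
 * The (63 - N) shift mirrors PAPR's big-endian bit numbering, where bit 0
 * is the most significant bit of the 64-bit health word. For example, PAPR
 * bit 0 (PAPR_PMEM_UNARMED) lands on Linux bit 63, i.e.
 * 0x8000000000000000ULL.
 */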
/* Bit status indicators for health bitmap indicating unarmed dimm */
#define PAPR_PMEM_UNARMED_MASK		(PAPR_PMEM_UNARMED | \
					 PAPR_PMEM_HEALTH_UNHEALTHY)

/* Bit status indicators for health bitmap indicating unflushed dimm */
#define PAPR_PMEM_BAD_SHUTDOWN_MASK	(PAPR_PMEM_SHUTDOWN_DIRTY)

/* Bit status indicators for health bitmap indicating unrestored dimm */
#define PAPR_PMEM_BAD_RESTORE_MASK	(PAPR_PMEM_EMPTY)

/* Bit status indicators for smart event notification */
#define PAPR_PMEM_SMART_EVENT_MASK	(PAPR_PMEM_HEALTH_CRITICAL | \
					 PAPR_PMEM_HEALTH_FATAL |    \
					 PAPR_PMEM_HEALTH_UNHEALTHY)
#define PAPR_SCM_PERF_STATS_EYECATCHER		__stringify(SCMSTATS)
#define PAPR_SCM_PERF_STATS_VERSION 0x1
/* Struct holding a single performance metric */
struct papr_scm_perf_stat {
	u8 stat_id[8];
	__be64 stat_val;
} __packed;

/* Struct exchanged between kernel and PHYP for fetching drc perf stats */
struct papr_scm_perf_stats {
	u8 eye_catcher[8];
	/* Should be PAPR_SCM_PERF_STATS_VERSION */
	__be32 stats_version;
	/* Number of stats following */
	__be32 num_statistics;
	/* zero or more performance metrics */
	struct papr_scm_perf_stat scm_statistic[];
} __packed;
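/*
 * Wire layout of the buffer exchanged with PHYP: a 16-byte header (8-byte
 * "SCMSTATS" eye catcher, 4-byte version, 4-byte big-endian stat count)
 * followed by one 16-byte entry per statistic (8 ASCII bytes of stat_id
 * plus an 8-byte big-endian value).
 */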
/* private struct associated with each region */
struct papr_scm_priv {
	struct platform_device *pdev;
	struct device_node *dn;
	uint32_t drc_index;
	uint64_t blocks;
	uint64_t block_size;
	int metadata_size;
	bool is_volatile;
	bool hcall_flush_required;

	uint64_t bound_addr;

	struct nvdimm_bus_descriptor bus_desc;
	struct nvdimm_bus *bus;
	struct nvdimm *nvdimm;
	struct resource res;
	struct nd_region *region;
	struct nd_interleave_set nd_set;
	struct list_head region_list;

	/* Protect dimm health data from concurrent read/writes */
	struct mutex health_mutex;

	/* Last time the health information of the dimm was updated */
	unsigned long lasthealth_jiffies;

	/* Health information for the dimm */
	u64 health_bitmap;

	/* Holds the last known dirty shutdown counter value */
	u64 dirty_shutdown_counter;

	/* length of the stat buffer as expected by phyp */
	size_t stat_buffer_len;

	/* The bits which need to be overridden */
	u64 health_bitmap_inject_mask;

	/* array to hold event_code and stat_id mappings */
	char **nvdimm_events_map;
};
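/*
 * One papr_scm_priv instance is allocated per "ibm,pmemory" platform
 * device in papr_scm_probe() below and freed again in papr_scm_remove().
 */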
static int papr_scm_pmem_flush(struct nd_region *nd_region,
			       struct bio *bio __maybe_unused)
{
	struct papr_scm_priv *p = nd_region_provider_data(nd_region);
	unsigned long ret_buf[PLPAR_HCALL_BUFSIZE], token = 0;
	long rc;

	dev_dbg(&p->pdev->dev, "flush drc 0x%x", p->drc_index);

	do {
		rc = plpar_hcall(H_SCM_FLUSH, ret_buf, p->drc_index, token);
		token = ret_buf[0];

		/* Check if we are stalled for some time */
		if (H_IS_LONG_BUSY(rc)) {
			msleep(get_longbusy_msecs(rc));
			rc = H_BUSY;
		} else if (rc == H_BUSY) {
			cond_resched();
		}
	} while (rc == H_BUSY);

	if (rc) {
		dev_err(&p->pdev->dev, "flush error: %ld", rc);
		rc = -EIO;
	} else {
		dev_dbg(&p->pdev->dev, "flush drc 0x%x complete", p->drc_index);
	}

	return rc;
}
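/*
 * papr_scm_pmem_flush() is wired up as the region 'flush' callback in
 * papr_scm_nvdimm_init() below, but only when the device tree carries
 * "ibm,hcall-flush-required" and the region is marked ND_REGION_ASYNC.
 */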
static LIST_HEAD(papr_nd_regions);
static DEFINE_MUTEX(papr_ndr_lock);
static int drc_pmem_bind(struct papr_scm_priv *p)
{
	unsigned long ret[PLPAR_HCALL_BUFSIZE];
	uint64_t saved = 0;
	uint64_t token;
	int64_t rc;

	/*
	 * When the hypervisor cannot map all the requested memory in a single
	 * hcall it returns H_BUSY and we call again with the token until
	 * we get H_SUCCESS. Aborting the retry loop before getting H_SUCCESS
	 * leaves the system in an undefined state, so we wait.
	 */
	token = 0;

	do {
		rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0,
				 p->blocks, BIND_ANY_ADDR, token);
		token = ret[0];
		if (!saved)
			saved = ret[1];
		cond_resched();
	} while (rc == H_BUSY);

	if (rc)
		return rc;

	p->bound_addr = saved;
	dev_dbg(&p->pdev->dev, "bound drc 0x%x to 0x%lx\n",
		p->drc_index, (unsigned long)saved);
	return rc;
}
static void drc_pmem_unbind(struct papr_scm_priv *p)
{
	unsigned long ret[PLPAR_HCALL_BUFSIZE];
	uint64_t token = 0;
	int64_t rc;

	dev_dbg(&p->pdev->dev, "unbind drc 0x%x\n", p->drc_index);

	/* NB: unbind has the same retry requirements as drc_pmem_bind() */
	do {

		/* Unbind of all SCM resources associated with drcIndex */
		rc = plpar_hcall(H_SCM_UNBIND_ALL, ret, H_UNBIND_SCOPE_DRC,
				 p->drc_index, token);
		token = ret[0];

		/* Check if we are stalled for some time */
		if (H_IS_LONG_BUSY(rc)) {
			msleep(get_longbusy_msecs(rc));
			rc = H_BUSY;
		} else if (rc == H_BUSY) {
			cond_resched();
		}
	} while (rc == H_BUSY);

	if (rc)
		dev_err(&p->pdev->dev, "unbind error: %lld\n", rc);
	else
		dev_dbg(&p->pdev->dev, "unbind drc 0x%x complete\n",
			p->drc_index);
}
static int drc_pmem_query_n_bind(struct papr_scm_priv *p)
{
	unsigned long start_addr;
	unsigned long end_addr;
	unsigned long ret[PLPAR_HCALL_BUFSIZE];
	int64_t rc;

	rc = plpar_hcall(H_SCM_QUERY_BLOCK_MEM_BINDING, ret,
			 p->drc_index, 0);
	if (rc)
		goto err_out;
	start_addr = ret[0];

	/* Make sure the full region is bound. */
	rc = plpar_hcall(H_SCM_QUERY_BLOCK_MEM_BINDING, ret,
			 p->drc_index, p->blocks - 1);
	if (rc)
		goto err_out;
	end_addr = ret[0];

	if ((end_addr - start_addr) != ((p->blocks - 1) * p->block_size))
		goto err_out;

	p->bound_addr = start_addr;
	dev_dbg(&p->pdev->dev, "bound drc 0x%x to 0x%lx\n", p->drc_index, start_addr);
	return rc;

err_out:
	dev_info(&p->pdev->dev,
		 "Failed to query, trying an unbind followed by bind");
	drc_pmem_unbind(p);
	return drc_pmem_bind(p);
}
/*
 * Query the Dimm performance stats from PHYP and copy them (if returned) to
 * provided struct papr_scm_perf_stats instance 'stats' that can hold at least
 * (num_stats + header) bytes.
 * - If buff_stats == NULL the return value is the size in bytes of the buffer
 * needed to hold all supported performance-statistics.
 * - If buff_stats != NULL and num_stats == 0 then we copy all known
 * performance-statistics to 'buff_stats' and expect it to be large enough to
 * hold them all.
 * - If buff_stats != NULL and num_stats > 0 then copy the requested
 * performance-statistics to buff_stats.
 */
static ssize_t drc_pmem_query_stats(struct papr_scm_priv *p,
				    struct papr_scm_perf_stats *buff_stats,
				    unsigned int num_stats)
{
	unsigned long ret[PLPAR_HCALL_BUFSIZE];
	size_t size;
	s64 rc;

	/* Setup the out buffer */
	if (buff_stats) {
		memcpy(buff_stats->eye_catcher,
		       PAPR_SCM_PERF_STATS_EYECATCHER, 8);
		buff_stats->stats_version =
			cpu_to_be32(PAPR_SCM_PERF_STATS_VERSION);
		buff_stats->num_statistics =
			cpu_to_be32(num_stats);

		/*
		 * Calculate the buffer size based on num-stats provided
		 * or use the prefetched max buffer length
		 */
		if (num_stats)
			/* Calculate size from the num_stats */
			size = sizeof(struct papr_scm_perf_stats) +
				num_stats * sizeof(struct papr_scm_perf_stat);
		else
			size = p->stat_buffer_len;
	} else {
		/* In case of no out buffer ignore the size */
		size = 0;
	}

	/* Do the HCALL asking PHYP for info */
	rc = plpar_hcall(H_SCM_PERFORMANCE_STATS, ret, p->drc_index,
			 buff_stats ? virt_to_phys(buff_stats) : 0,
			 size);

	/* Check if the error was due to an unknown stat-id */
	if (rc == H_PARTIAL) {
		dev_err(&p->pdev->dev,
			"Unknown performance stats, Err:0x%016lX\n", ret[0]);
		return -ENOENT;
	} else if (rc == H_AUTHORITY) {
		dev_info(&p->pdev->dev,
			 "Permission denied while accessing performance stats");
		return -EPERM;
	} else if (rc == H_UNSUPPORTED) {
		dev_dbg(&p->pdev->dev, "Performance stats unsupported\n");
		return -EOPNOTSUPP;
	} else if (rc != H_SUCCESS) {
		dev_err(&p->pdev->dev,
			"Failed to query performance stats, Err:%lld\n", rc);
		return -EIO;
	}

	if (!size) {
		/* Handle case where stat buffer size was requested */
		dev_dbg(&p->pdev->dev,
			"Performance stats size %ld\n", ret[0]);
		return ret[0];
	}

	/* Successfully fetched the requested stats from phyp */
	dev_dbg(&p->pdev->dev,
		"Performance stats returned %d stats\n",
		be32_to_cpu(buff_stats->num_statistics));
	return 0;
}
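/*
 * Usage sketch (mirrors how papr_scm_probe() and perf_stats_show() below
 * call this helper): first probe the required buffer size with a NULL
 * buffer, then fetch every supported statistic into an allocated buffer:
 *
 *	ssize_t len = drc_pmem_query_stats(p, NULL, 0);
 *
 *	if (len > 0) {
 *		struct papr_scm_perf_stats *sb = kzalloc(len, GFP_KERNEL);
 *
 *		if (sb && !drc_pmem_query_stats(p, sb, 0))
 *			;	// parse sb->scm_statistic[] here
 *		kfree(sb);
 *	}
 */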
#ifdef CONFIG_PERF_EVENTS
#define to_nvdimm_pmu(_pmu)	container_of(_pmu, struct nvdimm_pmu, pmu)
static int papr_scm_pmu_get_value(struct perf_event *event, struct device *dev, u64 *count)
{
	struct papr_scm_perf_stat *stat;
	struct papr_scm_perf_stats *stats;
	struct papr_scm_priv *p = (struct papr_scm_priv *)dev->driver_data;
	int rc, size;

	/* Allocate request buffer enough to hold single performance stat */
	size = sizeof(struct papr_scm_perf_stats) +
		sizeof(struct papr_scm_perf_stat);

	if (!p || !p->nvdimm_events_map)
		return -EINVAL;

	stats = kzalloc(size, GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	stat = &stats->scm_statistic[0];
	memcpy(&stat->stat_id,
	       p->nvdimm_events_map[event->attr.config],
	       sizeof(stat->stat_id));
	stat->stat_val = 0;

	rc = drc_pmem_query_stats(p, stats, 1);
	if (rc < 0) {
		kfree(stats);
		return rc;
	}

	*count = be64_to_cpu(stat->stat_val);
	kfree(stats);
	return 0;
}
static int papr_scm_pmu_event_init(struct perf_event *event)
{
	struct nvdimm_pmu *nd_pmu = to_nvdimm_pmu(event->pmu);
	struct papr_scm_priv *p;

	if (!nd_pmu)
		return -EINVAL;

	/* test the event attr type for PMU enumeration */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* it does not support event sampling mode */
	if (is_sampling_event(event))
		return -EOPNOTSUPP;

	/* no branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	p = (struct papr_scm_priv *)nd_pmu->dev->driver_data;
	if (!p)
		return -EINVAL;

	/* Invalid eventcode */
	if (event->attr.config == 0 || event->attr.config > 16)
		return -EINVAL;

	return 0;
}
static int papr_scm_pmu_add(struct perf_event *event, int flags)
{
	u64 count;
	int rc;
	struct nvdimm_pmu *nd_pmu = to_nvdimm_pmu(event->pmu);

	if (!nd_pmu)
		return -EINVAL;

	if (flags & PERF_EF_START) {
		rc = papr_scm_pmu_get_value(event, nd_pmu->dev, &count);
		if (rc)
			return rc;

		local64_set(&event->hw.prev_count, count);
	}

	return 0;
}
static void papr_scm_pmu_read(struct perf_event *event)
{
	u64 prev, now;
	int rc;
	struct nvdimm_pmu *nd_pmu = to_nvdimm_pmu(event->pmu);

	if (!nd_pmu)
		return;

	rc = papr_scm_pmu_get_value(event, nd_pmu->dev, &now);
	if (rc)
		return;

	/* Accumulate the delta against the previously sampled count */
	prev = local64_xchg(&event->hw.prev_count, now);
	local64_add(now - prev, &event->count);
}
static void papr_scm_pmu_del(struct perf_event *event, int flags)
{
	papr_scm_pmu_read(event);
}
static int papr_scm_pmu_check_events(struct papr_scm_priv *p, struct nvdimm_pmu *nd_pmu)
{
	struct papr_scm_perf_stat *stat;
	struct papr_scm_perf_stats *stats;
	int index, rc, count;
	u32 available_events;

	if (!p->stat_buffer_len)
		return -ENOENT;

	available_events = (p->stat_buffer_len - sizeof(struct papr_scm_perf_stats))
		/ sizeof(struct papr_scm_perf_stat);

	/* Allocate the buffer for phyp where stats are written */
	stats = kzalloc(p->stat_buffer_len, GFP_KERNEL);
	if (!stats) {
		rc = -ENOMEM;
		return rc;
	}

	/* Allocate nvdimm_events_map; +1 for the terminating NULL below */
	p->nvdimm_events_map = kcalloc(available_events + 1, sizeof(char *),
				       GFP_KERNEL);
	if (!p->nvdimm_events_map) {
		rc = -ENOMEM;
		goto out_stats;
	}

	/* Called to get list of events supported */
	rc = drc_pmem_query_stats(p, stats, 0);
	if (rc)
		goto out_nvdimm_events_map;

	for (index = 0, stat = stats->scm_statistic, count = 0;
	     index < available_events; index++, ++stat) {
		p->nvdimm_events_map[count] = kmemdup_nul(stat->stat_id, 8, GFP_KERNEL);
		if (!p->nvdimm_events_map[count]) {
			rc = -ENOMEM;
			goto out_nvdimm_events_map;
		}

		count++;
	}
	p->nvdimm_events_map[count] = NULL;
	kfree(stats);
	return 0;

out_nvdimm_events_map:
	kfree(p->nvdimm_events_map);
	/* Clear the pointer so papr_scm_remove() doesn't free it again */
	p->nvdimm_events_map = NULL;
out_stats:
	kfree(stats);
	return rc;
}
static void papr_scm_pmu_register(struct papr_scm_priv *p)
{
	struct nvdimm_pmu *nd_pmu;
	int rc, nodeid;

	nd_pmu = kzalloc(sizeof(*nd_pmu), GFP_KERNEL);
	if (!nd_pmu) {
		rc = -ENOMEM;
		goto pmu_err_print;
	}

	rc = papr_scm_pmu_check_events(p, nd_pmu);
	if (rc)
		goto pmu_check_events_err;

	nd_pmu->pmu.task_ctx_nr = perf_invalid_context;
	nd_pmu->pmu.name = nvdimm_name(p->nvdimm);
	nd_pmu->pmu.event_init = papr_scm_pmu_event_init;
	nd_pmu->pmu.read = papr_scm_pmu_read;
	nd_pmu->pmu.add = papr_scm_pmu_add;
	nd_pmu->pmu.del = papr_scm_pmu_del;

	nd_pmu->pmu.capabilities = PERF_PMU_CAP_NO_INTERRUPT |
				   PERF_PMU_CAP_NO_EXCLUDE;

	/* Update the cpumask variable */
	nodeid = numa_map_to_online_node(dev_to_node(&p->pdev->dev));
	nd_pmu->arch_cpumask = *cpumask_of_node(nodeid);

	rc = register_nvdimm_pmu(nd_pmu, p->pdev);
	if (rc)
		goto pmu_register_err;

	/*
	 * Set archdata.priv value to nvdimm_pmu structure, to handle the
	 * unregistering of pmu device.
	 */
	p->pdev->archdata.priv = nd_pmu;
	return;

pmu_register_err:
	kfree(p->nvdimm_events_map);
pmu_check_events_err:
	kfree(nd_pmu);
pmu_err_print:
	dev_info(&p->pdev->dev, "nvdimm pmu didn't register rc=%d\n", rc);
}
#else
static void papr_scm_pmu_register(struct papr_scm_priv *p) { }
#endif
/*
 * Issue hcall to retrieve dimm health info and populate papr_scm_priv with the
 * health information.
 */
static int __drc_pmem_query_health(struct papr_scm_priv *p)
{
	unsigned long ret[PLPAR_HCALL_BUFSIZE];
	u64 bitmap = 0;
	long rc;

	/* issue the hcall */
	rc = plpar_hcall(H_SCM_HEALTH, ret, p->drc_index);
	if (rc == H_SUCCESS)
		bitmap = ret[0] & ret[1];
	else if (rc == H_FUNCTION)
		dev_info_once(&p->pdev->dev,
			      "Hcall H_SCM_HEALTH not implemented, assuming empty health bitmap");
	else {
		dev_err(&p->pdev->dev,
			"Failed to query health information, Err:%ld\n", rc);
		return -ENXIO;
	}

	p->lasthealth_jiffies = jiffies;
	/* Allow injecting specific health bits via inject mask. */
	if (p->health_bitmap_inject_mask)
		bitmap = (bitmap & ~p->health_bitmap_inject_mask) |
			p->health_bitmap_inject_mask;
	WRITE_ONCE(p->health_bitmap, bitmap);
	dev_dbg(&p->pdev->dev,
		"Queried dimm health info. Bitmap:0x%016lx Mask:0x%016llx\n",
		(unsigned long)bitmap, p->health_bitmap_inject_mask);

	return 0;
}
/* Min interval in seconds for assuming stable dimm health */
#define MIN_HEALTH_QUERY_INTERVAL 60

/* Query cached health info and if needed call __drc_pmem_query_health */
static int drc_pmem_query_health(struct papr_scm_priv *p)
{
	unsigned long cache_timeout;
	int rc;

	/* Protect concurrent modifications to papr_scm_priv */
	rc = mutex_lock_interruptible(&p->health_mutex);
	if (rc)
		return rc;

	/* Jiffies offset for which the health data is assumed to be same */
	cache_timeout = p->lasthealth_jiffies +
		msecs_to_jiffies(MIN_HEALTH_QUERY_INTERVAL * 1000);

	/* Fetch new health info if it's older than MIN_HEALTH_QUERY_INTERVAL */
	if (time_after(jiffies, cache_timeout))
		rc = __drc_pmem_query_health(p);
	else
		/* Assume cached health data is valid */
		rc = 0;

	mutex_unlock(&p->health_mutex);
	return rc;
}
static int papr_scm_meta_get(struct papr_scm_priv *p,
			     struct nd_cmd_get_config_data_hdr *hdr)
{
	unsigned long data[PLPAR_HCALL_BUFSIZE];
	unsigned long offset, data_offset;
	int len, read;
	int64_t ret;

	if ((hdr->in_offset + hdr->in_length) > p->metadata_size)
		return -EINVAL;

	for (len = hdr->in_length; len; len -= read) {

		data_offset = hdr->in_length - len;
		offset = hdr->in_offset + data_offset;

		/* Read in the largest chunk (8/4/2/1 bytes) that still fits */
		if (len >= 8)
			read = 8;
		else if (len >= 4)
			read = 4;
		else if (len >= 2)
			read = 2;
		else
			read = 1;

		ret = plpar_hcall(H_SCM_READ_METADATA, data, p->drc_index,
				  offset, read);

		if (ret == H_PARAMETER) /* bad DRC index */
			return -ENODEV;
		if (ret)
			return -EINVAL; /* other invalid parameter */

		switch (read) {
		case 8:
			*(uint64_t *)(hdr->out_buf + data_offset) = be64_to_cpu(data[0]);
			break;
		case 4:
			*(uint32_t *)(hdr->out_buf + data_offset) = be32_to_cpu(data[0] & 0xffffffff);
			break;
		case 2:
			*(uint16_t *)(hdr->out_buf + data_offset) = be16_to_cpu(data[0] & 0xffff);
			break;
		case 1:
			*(uint8_t *)(hdr->out_buf + data_offset) = (data[0] & 0xff);
			break;
		}
	}
	return 0;
}
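/*
 * The loop above always reads the largest power-of-two chunk that still
 * fits: a 7-byte request, for instance, is serviced as a 4-byte, then a
 * 2-byte, then a 1-byte H_SCM_READ_METADATA hcall. papr_scm_meta_set()
 * below applies the same chunking on the write path.
 */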
static int papr_scm_meta_set(struct papr_scm_priv *p,
			     struct nd_cmd_set_config_hdr *hdr)
{
	unsigned long offset, data_offset;
	int len, wrote;
	unsigned long data;
	__be64 data_be;
	int64_t ret;

	if ((hdr->in_offset + hdr->in_length) > p->metadata_size)
		return -EINVAL;

	for (len = hdr->in_length; len; len -= wrote) {

		data_offset = hdr->in_length - len;
		offset = hdr->in_offset + data_offset;

		if (len >= 8) {
			data = *(uint64_t *)(hdr->in_buf + data_offset);
			data_be = cpu_to_be64(data);
			wrote = 8;
		} else if (len >= 4) {
			data = *(uint32_t *)(hdr->in_buf + data_offset);
			data &= 0xffffffff;
			data_be = cpu_to_be32(data);
			wrote = 4;
		} else if (len >= 2) {
			data = *(uint16_t *)(hdr->in_buf + data_offset);
			data &= 0xffff;
			data_be = cpu_to_be16(data);
			wrote = 2;
		} else {
			data_be = *(uint8_t *)(hdr->in_buf + data_offset);
			data_be &= 0xff;
			wrote = 1;
		}

		ret = plpar_hcall_norets(H_SCM_WRITE_METADATA, p->drc_index,
					 offset, data_be, wrote);
		if (ret == H_PARAMETER) /* bad DRC index */
			return -ENODEV;
		if (ret)
			return -EINVAL; /* other invalid parameter */
	}

	return 0;
}
/*
 * Do sanity checks on the input args to the dimm-control function and return
 * '0' if valid. Validation of PDSM payloads happens later in
 * papr_scm_service_pdsm.
 */
static int is_cmd_valid(struct nvdimm *nvdimm, unsigned int cmd, void *buf,
			unsigned int buf_len)
{
	unsigned long cmd_mask = PAPR_SCM_DIMM_CMD_MASK;
	struct nd_cmd_pkg *nd_cmd;
	struct papr_scm_priv *p;
	enum papr_pdsm pdsm;

	/* Only dimm-specific calls are supported atm */
	if (!nvdimm)
		return -EINVAL;

	/* get the provider data from struct nvdimm */
	p = nvdimm_provider_data(nvdimm);

	if (!test_bit(cmd, &cmd_mask)) {
		dev_dbg(&p->pdev->dev, "Unsupported cmd=%u\n", cmd);
		return -EINVAL;
	}

	/* For CMD_CALL verify pdsm request */
	if (cmd == ND_CMD_CALL) {
		/* Verify the envelope and envelope size */
		if (!buf ||
		    buf_len < (sizeof(struct nd_cmd_pkg) + ND_PDSM_HDR_SIZE)) {
			dev_dbg(&p->pdev->dev, "Invalid pkg size=%u\n",
				buf_len);
			return -EINVAL;
		}

		/* Verify that the nd_cmd_pkg.nd_family is correct */
		nd_cmd = (struct nd_cmd_pkg *)buf;

		if (nd_cmd->nd_family != NVDIMM_FAMILY_PAPR) {
			dev_dbg(&p->pdev->dev, "Invalid pkg family=0x%llx\n",
				nd_cmd->nd_family);
			return -EINVAL;
		}

		pdsm = (enum papr_pdsm)nd_cmd->nd_command;

		/* Verify if the pdsm command is valid */
		if (pdsm <= PAPR_PDSM_MIN || pdsm >= PAPR_PDSM_MAX) {
			dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Invalid PDSM\n",
				pdsm);
			return -EINVAL;
		}

		/* Have enough space to hold returned 'nd_pkg_pdsm' header */
		if (nd_cmd->nd_size_out < ND_PDSM_HDR_SIZE) {
			dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Invalid payload\n",
				pdsm);
			return -EINVAL;
		}
	}

	/* Let the command be further processed */
	return 0;
}
static int papr_pdsm_fuel_gauge(struct papr_scm_priv *p,
				union nd_pdsm_payload *payload)
{
	int rc, size;
	u64 statval;
	struct papr_scm_perf_stat *stat;
	struct papr_scm_perf_stats *stats;

	/* Silently fail if fetching performance metrics isn't supported */
	if (!p->stat_buffer_len)
		return 0;

	/* Allocate request buffer enough to hold single performance stat */
	size = sizeof(struct papr_scm_perf_stats) +
		sizeof(struct papr_scm_perf_stat);

	stats = kzalloc(size, GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	stat = &stats->scm_statistic[0];
	memcpy(&stat->stat_id, "MemLife ", sizeof(stat->stat_id));
	stat->stat_val = 0;

	/* Fetch the fuel gauge and populate it in payload */
	rc = drc_pmem_query_stats(p, stats, 1);
	if (rc < 0) {
		dev_dbg(&p->pdev->dev, "Err(%d) fetching fuel gauge\n", rc);
		goto free_stats;
	}

	statval = be64_to_cpu(stat->stat_val);
	dev_dbg(&p->pdev->dev,
		"Fetched fuel-gauge %llu", statval);
	payload->health.extension_flags |=
		PDSM_DIMM_HEALTH_RUN_GAUGE_VALID;
	payload->health.dimm_fuel_gauge = statval;

	rc = sizeof(struct nd_papr_pdsm_health);

free_stats:
	kfree(stats);
	return rc;
}
/* Add the dirty-shutdown-counter value to the pdsm */
static int papr_pdsm_dsc(struct papr_scm_priv *p,
			 union nd_pdsm_payload *payload)
{
	payload->health.extension_flags |= PDSM_DIMM_DSC_VALID;
	payload->health.dimm_dsc = p->dirty_shutdown_counter;

	return sizeof(struct nd_papr_pdsm_health);
}
/* Fetch the DIMM health info and populate it in provided package. */
static int papr_pdsm_health(struct papr_scm_priv *p,
			    union nd_pdsm_payload *payload)
{
	int rc;

	/* Ensure dimm health mutex is taken preventing concurrent access */
	rc = mutex_lock_interruptible(&p->health_mutex);
	if (rc)
		goto out;

	/* Always fetch up-to-date dimm health data ignoring cached values */
	rc = __drc_pmem_query_health(p);
	if (rc) {
		mutex_unlock(&p->health_mutex);
		goto out;
	}

	/* update health struct with various flags derived from health bitmap */
	payload->health = (struct nd_papr_pdsm_health) {
		.extension_flags = 0,
		.dimm_unarmed = !!(p->health_bitmap & PAPR_PMEM_UNARMED_MASK),
		.dimm_bad_shutdown = !!(p->health_bitmap & PAPR_PMEM_BAD_SHUTDOWN_MASK),
		.dimm_bad_restore = !!(p->health_bitmap & PAPR_PMEM_BAD_RESTORE_MASK),
		.dimm_scrubbed = !!(p->health_bitmap & PAPR_PMEM_SCRUBBED_AND_LOCKED),
		.dimm_locked = !!(p->health_bitmap & PAPR_PMEM_SCRUBBED_AND_LOCKED),
		.dimm_encrypted = !!(p->health_bitmap & PAPR_PMEM_ENCRYPTED),
		.dimm_health = PAPR_PDSM_DIMM_HEALTHY,
	};

	/* Update field dimm_health based on health_bitmap flags */
	if (p->health_bitmap & PAPR_PMEM_HEALTH_FATAL)
		payload->health.dimm_health = PAPR_PDSM_DIMM_FATAL;
	else if (p->health_bitmap & PAPR_PMEM_HEALTH_CRITICAL)
		payload->health.dimm_health = PAPR_PDSM_DIMM_CRITICAL;
	else if (p->health_bitmap & PAPR_PMEM_HEALTH_UNHEALTHY)
		payload->health.dimm_health = PAPR_PDSM_DIMM_UNHEALTHY;

	/* struct populated hence can release the mutex now */
	mutex_unlock(&p->health_mutex);

	/* Populate the fuel gauge meter in the payload */
	papr_pdsm_fuel_gauge(p, payload);
	/* Populate the dirty-shutdown-counter field */
	papr_pdsm_dsc(p, payload);

	rc = sizeof(struct nd_papr_pdsm_health);

out:
	return rc;
}
/* Inject a smart error */
static int papr_pdsm_smart_inject(struct papr_scm_priv *p,
				  union nd_pdsm_payload *payload)
{
	int rc;
	u32 supported_flags = 0;
	u64 inject_mask = 0, clear_mask = 0;
	u64 mask;

	/* Check for individual smart error flags and update inject/clear masks */
	if (payload->smart_inject.flags & PDSM_SMART_INJECT_HEALTH_FATAL) {
		supported_flags |= PDSM_SMART_INJECT_HEALTH_FATAL;
		if (payload->smart_inject.fatal_enable)
			inject_mask |= PAPR_PMEM_HEALTH_FATAL;
		else
			clear_mask |= PAPR_PMEM_HEALTH_FATAL;
	}

	if (payload->smart_inject.flags & PDSM_SMART_INJECT_BAD_SHUTDOWN) {
		supported_flags |= PDSM_SMART_INJECT_BAD_SHUTDOWN;
		if (payload->smart_inject.unsafe_shutdown_enable)
			inject_mask |= PAPR_PMEM_SHUTDOWN_DIRTY;
		else
			clear_mask |= PAPR_PMEM_SHUTDOWN_DIRTY;
	}

	dev_dbg(&p->pdev->dev, "[Smart-inject] inject_mask=%#llx clear_mask=%#llx\n",
		inject_mask, clear_mask);

	/* Prevent concurrent access to dimm health bitmap related members */
	rc = mutex_lock_interruptible(&p->health_mutex);
	if (rc)
		return rc;

	/* Use inject/clear masks to set health_bitmap_inject_mask */
	mask = READ_ONCE(p->health_bitmap_inject_mask);
	mask = (mask & ~clear_mask) | inject_mask;
	WRITE_ONCE(p->health_bitmap_inject_mask, mask);

	/* Invalidate cached health bitmap */
	p->lasthealth_jiffies = 0;

	mutex_unlock(&p->health_mutex);

	/* Return the supported flags back to userspace */
	payload->smart_inject.flags = supported_flags;

	return sizeof(struct nd_papr_pdsm_health);
}
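/*
 * Injected bits persist in health_bitmap_inject_mask and are OR-ed into
 * every later __drc_pmem_query_health() result until cleared by a
 * follow-up inject request; the current mask is readable through the
 * health_bitmap_inject sysfs attribute defined below.
 */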
/*
 * 'struct pdsm_cmd_desc'
 * Identifies supported PDSMs' expected length of in/out payloads
 * and pdsm service function.
 *
 * size_in	: Size of input payload if any in the PDSM request.
 * size_out	: Size of output payload if any in the PDSM request.
 * service	: Service function for the PDSM request. Return semantics:
 *		  rc < 0 : Error servicing PDSM and rc indicates the error.
 *		  rc >= 0 : Serviced successfully and 'rc' indicates the number
 *			    of bytes written to payload.
 */
struct pdsm_cmd_desc {
	u32 size_in;
	u32 size_out;
	int (*service)(struct papr_scm_priv *dimm,
		       union nd_pdsm_payload *payload);
};
/* Holds all supported PDSMs' command descriptors */
static const struct pdsm_cmd_desc __pdsm_cmd_descriptors[] = {
	[PAPR_PDSM_MIN] = {
		.size_in = 0,
		.size_out = 0,
		.service = NULL,
	},
	/* New PDSM command descriptors to be added below */

	[PAPR_PDSM_HEALTH] = {
		.size_in = 0,
		.size_out = sizeof(struct nd_papr_pdsm_health),
		.service = papr_pdsm_health,
	},

	[PAPR_PDSM_SMART_INJECT] = {
		.size_in = sizeof(struct nd_papr_pdsm_smart_inject),
		.size_out = sizeof(struct nd_papr_pdsm_smart_inject),
		.service = papr_pdsm_smart_inject,
	},

	/* Empty */
	[PAPR_PDSM_MAX] = {
		.size_in = 0,
		.size_out = 0,
		.service = NULL,
	},
};
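/*
 * Sketch: supporting a hypothetical new request PAPR_PDSM_FOO would mean
 * adding one more descriptor to the table above, e.g.
 *
 *	[PAPR_PDSM_FOO] = {
 *		.size_in = sizeof(struct nd_papr_pdsm_foo),
 *		.size_out = sizeof(struct nd_papr_pdsm_foo),
 *		.service = papr_pdsm_foo,
 *	},
 *
 * papr_scm_service_pdsm() below then validates the in/out sizes and
 * dispatches to the service callback; no other plumbing is needed.
 */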
/* Given a valid pdsm cmd return its command descriptor else return NULL */
static inline const struct pdsm_cmd_desc *pdsm_cmd_desc(enum papr_pdsm cmd)
{
	/* Note: bounds test must be a conjunction, not a disjunction */
	if (cmd >= 0 && cmd < ARRAY_SIZE(__pdsm_cmd_descriptors))
		return &__pdsm_cmd_descriptors[cmd];

	return NULL;
}
/*
 * For a given pdsm request call an appropriate service function.
 * Returns errors if any while handling the pdsm command package.
 */
static int papr_scm_service_pdsm(struct papr_scm_priv *p,
				 struct nd_cmd_pkg *pkg)
{
	/* Get the PDSM header and PDSM command */
	struct nd_pkg_pdsm *pdsm_pkg = (struct nd_pkg_pdsm *)pkg->nd_payload;
	enum papr_pdsm pdsm = (enum papr_pdsm)pkg->nd_command;
	const struct pdsm_cmd_desc *pdsc;
	int rc;

	/* Fetch corresponding pdsm descriptor for validation and servicing */
	pdsc = pdsm_cmd_desc(pdsm);

	/* Validate pdsm descriptor */
	/* Ensure that reserved fields are 0 */
	if (pdsm_pkg->reserved[0] || pdsm_pkg->reserved[1]) {
		dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Invalid reserved field\n",
			pdsm);
		return -EINVAL;
	}

	/* If pdsm expects some input, then ensure that the size_in matches */
	if (pdsc->size_in &&
	    pkg->nd_size_in != (pdsc->size_in + ND_PDSM_HDR_SIZE)) {
		dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Mismatched size_in=%d\n",
			pdsm, pkg->nd_size_in);
		return -EINVAL;
	}

	/* If pdsm wants to return data, then ensure that size_out matches */
	if (pdsc->size_out &&
	    pkg->nd_size_out != (pdsc->size_out + ND_PDSM_HDR_SIZE)) {
		dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Mismatched size_out=%d\n",
			pdsm, pkg->nd_size_out);
		return -EINVAL;
	}

	/* Service the pdsm */
	if (pdsc->service) {
		dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Servicing..\n", pdsm);

		rc = pdsc->service(p, &pdsm_pkg->payload);

		if (rc < 0) {
			/* error encountered while servicing pdsm */
			pdsm_pkg->cmd_status = rc;
			pkg->nd_fw_size = ND_PDSM_HDR_SIZE;
		} else {
			/* pdsm serviced and 'rc' bytes written to payload */
			pdsm_pkg->cmd_status = 0;
			pkg->nd_fw_size = ND_PDSM_HDR_SIZE + rc;
		}
	} else {
		dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Unsupported PDSM request\n",
			pdsm);
		pdsm_pkg->cmd_status = -ENOENT;
		pkg->nd_fw_size = ND_PDSM_HDR_SIZE;
	}

	return pdsm_pkg->cmd_status;
}
static int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc,
			  struct nvdimm *nvdimm, unsigned int cmd, void *buf,
			  unsigned int buf_len, int *cmd_rc)
{
	struct nd_cmd_get_config_size *get_size_hdr;
	struct nd_cmd_pkg *call_pkg = NULL;
	struct papr_scm_priv *p;
	int rc;

	rc = is_cmd_valid(nvdimm, cmd, buf, buf_len);
	if (rc) {
		pr_debug("Invalid cmd=0x%x. Err=%d\n", cmd, rc);
		return rc;
	}

	/* Use a local variable in case cmd_rc pointer is NULL */
	if (!cmd_rc)
		cmd_rc = &rc;

	p = nvdimm_provider_data(nvdimm);

	switch (cmd) {
	case ND_CMD_GET_CONFIG_SIZE:
		get_size_hdr = buf;

		get_size_hdr->status = 0;
		get_size_hdr->max_xfer = 8;
		get_size_hdr->config_size = p->metadata_size;
		*cmd_rc = 0;
		break;

	case ND_CMD_GET_CONFIG_DATA:
		*cmd_rc = papr_scm_meta_get(p, buf);
		break;

	case ND_CMD_SET_CONFIG_DATA:
		*cmd_rc = papr_scm_meta_set(p, buf);
		break;

	case ND_CMD_CALL:
		call_pkg = (struct nd_cmd_pkg *)buf;
		*cmd_rc = papr_scm_service_pdsm(p, call_pkg);
		break;

	default:
		dev_dbg(&p->pdev->dev, "Unknown command = %d\n", cmd);
		*cmd_rc = -EINVAL;
	}

	dev_dbg(&p->pdev->dev, "returned with cmd_rc = %d\n", *cmd_rc);

	return 0;
}
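/*
 * Userspace reaches papr_scm_ndctl() through the libnvdimm ioctl path. A
 * health query, for instance, arrives as an ND_CMD_CALL envelope along
 * these lines (sketch; field names from <linux/ndctl.h> and
 * <asm/papr_pdsm.h>):
 *
 *	struct nd_cmd_pkg pkg = {
 *		.nd_family   = NVDIMM_FAMILY_PAPR,
 *		.nd_command  = PAPR_PDSM_HEALTH,
 *		.nd_size_out = ND_PDSM_HDR_SIZE +
 *			       sizeof(struct nd_papr_pdsm_health),
 *	};
 */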
static ssize_t health_bitmap_inject_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvdimm *dimm = to_nvdimm(dev);
	struct papr_scm_priv *p = nvdimm_provider_data(dimm);

	return sprintf(buf, "%#llx\n",
		       READ_ONCE(p->health_bitmap_inject_mask));
}

static DEVICE_ATTR_ADMIN_RO(health_bitmap_inject);
static ssize_t perf_stats_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	int index;
	ssize_t rc;
	struct seq_buf s;
	struct papr_scm_perf_stat *stat;
	struct papr_scm_perf_stats *stats;
	struct nvdimm *dimm = to_nvdimm(dev);
	struct papr_scm_priv *p = nvdimm_provider_data(dimm);

	if (!p->stat_buffer_len)
		return -ENOENT;

	/* Allocate the buffer for phyp where stats are written */
	stats = kzalloc(p->stat_buffer_len, GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	/* Ask phyp to return all dimm perf stats */
	rc = drc_pmem_query_stats(p, stats, 0);
	if (rc)
		goto free_stats;
	/*
	 * Go through the returned output buffer and print stats and
	 * values. Since stat_id is essentially a char string of
	 * 8 bytes, simply use the string format specifier to print it.
	 */
	seq_buf_init(&s, buf, PAGE_SIZE);
	for (index = 0, stat = stats->scm_statistic;
	     index < be32_to_cpu(stats->num_statistics);
	     ++index, ++stat) {
		seq_buf_printf(&s, "%.8s = 0x%016llX\n",
			       stat->stat_id,
			       be64_to_cpu(stat->stat_val));
	}

free_stats:
	kfree(stats);
	return rc ? rc : (ssize_t)seq_buf_used(&s);
}
static DEVICE_ATTR_ADMIN_RO(perf_stats);
static ssize_t flags_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct nvdimm *dimm = to_nvdimm(dev);
	struct papr_scm_priv *p = nvdimm_provider_data(dimm);
	struct seq_buf s;
	u64 health;
	int rc;

	rc = drc_pmem_query_health(p);
	if (rc)
		return rc;

	/* Copy health_bitmap locally, check masks & update out buffer */
	health = READ_ONCE(p->health_bitmap);

	seq_buf_init(&s, buf, PAGE_SIZE);
	if (health & PAPR_PMEM_UNARMED_MASK)
		seq_buf_printf(&s, "not_armed ");

	if (health & PAPR_PMEM_BAD_SHUTDOWN_MASK)
		seq_buf_printf(&s, "flush_fail ");

	if (health & PAPR_PMEM_BAD_RESTORE_MASK)
		seq_buf_printf(&s, "restore_fail ");

	if (health & PAPR_PMEM_ENCRYPTED)
		seq_buf_printf(&s, "encrypted ");

	if (health & PAPR_PMEM_SMART_EVENT_MASK)
		seq_buf_printf(&s, "smart_notify ");

	if (health & PAPR_PMEM_SCRUBBED_AND_LOCKED)
		seq_buf_printf(&s, "scrubbed locked ");

	if (seq_buf_used(&s))
		seq_buf_printf(&s, "\n");

	return seq_buf_used(&s);
}
DEVICE_ATTR_RO(flags);
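/*
 * Example output (hypothetical device path and state):
 *
 *	# cat /sys/bus/nd/devices/nmem0/papr/flags
 *	not_armed flush_fail
 *
 * One token is emitted per health condition currently asserted in the
 * cached health bitmap.
 */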
static ssize_t dirty_shutdown_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct nvdimm *dimm = to_nvdimm(dev);
	struct papr_scm_priv *p = nvdimm_provider_data(dimm);

	return sysfs_emit(buf, "%llu\n", p->dirty_shutdown_counter);
}
DEVICE_ATTR_RO(dirty_shutdown);
static umode_t papr_nd_attribute_visible(struct kobject *kobj,
					 struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct papr_scm_priv *p = nvdimm_provider_data(nvdimm);

	/* Hide the perf_stats attribute if perf-stats aren't available */
	if (attr == &dev_attr_perf_stats.attr && p->stat_buffer_len == 0)
		return 0;

	return attr->mode;
}
/* papr_scm specific dimm attributes */
static struct attribute *papr_nd_attributes[] = {
	&dev_attr_flags.attr,
	&dev_attr_perf_stats.attr,
	&dev_attr_dirty_shutdown.attr,
	&dev_attr_health_bitmap_inject.attr,
	NULL,
};

static const struct attribute_group papr_nd_attribute_group = {
	.name = "papr",
	.is_visible = papr_nd_attribute_visible,
	.attrs = papr_nd_attributes,
};

static const struct attribute_group *papr_nd_attr_groups[] = {
	&papr_nd_attribute_group,
	NULL,
};
static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
{
	struct device *dev = &p->pdev->dev;
	struct nd_mapping_desc mapping;
	struct nd_region_desc ndr_desc;
	unsigned long dimm_flags;
	int target_nid, online_nid;

	p->bus_desc.ndctl = papr_scm_ndctl;
	p->bus_desc.module = THIS_MODULE;
	p->bus_desc.of_node = p->pdev->dev.of_node;
	p->bus_desc.provider_name = kstrdup(p->pdev->name, GFP_KERNEL);

	/* Set the dimm command family mask to accept PDSMs */
	set_bit(NVDIMM_FAMILY_PAPR, &p->bus_desc.dimm_family_mask);

	if (!p->bus_desc.provider_name)
		return -ENOMEM;

	p->bus = nvdimm_bus_register(NULL, &p->bus_desc);
	if (!p->bus) {
		dev_err(dev, "Error creating nvdimm bus %pOF\n", p->dn);
		kfree(p->bus_desc.provider_name);
		return -ENXIO;
	}

	dimm_flags = 0;
	set_bit(NDD_LABELING, &dimm_flags);

	/*
	 * Check if the nvdimm is unarmed. No locking needed as we are still
	 * initializing. Ignore error encountered if any.
	 */
	__drc_pmem_query_health(p);

	if (p->health_bitmap & PAPR_PMEM_UNARMED_MASK)
		set_bit(NDD_UNARMED, &dimm_flags);

	p->nvdimm = nvdimm_create(p->bus, p, papr_nd_attr_groups,
				  dimm_flags, PAPR_SCM_DIMM_CMD_MASK, 0, NULL);
	if (!p->nvdimm) {
		dev_err(dev, "Error creating DIMM object for %pOF\n", p->dn);
		goto err;
	}

	if (nvdimm_bus_check_dimm_count(p->bus, 1))
		goto err;

	/* now add the region */

	memset(&mapping, 0, sizeof(mapping));
	mapping.nvdimm = p->nvdimm;
	mapping.start = 0;
	mapping.size = p->blocks * p->block_size; // XXX: potential overflow?

	memset(&ndr_desc, 0, sizeof(ndr_desc));
	target_nid = dev_to_node(&p->pdev->dev);
	online_nid = numa_map_to_online_node(target_nid);
	ndr_desc.numa_node = online_nid;
	ndr_desc.target_node = target_nid;
	ndr_desc.res = &p->res;
	ndr_desc.of_node = p->dn;
	ndr_desc.provider_data = p;
	ndr_desc.mapping = &mapping;
	ndr_desc.num_mappings = 1;
	ndr_desc.nd_set = &p->nd_set;

	if (p->hcall_flush_required) {
		set_bit(ND_REGION_ASYNC, &ndr_desc.flags);
		ndr_desc.flush = papr_scm_pmem_flush;
	}

	if (p->is_volatile)
		p->region = nvdimm_volatile_region_create(p->bus, &ndr_desc);
	else {
		set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc.flags);
		p->region = nvdimm_pmem_region_create(p->bus, &ndr_desc);
	}
	if (!p->region) {
		dev_err(dev, "Error registering region %pR from %pOF\n",
			ndr_desc.res, p->dn);
		goto err;
	}

	if (target_nid != online_nid)
		dev_info(dev, "Region registered with target node %d and online node %d",
			 target_nid, online_nid);

	mutex_lock(&papr_ndr_lock);
	list_add_tail(&p->region_list, &papr_nd_regions);
	mutex_unlock(&papr_ndr_lock);

	return 0;

err:	nvdimm_bus_unregister(p->bus);
	kfree(p->bus_desc.provider_name);
	return -ENXIO;
}
static void papr_scm_add_badblock(struct nd_region *region,
				  struct nvdimm_bus *bus, u64 phys_addr)
{
	u64 aligned_addr = ALIGN_DOWN(phys_addr, L1_CACHE_BYTES);

	if (nvdimm_bus_add_badrange(bus, aligned_addr, L1_CACHE_BYTES)) {
		pr_err("Bad block registration for 0x%llx failed\n", phys_addr);
		return;
	}

	pr_debug("Add memory range (0x%llx - 0x%llx) as bad range\n",
		 aligned_addr, aligned_addr + L1_CACHE_BYTES);

	nvdimm_region_notify(region, NVDIMM_REVALIDATE_POISON);
}
static int handle_mce_ue(struct notifier_block *nb, unsigned long val,
			 void *data)
{
	struct machine_check_event *evt = data;
	struct papr_scm_priv *p;
	u64 phys_addr;
	bool found = false;

	if (evt->error_type != MCE_ERROR_TYPE_UE)
		return NOTIFY_DONE;

	if (list_empty(&papr_nd_regions))
		return NOTIFY_DONE;

	/*
	 * The physical address obtained here is PAGE_SIZE aligned, so get the
	 * exact address from the effective address
	 */
	phys_addr = evt->u.ue_error.physical_address +
			(evt->u.ue_error.effective_address & ~PAGE_MASK);

	if (!evt->u.ue_error.physical_address_provided ||
	    !is_zone_device_page(pfn_to_page(phys_addr >> PAGE_SHIFT)))
		return NOTIFY_DONE;

	/* mce notifier is called from a process context, so mutex is safe */
	mutex_lock(&papr_ndr_lock);
	list_for_each_entry(p, &papr_nd_regions, region_list) {
		if (phys_addr >= p->res.start && phys_addr <= p->res.end) {
			found = true;
			break;
		}
	}

	if (found)
		papr_scm_add_badblock(p->region, p->bus, phys_addr);

	mutex_unlock(&papr_ndr_lock);

	return found ? NOTIFY_OK : NOTIFY_DONE;
}
static struct notifier_block mce_ue_nb = {
	.notifier_call = handle_mce_ue
};
static int papr_scm_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	u32 drc_index, metadata_size;
	u64 blocks, block_size;
	struct papr_scm_priv *p;
	u8 uuid_raw[UUID_SIZE];
	const char *uuid_str;
	ssize_t stat_size;
	uuid_t uuid;
	int rc;

	/* check we have all the required DT properties */
	if (of_property_read_u32(dn, "ibm,my-drc-index", &drc_index)) {
		dev_err(&pdev->dev, "%pOF: missing drc-index!\n", dn);
		return -ENODEV;
	}

	if (of_property_read_u64(dn, "ibm,block-size", &block_size)) {
		dev_err(&pdev->dev, "%pOF: missing block-size!\n", dn);
		return -ENODEV;
	}

	if (of_property_read_u64(dn, "ibm,number-of-blocks", &blocks)) {
		dev_err(&pdev->dev, "%pOF: missing number-of-blocks!\n", dn);
		return -ENODEV;
	}

	if (of_property_read_string(dn, "ibm,unit-guid", &uuid_str)) {
		dev_err(&pdev->dev, "%pOF: missing unit-guid!\n", dn);
		return -ENODEV;
	}

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	/* Initialize the dimm mutex */
	mutex_init(&p->health_mutex);

	/* optional DT properties */
	of_property_read_u32(dn, "ibm,metadata-size", &metadata_size);

	p->dn = dn;
	p->drc_index = drc_index;
	p->block_size = block_size;
	p->blocks = blocks;
	p->is_volatile = !of_property_read_bool(dn, "ibm,cache-flush-required");
	p->hcall_flush_required = of_property_read_bool(dn, "ibm,hcall-flush-required");

	if (of_property_read_u64(dn, "ibm,persistence-failed-count",
				 &p->dirty_shutdown_counter))
		p->dirty_shutdown_counter = 0;

	/* We just need to ensure that set cookies are unique across */
	uuid_parse(uuid_str, &uuid);

	/*
	 * The cookie1 and cookie2 are not really little endian.
	 * We store a raw buffer representation of the
	 * uuid string so that we can compare this with the label
	 * area cookie irrespective of the endian configuration
	 * with which the kernel is built.
	 *
	 * Historically we stored the cookie in the below format.
	 * for a uuid string 72511b67-0b3b-42fd-8d1d-5be3cae8bcaa
	 *	cookie1 was 0xfd423b0b671b5172
	 *	cookie2 was 0xaabce8cae35b1d8d
	 */
	export_uuid(uuid_raw, &uuid);
	p->nd_set.cookie1 = get_unaligned_le64(&uuid_raw[0]);
	p->nd_set.cookie2 = get_unaligned_le64(&uuid_raw[8]);

	/* might be zero */
	p->metadata_size = metadata_size;
	p->pdev = pdev;

	/* request the hypervisor to bind this region to somewhere in memory */
	rc = drc_pmem_bind(p);

	/* If phyp says drc memory still bound then force unbound and retry */
	if (rc == H_OVERLAP)
		rc = drc_pmem_query_n_bind(p);

	if (rc != H_SUCCESS) {
		dev_err(&p->pdev->dev, "bind err: %d\n", rc);
		rc = -ENXIO;
		goto err;
	}

	/* setup the resource for the newly bound range */
	p->res.start = p->bound_addr;
	p->res.end = p->bound_addr + p->blocks * p->block_size - 1;
	p->res.name = pdev->name;
	p->res.flags = IORESOURCE_MEM;

	/* Try retrieving the stat buffer and see if it's supported */
	stat_size = drc_pmem_query_stats(p, NULL, 0);
	if (stat_size > 0) {
		p->stat_buffer_len = stat_size;
		dev_dbg(&p->pdev->dev, "Max perf-stat size %lu-bytes\n",
			p->stat_buffer_len);
	}

	rc = papr_scm_nvdimm_init(p);
	if (rc)
		goto err2;

	platform_set_drvdata(pdev, p);
	papr_scm_pmu_register(p);

	return 0;

err2:	drc_pmem_unbind(p);
err:	kfree(p);
	return rc;
}
static int papr_scm_remove(struct platform_device *pdev)
{
	struct papr_scm_priv *p = platform_get_drvdata(pdev);

	mutex_lock(&papr_ndr_lock);
	list_del(&p->region_list);
	mutex_unlock(&papr_ndr_lock);

	nvdimm_bus_unregister(p->bus);
	drc_pmem_unbind(p);

	if (pdev->archdata.priv)
		unregister_nvdimm_pmu(pdev->archdata.priv);

	pdev->archdata.priv = NULL;
	kfree(p->nvdimm_events_map);
	kfree(p->bus_desc.provider_name);
	kfree(p);

	return 0;
}
static const struct of_device_id papr_scm_match[] = {
	{ .compatible = "ibm,pmemory" },
	{ .compatible = "ibm,pmemory-v2" },
	{ },
};
static struct platform_driver papr_scm_driver = {
	.probe = papr_scm_probe,
	.remove = papr_scm_remove,
	.driver = {
		.name = "papr_scm",
		.of_match_table = papr_scm_match,
	},
};
static int __init papr_scm_init(void)
{
	int ret;

	ret = platform_driver_register(&papr_scm_driver);
	if (!ret)
		mce_register_notifier(&mce_ue_nb);

	return ret;
}
module_init(papr_scm_init);
static void __exit papr_scm_exit(void)
{
	mce_unregister_notifier(&mce_ue_nb);
	platform_driver_unregister(&papr_scm_driver);
}
module_exit(papr_scm_exit);
MODULE_DEVICE_TABLE(of, papr_scm_match);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");