// SPDX-License-Identifier: GPL-2.0

#define pr_fmt(fmt)	"papr-scm: " fmt

#include <linux/of.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/ndctl.h>
#include <linux/sched.h>
#include <linux/libnvdimm.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/seq_buf.h>
#include <linux/nd.h>

#include <asm/plpar_wrappers.h>
#include <asm/papr_pdsm.h>
#include <asm/mce.h>
#include <asm/unaligned.h>

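/*
 * Passed as the target address to H_SCM_BIND_MEM, letting the hypervisor
 * choose where in the guest physical address space the SCM blocks are bound.
 */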
#define BIND_ANY_ADDR (~0ul)

#define PAPR_SCM_DIMM_CMD_MASK \
	((1ul << ND_CMD_GET_CONFIG_SIZE) | \
	 (1ul << ND_CMD_GET_CONFIG_DATA) | \
	 (1ul << ND_CMD_SET_CONFIG_DATA) | \
	 (1ul << ND_CMD_CALL))

/* DIMM health bitmap indicators */
/* SCM device is unable to persist memory contents */
#define PAPR_PMEM_UNARMED		(1ULL << (63 - 0))
/* SCM device failed to persist memory contents */
#define PAPR_PMEM_SHUTDOWN_DIRTY	(1ULL << (63 - 1))
/* SCM device contents are persisted from previous IPL */
#define PAPR_PMEM_SHUTDOWN_CLEAN	(1ULL << (63 - 2))
/* SCM device contents are not persisted from previous IPL */
#define PAPR_PMEM_EMPTY			(1ULL << (63 - 3))
/* SCM device memory life remaining is critically low */
#define PAPR_PMEM_HEALTH_CRITICAL	(1ULL << (63 - 4))
/* SCM device will be guarded (GARDed) off on the next IPL due to failure */
#define PAPR_PMEM_HEALTH_FATAL		(1ULL << (63 - 5))
/* SCM contents cannot persist due to current platform health status */
#define PAPR_PMEM_HEALTH_UNHEALTHY	(1ULL << (63 - 6))
/* SCM device is unable to persist memory contents in certain conditions */
#define PAPR_PMEM_HEALTH_NON_CRITICAL	(1ULL << (63 - 7))
/* SCM device is encrypted */
#define PAPR_PMEM_ENCRYPTED		(1ULL << (63 - 8))
/* SCM device has been scrubbed and locked */
#define PAPR_PMEM_SCRUBBED_AND_LOCKED	(1ULL << (63 - 9))

/* Bit status indicators for health bitmap indicating unarmed dimm */
#define PAPR_PMEM_UNARMED_MASK (PAPR_PMEM_UNARMED |		\
				PAPR_PMEM_HEALTH_UNHEALTHY)

/* Bit status indicators for health bitmap indicating unflushed dimm */
#define PAPR_PMEM_BAD_SHUTDOWN_MASK (PAPR_PMEM_SHUTDOWN_DIRTY)

/* Bit status indicators for health bitmap indicating unrestored dimm */
#define PAPR_PMEM_BAD_RESTORE_MASK (PAPR_PMEM_EMPTY)

/* Bit status indicators for smart event notification */
#define PAPR_PMEM_SMART_EVENT_MASK (PAPR_PMEM_HEALTH_CRITICAL | \
				    PAPR_PMEM_HEALTH_FATAL |	\
				    PAPR_PMEM_HEALTH_UNHEALTHY)
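/* Any bit in the above mask is reported as "smart_notify" by flags_show() */
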
#define PAPR_SCM_PERF_STATS_EYECATCHER __stringify(SCMSTATS)
#define PAPR_SCM_PERF_STATS_VERSION 0x1
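/*
 * The eye-catcher and version above form the header of the buffer handed to
 * the H_SCM_PERFORMANCE_STATS hcall (see struct papr_scm_perf_stats below).
 */
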
/* Struct holding a single performance metric */
struct papr_scm_perf_stat {
	u8 stat_id[8];
	__be64 stat_val;
} __packed;

/* Struct exchanged between kernel and PHYP for fetching drc perf stats */
struct papr_scm_perf_stats {
	u8 eye_catcher[8];
	/* Should be PAPR_SCM_PERF_STATS_VERSION */
	__be32 stats_version;
	/* Number of stats following */
	__be32 num_statistics;
	/* zero or more performance metrics */
	struct papr_scm_perf_stat scm_statistic[];
} __packed;

/* private struct associated with each region */
struct papr_scm_priv {
	struct platform_device *pdev;
	struct device_node *dn;
	uint32_t drc_index;
	uint64_t blocks;
	uint64_t block_size;
	int metadata_size;
	bool is_volatile;
	bool hcall_flush_required;

	uint64_t bound_addr;

	struct nvdimm_bus_descriptor bus_desc;
	struct nvdimm_bus *bus;
	struct nvdimm *nvdimm;
	struct resource res;
	struct nd_region *region;
	struct nd_interleave_set nd_set;
	struct list_head region_list;

	/* Protect dimm health data from concurrent read/writes */
	struct mutex health_mutex;

	/* Last time the health information of the dimm was updated */
	unsigned long lasthealth_jiffies;

	/* Health information for the dimm */
	u64 health_bitmap;

	/* Holds the last known dirty shutdown counter value */
	u64 dirty_shutdown_counter;

	/* length of the stat buffer as expected by phyp */
	size_t stat_buffer_len;
};

static int papr_scm_pmem_flush(struct nd_region *nd_region,
			       struct bio *bio __maybe_unused)
{
	struct papr_scm_priv *p = nd_region_provider_data(nd_region);
	unsigned long ret_buf[PLPAR_HCALL_BUFSIZE], token = 0;
	long rc;

	dev_dbg(&p->pdev->dev, "flush drc 0x%x", p->drc_index);

	do {
		rc = plpar_hcall(H_SCM_FLUSH, ret_buf, p->drc_index, token);
		token = ret_buf[0];

		/* Check if we are stalled for some time */
		if (H_IS_LONG_BUSY(rc)) {
			msleep(get_longbusy_msecs(rc));
			rc = H_BUSY;
		} else if (rc == H_BUSY) {
			cond_resched();
		}
	} while (rc == H_BUSY);

	if (rc) {
		dev_err(&p->pdev->dev, "flush error: %ld", rc);
		rc = -EIO;
	} else {
		dev_dbg(&p->pdev->dev, "flush drc 0x%x complete", p->drc_index);
	}

	return rc;
}

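/*
 * All bound regions are kept on papr_nd_regions (under papr_ndr_lock) so that
 * the MCE notifier below can map a failing physical address to its region.
 */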
static LIST_HEAD(papr_nd_regions);
static DEFINE_MUTEX(papr_ndr_lock);

static int drc_pmem_bind(struct papr_scm_priv *p)
{
	unsigned long ret[PLPAR_HCALL_BUFSIZE];
	uint64_t saved = 0;
	uint64_t token;
	int64_t rc;

	/*
	 * When the hypervisor cannot map all the requested memory in a single
	 * hcall it returns H_BUSY and we call again with the token until
	 * we get H_SUCCESS. Aborting the retry loop before getting H_SUCCESS
	 * would leave the system in an undefined state, so we wait.
	 */
	token = 0;

	do {
		rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0,
				 p->blocks, BIND_ANY_ADDR, token);
		token = ret[0];
		if (!saved)
			saved = ret[1];
		cond_resched();
	} while (rc == H_BUSY);

	if (rc)
		return rc;

	p->bound_addr = saved;
	dev_dbg(&p->pdev->dev, "bound drc 0x%x to 0x%lx\n",
		p->drc_index, (unsigned long)saved);
	return rc;
}

static void drc_pmem_unbind(struct papr_scm_priv *p)
{
	unsigned long ret[PLPAR_HCALL_BUFSIZE];
	uint64_t token = 0;
	int64_t rc;

	dev_dbg(&p->pdev->dev, "unbind drc 0x%x\n", p->drc_index);

	/* NB: unbind has the same retry requirements as drc_pmem_bind() */
	do {
		/* Unbind of all SCM resources associated with drcIndex */
		rc = plpar_hcall(H_SCM_UNBIND_ALL, ret, H_UNBIND_SCOPE_DRC,
				 p->drc_index, token);
		token = ret[0];

		/* Check if we are stalled for some time */
		if (H_IS_LONG_BUSY(rc)) {
			msleep(get_longbusy_msecs(rc));
			rc = H_BUSY;
		} else if (rc == H_BUSY) {
			cond_resched();
		}
	} while (rc == H_BUSY);

	if (rc)
		dev_err(&p->pdev->dev, "unbind error: %lld\n", rc);
	else
		dev_dbg(&p->pdev->dev, "unbind drc 0x%x complete\n",
			p->drc_index);
}

static int drc_pmem_query_n_bind(struct papr_scm_priv *p)
{
	unsigned long start_addr;
	unsigned long end_addr;
	unsigned long ret[PLPAR_HCALL_BUFSIZE];
	int64_t rc;

	rc = plpar_hcall(H_SCM_QUERY_BLOCK_MEM_BINDING, ret,
			 p->drc_index, 0);
	if (rc)
		goto err_out;
	start_addr = ret[0];

	/* Make sure the full region is bound. */
	rc = plpar_hcall(H_SCM_QUERY_BLOCK_MEM_BINDING, ret,
			 p->drc_index, p->blocks - 1);
	if (rc)
		goto err_out;
	end_addr = ret[0];

	if ((end_addr - start_addr) != ((p->blocks - 1) * p->block_size))
		goto err_out;

	p->bound_addr = start_addr;
	dev_dbg(&p->pdev->dev, "bound drc 0x%x to 0x%lx\n", p->drc_index, start_addr);
	return rc;

err_out:
	dev_info(&p->pdev->dev,
		 "Failed to query, trying an unbind followed by bind");
	drc_pmem_unbind(p);
	return drc_pmem_bind(p);
}

/*
 * Query the DIMM performance stats from PHYP and copy them (if returned) to
 * the provided struct papr_scm_perf_stats instance 'buff_stats' that can hold
 * at least (num_stats + header) bytes.
 * - If buff_stats == NULL the return value is the size in bytes of the buffer
 * needed to hold all supported performance-statistics.
 * - If buff_stats != NULL and num_stats == 0 then we copy all known
 * performance-statistics to 'buff_stats' and expect it to be large enough to
 * hold them.
 * - If buff_stats != NULL and num_stats > 0 then copy the requested
 * performance-statistics to buff_stats.
 */
static ssize_t drc_pmem_query_stats(struct papr_scm_priv *p,
				    struct papr_scm_perf_stats *buff_stats,
				    unsigned int num_stats)
{
	unsigned long ret[PLPAR_HCALL_BUFSIZE];
	size_t size;
	s64 rc;

	/* Setup the out buffer */
	if (buff_stats) {
		memcpy(buff_stats->eye_catcher,
		       PAPR_SCM_PERF_STATS_EYECATCHER, 8);
		buff_stats->stats_version =
			cpu_to_be32(PAPR_SCM_PERF_STATS_VERSION);
		buff_stats->num_statistics =
			cpu_to_be32(num_stats);

		/*
		 * Calculate the buffer size based on num-stats provided
		 * or use the prefetched max buffer length
		 */
		if (num_stats)
			/* Calculate size from the num_stats */
			size = sizeof(struct papr_scm_perf_stats) +
				num_stats * sizeof(struct papr_scm_perf_stat);
		else
			size = p->stat_buffer_len;
	} else {
		/* In case of no out buffer ignore the size */
		size = 0;
	}

	/* Do the HCALL asking PHYP for info */
	rc = plpar_hcall(H_SCM_PERFORMANCE_STATS, ret, p->drc_index,
			 buff_stats ? virt_to_phys(buff_stats) : 0,
			 size);

	/* Check if the error was due to an unknown stat-id */
	if (rc == H_PARTIAL) {
		dev_err(&p->pdev->dev,
			"Unknown performance stats, Err:0x%016lX\n", ret[0]);
		return -ENOENT;
	} else if (rc == H_AUTHORITY) {
		dev_info(&p->pdev->dev,
			 "Permission denied while accessing performance stats");
		return -EPERM;
	} else if (rc == H_UNSUPPORTED) {
		dev_dbg(&p->pdev->dev, "Performance stats unsupported\n");
		return -EOPNOTSUPP;
	} else if (rc != H_SUCCESS) {
		dev_err(&p->pdev->dev,
			"Failed to query performance stats, Err:%lld\n", rc);
		return -EIO;
	}

	if (!size) {
		/* Handle case where stat buffer size was requested */
		dev_dbg(&p->pdev->dev,
			"Performance stats size %ld\n", ret[0]);
		return ret[0];
	}

	/* Successfully fetched the requested stats from phyp */
	dev_dbg(&p->pdev->dev,
		"Performance stats returned %d stats\n",
		be32_to_cpu(buff_stats->num_statistics));
	return 0;
}

/*
 * Issue hcall to retrieve dimm health info and populate papr_scm_priv with the
 * health information.
 */
static int __drc_pmem_query_health(struct papr_scm_priv *p)
{
	unsigned long ret[PLPAR_HCALL_BUFSIZE];
	long rc;

	/* issue the hcall */
	rc = plpar_hcall(H_SCM_HEALTH, ret, p->drc_index);
	if (rc != H_SUCCESS) {
		dev_err(&p->pdev->dev,
			"Failed to query health information, Err:%ld\n", rc);
		return -ENXIO;
	}

	p->lasthealth_jiffies = jiffies;
	/* Only bits set in both the bitmap and its valid-mask are meaningful */
	p->health_bitmap = ret[0] & ret[1];

	dev_dbg(&p->pdev->dev,
		"Queried dimm health info. Bitmap:0x%016lx Mask:0x%016lx\n",
		ret[0], ret[1]);

	return 0;
}

/* Min interval in seconds for assuming stable dimm health */
#define MIN_HEALTH_QUERY_INTERVAL 60

/* Query cached health info and if needed call __drc_pmem_query_health */
static int drc_pmem_query_health(struct papr_scm_priv *p)
{
	unsigned long cache_timeout;
	int rc;

	/* Protect concurrent modifications to papr_scm_priv */
	rc = mutex_lock_interruptible(&p->health_mutex);
	if (rc)
		return rc;

	/* Jiffies offset for which the health data is assumed to be same */
	cache_timeout = p->lasthealth_jiffies +
		msecs_to_jiffies(MIN_HEALTH_QUERY_INTERVAL * 1000);

	/* Fetch new health info if it's older than MIN_HEALTH_QUERY_INTERVAL */
	if (time_after(jiffies, cache_timeout))
		rc = __drc_pmem_query_health(p);
	else
		/* Assume cached health data is valid */
		rc = 0;

	mutex_unlock(&p->health_mutex);
	return rc;
}

static int papr_scm_meta_get(struct papr_scm_priv *p,
			     struct nd_cmd_get_config_data_hdr *hdr)
{
	unsigned long data[PLPAR_HCALL_BUFSIZE];
	unsigned long offset, data_offset;
	int len, read;
	int64_t ret;

	if ((hdr->in_offset + hdr->in_length) > p->metadata_size)
		return -EINVAL;

	for (len = hdr->in_length; len; len -= read) {

		data_offset = hdr->in_length - len;
		offset = hdr->in_offset + data_offset;

		/* Read the largest power-of-two chunk the remainder allows */
		if (len >= 8)
			read = 8;
		else if (len >= 4)
			read = 4;
		else if (len >= 2)
			read = 2;
		else
			read = 1;

		ret = plpar_hcall(H_SCM_READ_METADATA, data, p->drc_index,
				  offset, read);

		if (ret == H_PARAMETER) /* bad DRC index */
			return -ENODEV;
		if (ret)
			return -EINVAL; /* other invalid parameter */

		switch (read) {
		case 8:
			*(uint64_t *)(hdr->out_buf + data_offset) = be64_to_cpu(data[0]);
			break;
		case 4:
			*(uint32_t *)(hdr->out_buf + data_offset) = be32_to_cpu(data[0] & 0xffffffff);
			break;
		case 2:
			*(uint16_t *)(hdr->out_buf + data_offset) = be16_to_cpu(data[0] & 0xffff);
			break;
		case 1:
			*(uint8_t *)(hdr->out_buf + data_offset) = (data[0] & 0xff);
			break;
		}
	}
	return 0;
}

static int papr_scm_meta_set(struct papr_scm_priv *p,
			     struct nd_cmd_set_config_hdr *hdr)
{
	unsigned long offset, data_offset;
	int len, wrote;
	unsigned long data;
	__be64 data_be;
	int64_t ret;

	if ((hdr->in_offset + hdr->in_length) > p->metadata_size)
		return -EINVAL;

	for (len = hdr->in_length; len; len -= wrote) {

		data_offset = hdr->in_length - len;
		offset = hdr->in_offset + data_offset;

		if (len >= 8) {
			data = *(uint64_t *)(hdr->in_buf + data_offset);
			data_be = cpu_to_be64(data);
			wrote = 8;
		} else if (len >= 4) {
			data = *(uint32_t *)(hdr->in_buf + data_offset);
			data &= 0xffffffff;
			data_be = cpu_to_be32(data);
			wrote = 4;
		} else if (len >= 2) {
			data = *(uint16_t *)(hdr->in_buf + data_offset);
			data &= 0xffff;
			data_be = cpu_to_be16(data);
			wrote = 2;
		} else {
			data_be = *(uint8_t *)(hdr->in_buf + data_offset);
			data_be &= 0xff;
			wrote = 1;
		}

		ret = plpar_hcall_norets(H_SCM_WRITE_METADATA, p->drc_index,
					 offset, data_be, wrote);
		if (ret == H_PARAMETER) /* bad DRC index */
			return -ENODEV;
		if (ret)
			return -EINVAL; /* other invalid parameter */
	}

	return 0;
}

/*
 * Do sanity checks on the input args to the dimm-control function and return
 * '0' if valid. Validation of PDSM payloads happens later in
 * papr_scm_service_pdsm.
 */
static int is_cmd_valid(struct nvdimm *nvdimm, unsigned int cmd, void *buf,
			unsigned int buf_len)
{
	unsigned long cmd_mask = PAPR_SCM_DIMM_CMD_MASK;
	struct nd_cmd_pkg *nd_cmd;
	struct papr_scm_priv *p;
	enum papr_pdsm pdsm;

	/* Only dimm-specific calls are supported atm */
	if (!nvdimm)
		return -EINVAL;

	/* get the provider data from struct nvdimm */
	p = nvdimm_provider_data(nvdimm);

	if (!test_bit(cmd, &cmd_mask)) {
		dev_dbg(&p->pdev->dev, "Unsupported cmd=%u\n", cmd);
		return -EINVAL;
	}

	/* For CMD_CALL verify pdsm request */
	if (cmd == ND_CMD_CALL) {
		/* Verify the envelope and envelope size */
		if (!buf ||
		    buf_len < (sizeof(struct nd_cmd_pkg) + ND_PDSM_HDR_SIZE)) {
			dev_dbg(&p->pdev->dev, "Invalid pkg size=%u\n",
				buf_len);
			return -EINVAL;
		}

		/* Verify that the nd_cmd_pkg.nd_family is correct */
		nd_cmd = (struct nd_cmd_pkg *)buf;

		if (nd_cmd->nd_family != NVDIMM_FAMILY_PAPR) {
			dev_dbg(&p->pdev->dev, "Invalid pkg family=0x%llx\n",
				nd_cmd->nd_family);
			return -EINVAL;
		}

		pdsm = (enum papr_pdsm)nd_cmd->nd_command;

		/* Verify if the pdsm command is valid */
		if (pdsm <= PAPR_PDSM_MIN || pdsm >= PAPR_PDSM_MAX) {
			dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Invalid PDSM\n",
				pdsm);
			return -EINVAL;
		}

		/* Have enough space to hold returned 'nd_pkg_pdsm' header */
		if (nd_cmd->nd_size_out < ND_PDSM_HDR_SIZE) {
			dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Invalid payload\n",
				pdsm);
			return -EINVAL;
		}
	}

	/* Let the command be further processed */
	return 0;
}

static int papr_pdsm_fuel_gauge(struct papr_scm_priv *p,
				union nd_pdsm_payload *payload)
{
	int rc, size;
	u64 statval;
	struct papr_scm_perf_stat *stat;
	struct papr_scm_perf_stats *stats;

	/* Silently fail if fetching performance metrics isn't supported */
	if (!p->stat_buffer_len)
		return 0;

	/* Allocate request buffer enough to hold single performance stat */
	size = sizeof(struct papr_scm_perf_stats) +
		sizeof(struct papr_scm_perf_stat);

	stats = kzalloc(size, GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	stat = &stats->scm_statistic[0];
	memcpy(&stat->stat_id, "MemLife ", sizeof(stat->stat_id));
	stat->stat_val = 0;

	/* Fetch the fuel gauge and populate it in payload */
	rc = drc_pmem_query_stats(p, stats, 1);
	if (rc < 0) {
		dev_dbg(&p->pdev->dev, "Err(%d) fetching fuel gauge\n", rc);
		goto free_stats;
	}

	statval = be64_to_cpu(stat->stat_val);
	dev_dbg(&p->pdev->dev,
		"Fetched fuel-gauge %llu", statval);
	payload->health.extension_flags |=
		PDSM_DIMM_HEALTH_RUN_GAUGE_VALID;
	payload->health.dimm_fuel_gauge = statval;

	rc = sizeof(struct nd_papr_pdsm_health);

free_stats:
	kfree(stats);
	return rc;
}

/* Add the dirty-shutdown-counter value to the pdsm */
static int papr_pdsm_dsc(struct papr_scm_priv *p,
			 union nd_pdsm_payload *payload)
{
	payload->health.extension_flags |= PDSM_DIMM_DSC_VALID;
	payload->health.dimm_dsc = p->dirty_shutdown_counter;

	return sizeof(struct nd_papr_pdsm_health);
}

/* Fetch the DIMM health info and populate it in provided package. */
static int papr_pdsm_health(struct papr_scm_priv *p,
			    union nd_pdsm_payload *payload)
{
	int rc;

	/* Ensure dimm health mutex is taken preventing concurrent access */
	rc = mutex_lock_interruptible(&p->health_mutex);
	if (rc)
		goto out;

	/* Always fetch up-to-date dimm health data ignoring cached values */
	rc = __drc_pmem_query_health(p);
	if (rc) {
		mutex_unlock(&p->health_mutex);
		goto out;
	}

	/* update health struct with various flags derived from health bitmap */
	payload->health = (struct nd_papr_pdsm_health) {
		.extension_flags = 0,
		.dimm_unarmed = !!(p->health_bitmap & PAPR_PMEM_UNARMED_MASK),
		.dimm_bad_shutdown = !!(p->health_bitmap & PAPR_PMEM_BAD_SHUTDOWN_MASK),
		.dimm_bad_restore = !!(p->health_bitmap & PAPR_PMEM_BAD_RESTORE_MASK),
		.dimm_scrubbed = !!(p->health_bitmap & PAPR_PMEM_SCRUBBED_AND_LOCKED),
		.dimm_locked = !!(p->health_bitmap & PAPR_PMEM_SCRUBBED_AND_LOCKED),
		.dimm_encrypted = !!(p->health_bitmap & PAPR_PMEM_ENCRYPTED),
		.dimm_health = PAPR_PDSM_DIMM_HEALTHY,
	};

	/* Update field dimm_health based on health_bitmap flags */
	if (p->health_bitmap & PAPR_PMEM_HEALTH_FATAL)
		payload->health.dimm_health = PAPR_PDSM_DIMM_FATAL;
	else if (p->health_bitmap & PAPR_PMEM_HEALTH_CRITICAL)
		payload->health.dimm_health = PAPR_PDSM_DIMM_CRITICAL;
	else if (p->health_bitmap & PAPR_PMEM_HEALTH_UNHEALTHY)
		payload->health.dimm_health = PAPR_PDSM_DIMM_UNHEALTHY;

	/* struct populated hence can release the mutex now */
	mutex_unlock(&p->health_mutex);

	/* Populate the fuel gauge meter in the payload */
	papr_pdsm_fuel_gauge(p, payload);
	/* Populate the dirty-shutdown-counter field */
	papr_pdsm_dsc(p, payload);

	rc = sizeof(struct nd_papr_pdsm_health);

out:
	return rc;
}

/*
 * 'struct pdsm_cmd_desc'
 * Identifies supported PDSMs' expected length of in/out payloads
 * and pdsm service function.
 *
 * size_in	: Size of input payload if any in the PDSM request.
 * size_out	: Size of output payload if any in the PDSM request.
 * service	: Service function for the PDSM request. Return semantics:
 *		  rc < 0 : Error servicing PDSM and rc indicates the error.
 *		  rc >= 0: Serviced successfully and 'rc' indicates the number
 *			   of bytes written to the payload.
 */
struct pdsm_cmd_desc {
	u32 size_in;
	u32 size_out;
	int (*service)(struct papr_scm_priv *dimm,
		       union nd_pdsm_payload *payload);
};

/* Holds all supported PDSMs' command descriptors */
static const struct pdsm_cmd_desc __pdsm_cmd_descriptors[] = {
	[PAPR_PDSM_MIN] = {
		.size_in = 0,
		.size_out = 0,
		.service = NULL,
	},
	/* New PDSM command descriptors to be added below */

	[PAPR_PDSM_HEALTH] = {
		.size_in = 0,
		.size_out = sizeof(struct nd_papr_pdsm_health),
		.service = papr_pdsm_health,
	},

	/* Empty */
	[PAPR_PDSM_MAX] = {
		.size_in = 0,
		.size_out = 0,
		.service = NULL,
	},
};

/* Given a valid pdsm cmd return its command descriptor else return NULL */
static inline const struct pdsm_cmd_desc *pdsm_cmd_desc(enum papr_pdsm cmd)
{
	/* Both bounds must hold for the index to be valid */
	if (cmd >= 0 && cmd < ARRAY_SIZE(__pdsm_cmd_descriptors))
		return &__pdsm_cmd_descriptors[cmd];

	return NULL;
}

/*
 * For a given pdsm request call an appropriate service function.
 * Returns errors if any while handling the pdsm command package.
 */
static int papr_scm_service_pdsm(struct papr_scm_priv *p,
				 struct nd_cmd_pkg *pkg)
{
	/* Get the PDSM header and PDSM command */
	struct nd_pkg_pdsm *pdsm_pkg = (struct nd_pkg_pdsm *)pkg->nd_payload;
	enum papr_pdsm pdsm = (enum papr_pdsm)pkg->nd_command;
	const struct pdsm_cmd_desc *pdsc;
	int rc;

	/* Fetch corresponding pdsm descriptor for validation and servicing */
	pdsc = pdsm_cmd_desc(pdsm);

	/* Validate pdsm descriptor */
	/* Ensure that reserved fields are 0 */
	if (pdsm_pkg->reserved[0] || pdsm_pkg->reserved[1]) {
		dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Invalid reserved field\n",
			pdsm);
		return -EINVAL;
	}

	/* If pdsm expects some input, then ensure that the size_in matches */
	if (pdsc->size_in &&
	    pkg->nd_size_in != (pdsc->size_in + ND_PDSM_HDR_SIZE)) {
		dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Mismatched size_in=%d\n",
			pdsm, pkg->nd_size_in);
		return -EINVAL;
	}

	/* If pdsm wants to return data, then ensure that size_out matches */
	if (pdsc->size_out &&
	    pkg->nd_size_out != (pdsc->size_out + ND_PDSM_HDR_SIZE)) {
		dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Mismatched size_out=%d\n",
			pdsm, pkg->nd_size_out);
		return -EINVAL;
	}

	/* Service the pdsm */
	if (pdsc->service) {
		dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Servicing..\n", pdsm);

		rc = pdsc->service(p, &pdsm_pkg->payload);

		if (rc < 0) {
			/* error encountered while servicing pdsm */
			pdsm_pkg->cmd_status = rc;
			pkg->nd_fw_size = ND_PDSM_HDR_SIZE;
		} else {
			/* pdsm serviced and 'rc' bytes written to payload */
			pdsm_pkg->cmd_status = 0;
			pkg->nd_fw_size = ND_PDSM_HDR_SIZE + rc;
		}
	} else {
		dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Unsupported PDSM request\n",
			pdsm);
		pdsm_pkg->cmd_status = -ENOENT;
		pkg->nd_fw_size = ND_PDSM_HDR_SIZE;
	}

	return pdsm_pkg->cmd_status;
}

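/*
 * PDSM packages arrive here from userspace (e.g. ndctl/libndctl) as
 * ND_CMD_CALL envelopes issued against the DIMM's /dev/nmemX device and
 * dispatched through papr_scm_ndctl() below.
 */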
static int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc,
			  struct nvdimm *nvdimm, unsigned int cmd, void *buf,
			  unsigned int buf_len, int *cmd_rc)
{
	struct nd_cmd_get_config_size *get_size_hdr;
	struct nd_cmd_pkg *call_pkg = NULL;
	struct papr_scm_priv *p;
	int rc;

	rc = is_cmd_valid(nvdimm, cmd, buf, buf_len);
	if (rc) {
		pr_debug("Invalid cmd=0x%x. Err=%d\n", cmd, rc);
		return rc;
	}

	/* Use a local variable in case cmd_rc pointer is NULL */
	if (!cmd_rc)
		cmd_rc = &rc;

	p = nvdimm_provider_data(nvdimm);

	switch (cmd) {
	case ND_CMD_GET_CONFIG_SIZE:
		get_size_hdr = buf;
		get_size_hdr->status = 0;
		get_size_hdr->max_xfer = 8;
		get_size_hdr->config_size = p->metadata_size;
		*cmd_rc = 0;
		break;

	case ND_CMD_GET_CONFIG_DATA:
		*cmd_rc = papr_scm_meta_get(p, buf);
		break;

	case ND_CMD_SET_CONFIG_DATA:
		*cmd_rc = papr_scm_meta_set(p, buf);
		break;

	case ND_CMD_CALL:
		call_pkg = (struct nd_cmd_pkg *)buf;
		*cmd_rc = papr_scm_service_pdsm(p, call_pkg);
		break;

	default:
		dev_dbg(&p->pdev->dev, "Unknown command = %d\n", cmd);
		return -EINVAL;
	}

	dev_dbg(&p->pdev->dev, "returned with cmd_rc = %d\n", *cmd_rc);

	return 0;
}

static ssize_t perf_stats_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	int index;
	ssize_t rc;
	struct seq_buf s;
	struct papr_scm_perf_stat *stat;
	struct papr_scm_perf_stats *stats;
	struct nvdimm *dimm = to_nvdimm(dev);
	struct papr_scm_priv *p = nvdimm_provider_data(dimm);

	if (!p->stat_buffer_len)
		return -ENOENT;

	/* Allocate the buffer for phyp where stats are written */
	stats = kzalloc(p->stat_buffer_len, GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	/* Ask phyp to return all dimm perf stats */
	rc = drc_pmem_query_stats(p, stats, 0);
	if (rc)
		goto free_stats;

	/*
	 * Go through the returned output buffer and print stats and
	 * values. Since stat_id is essentially a char string of
	 * 8 bytes, simply use the string format specifier to print it.
	 */
	seq_buf_init(&s, buf, PAGE_SIZE);
	for (index = 0, stat = stats->scm_statistic;
	     index < be32_to_cpu(stats->num_statistics);
	     ++index, ++stat) {
		seq_buf_printf(&s, "%.8s = 0x%016llX\n",
			       stat->stat_id,
			       be64_to_cpu(stat->stat_val));
	}

free_stats:
	kfree(stats);
	return rc ? rc : (ssize_t)seq_buf_used(&s);
}
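/*
 * Root-only attribute; reading it yields one "<stat-id> = <value>" line per
 * statistic, e.g. (value illustrative):
 *   MemLife  = 0x0000000000000064
 */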
static DEVICE_ATTR_ADMIN_RO(perf_stats);

static ssize_t flags_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct nvdimm *dimm = to_nvdimm(dev);
	struct papr_scm_priv *p = nvdimm_provider_data(dimm);
	struct seq_buf s;
	u64 health;
	int rc;

	rc = drc_pmem_query_health(p);
	if (rc)
		return rc;

	/* Copy health_bitmap locally, check masks & update out buffer */
	health = READ_ONCE(p->health_bitmap);

	seq_buf_init(&s, buf, PAGE_SIZE);
	if (health & PAPR_PMEM_UNARMED_MASK)
		seq_buf_printf(&s, "not_armed ");

	if (health & PAPR_PMEM_BAD_SHUTDOWN_MASK)
		seq_buf_printf(&s, "flush_fail ");

	if (health & PAPR_PMEM_BAD_RESTORE_MASK)
		seq_buf_printf(&s, "restore_fail ");

	if (health & PAPR_PMEM_ENCRYPTED)
		seq_buf_printf(&s, "encrypted ");

	if (health & PAPR_PMEM_SMART_EVENT_MASK)
		seq_buf_printf(&s, "smart_notify ");

	if (health & PAPR_PMEM_SCRUBBED_AND_LOCKED)
		seq_buf_printf(&s, "scrubbed locked ");

	if (seq_buf_used(&s))
		seq_buf_printf(&s, "\n");

	return seq_buf_used(&s);
}
DEVICE_ATTR_RO(flags);

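/*
 * Expose the dirty shutdown counter, seeded at probe time from the
 * "ibm,persistence-failed-count" device-tree property.
 */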
static ssize_t dirty_shutdown_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct nvdimm *dimm = to_nvdimm(dev);
	struct papr_scm_priv *p = nvdimm_provider_data(dimm);

	return sysfs_emit(buf, "%llu\n", p->dirty_shutdown_counter);
}
DEVICE_ATTR_RO(dirty_shutdown);

static umode_t papr_nd_attribute_visible(struct kobject *kobj,
					 struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct papr_scm_priv *p = nvdimm_provider_data(nvdimm);

	/* Hide the perf_stats sysfs attribute if perf-stats are unavailable */
	if (attr == &dev_attr_perf_stats.attr && p->stat_buffer_len == 0)
		return 0;

	return attr->mode;
}

/* papr_scm specific dimm attributes */
static struct attribute *papr_nd_attributes[] = {
	&dev_attr_flags.attr,
	&dev_attr_perf_stats.attr,
	&dev_attr_dirty_shutdown.attr,
	NULL,
};

static struct attribute_group papr_nd_attribute_group = {
	.name = "papr",
	.is_visible = papr_nd_attribute_visible,
	.attrs = papr_nd_attributes,
};

static const struct attribute_group *papr_nd_attr_groups[] = {
	&papr_nd_attribute_group,
	NULL,
};

static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
{
	struct device *dev = &p->pdev->dev;
	struct nd_mapping_desc mapping;
	struct nd_region_desc ndr_desc;
	unsigned long dimm_flags;
	int target_nid, online_nid;

	p->bus_desc.ndctl = papr_scm_ndctl;
	p->bus_desc.module = THIS_MODULE;
	p->bus_desc.of_node = p->pdev->dev.of_node;
	p->bus_desc.provider_name = kstrdup(p->pdev->name, GFP_KERNEL);

	/* Set the dimm command family mask to accept PDSMs */
	set_bit(NVDIMM_FAMILY_PAPR, &p->bus_desc.dimm_family_mask);

	if (!p->bus_desc.provider_name)
		return -ENOMEM;

	p->bus = nvdimm_bus_register(NULL, &p->bus_desc);
	if (!p->bus) {
		dev_err(dev, "Error creating nvdimm bus %pOF\n", p->dn);
		kfree(p->bus_desc.provider_name);
		return -ENXIO;
	}

	dimm_flags = 0;
	set_bit(NDD_LABELING, &dimm_flags);

	/*
	 * Check if the nvdimm is unarmed. No locking needed as we are still
	 * initializing. Ignore error encountered if any.
	 */
	__drc_pmem_query_health(p);

	if (p->health_bitmap & PAPR_PMEM_UNARMED_MASK)
		set_bit(NDD_UNARMED, &dimm_flags);

	p->nvdimm = nvdimm_create(p->bus, p, papr_nd_attr_groups,
				  dimm_flags, PAPR_SCM_DIMM_CMD_MASK, 0, NULL);
	if (!p->nvdimm) {
		dev_err(dev, "Error creating DIMM object for %pOF\n", p->dn);
		goto err;
	}

	if (nvdimm_bus_check_dimm_count(p->bus, 1))
		goto err;

	/* now add the region */

	memset(&mapping, 0, sizeof(mapping));
	mapping.nvdimm = p->nvdimm;
	mapping.start = 0;
	mapping.size = p->blocks * p->block_size; // XXX: potential overflow?

	memset(&ndr_desc, 0, sizeof(ndr_desc));
	target_nid = dev_to_node(&p->pdev->dev);
	online_nid = numa_map_to_online_node(target_nid);
	ndr_desc.numa_node = online_nid;
	ndr_desc.target_node = target_nid;
	ndr_desc.res = &p->res;
	ndr_desc.of_node = p->dn;
	ndr_desc.provider_data = p;
	ndr_desc.mapping = &mapping;
	ndr_desc.num_mappings = 1;
	ndr_desc.nd_set = &p->nd_set;

	if (p->hcall_flush_required) {
		set_bit(ND_REGION_ASYNC, &ndr_desc.flags);
		ndr_desc.flush = papr_scm_pmem_flush;
	}

	if (p->is_volatile)
		p->region = nvdimm_volatile_region_create(p->bus, &ndr_desc);
	else {
		set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc.flags);
		p->region = nvdimm_pmem_region_create(p->bus, &ndr_desc);
	}
	if (!p->region) {
		dev_err(dev, "Error registering region %pR from %pOF\n",
			ndr_desc.res, p->dn);
		goto err;
	}

	if (target_nid != online_nid)
		dev_info(dev, "Region registered with target node %d and online node %d",
			 target_nid, online_nid);

	mutex_lock(&papr_ndr_lock);
	list_add_tail(&p->region_list, &papr_nd_regions);
	mutex_unlock(&papr_ndr_lock);

	return 0;

err:	nvdimm_bus_unregister(p->bus);
	kfree(p->bus_desc.provider_name);
	return -ENXIO;
}

static void papr_scm_add_badblock(struct nd_region *region,
				  struct nvdimm_bus *bus, u64 phys_addr)
{
	u64 aligned_addr = ALIGN_DOWN(phys_addr, L1_CACHE_BYTES);

	if (nvdimm_bus_add_badrange(bus, aligned_addr, L1_CACHE_BYTES)) {
		pr_err("Bad block registration for 0x%llx failed\n", phys_addr);
		return;
	}

	pr_debug("Add memory range (0x%llx - 0x%llx) as bad range\n",
		 aligned_addr, aligned_addr + L1_CACHE_BYTES);

	nvdimm_region_notify(region, NVDIMM_REVALIDATE_POISON);
}

static int handle_mce_ue(struct notifier_block *nb, unsigned long val,
			 void *data)
{
	struct machine_check_event *evt = data;
	struct papr_scm_priv *p;
	u64 phys_addr;
	bool found = false;

	if (evt->error_type != MCE_ERROR_TYPE_UE)
		return NOTIFY_DONE;

	if (list_empty(&papr_nd_regions))
		return NOTIFY_DONE;

	/*
	 * The physical address obtained here is PAGE_SIZE aligned, so get the
	 * exact address from the effective address
	 */
	phys_addr = evt->u.ue_error.physical_address +
			(evt->u.ue_error.effective_address & ~PAGE_MASK);

	if (!evt->u.ue_error.physical_address_provided ||
	    !is_zone_device_page(pfn_to_page(phys_addr >> PAGE_SHIFT)))
		return NOTIFY_DONE;

	/* mce notifier is called from a process context, so mutex is safe */
	mutex_lock(&papr_ndr_lock);
	list_for_each_entry(p, &papr_nd_regions, region_list) {
		if (phys_addr >= p->res.start && phys_addr <= p->res.end) {
			found = true;
			break;
		}
	}

	if (found)
		papr_scm_add_badblock(p->region, p->bus, phys_addr);

	mutex_unlock(&papr_ndr_lock);

	return found ? NOTIFY_OK : NOTIFY_DONE;
}

static struct notifier_block mce_ue_nb = {
	.notifier_call = handle_mce_ue
};

static int papr_scm_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	u32 drc_index, metadata_size = 0; /* metadata-size DT prop is optional */
	u64 blocks, block_size;
	struct papr_scm_priv *p;
	u8 uuid_raw[UUID_SIZE];
	const char *uuid_str;
	ssize_t stat_size;
	uuid_t uuid;
	int rc;

	/* check we have all the required DT properties */
	if (of_property_read_u32(dn, "ibm,my-drc-index", &drc_index)) {
		dev_err(&pdev->dev, "%pOF: missing drc-index!\n", dn);
		return -ENODEV;
	}

	if (of_property_read_u64(dn, "ibm,block-size", &block_size)) {
		dev_err(&pdev->dev, "%pOF: missing block-size!\n", dn);
		return -ENODEV;
	}

	if (of_property_read_u64(dn, "ibm,number-of-blocks", &blocks)) {
		dev_err(&pdev->dev, "%pOF: missing number-of-blocks!\n", dn);
		return -ENODEV;
	}

	if (of_property_read_string(dn, "ibm,unit-guid", &uuid_str)) {
		dev_err(&pdev->dev, "%pOF: missing unit-guid!\n", dn);
		return -ENODEV;
	}

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	/* Initialize the dimm mutex */
	mutex_init(&p->health_mutex);

	/* optional DT properties */
	of_property_read_u32(dn, "ibm,metadata-size", &metadata_size);

	p->dn = dn;
	p->drc_index = drc_index;
	p->block_size = block_size;
	p->blocks = blocks;
	p->is_volatile = !of_property_read_bool(dn, "ibm,cache-flush-required");
	p->hcall_flush_required = of_property_read_bool(dn, "ibm,hcall-flush-required");

	if (of_property_read_u64(dn, "ibm,persistence-failed-count",
				 &p->dirty_shutdown_counter))
		p->dirty_shutdown_counter = 0;

	/* We just need to ensure that the interleave-set cookies are unique */
	uuid_parse(uuid_str, &uuid);

	/*
	 * The cookie1 and cookie2 are not really little endian.
	 * We store a raw buffer representation of the
	 * uuid string so that we can compare this with the label
	 * area cookie irrespective of the endian configuration
	 * with which the kernel is built.
	 *
	 * Historically we stored the cookie in the below format.
	 * for a uuid string 72511b67-0b3b-42fd-8d1d-5be3cae8bcaa
	 *	cookie1 was 0xfd423b0b671b5172
	 *	cookie2 was 0xaabce8cae35b1d8d
	 */
	export_uuid(uuid_raw, &uuid);
	p->nd_set.cookie1 = get_unaligned_le64(&uuid_raw[0]);
	p->nd_set.cookie2 = get_unaligned_le64(&uuid_raw[8]);

	/* might be zero */
	p->metadata_size = metadata_size;
	p->pdev = pdev;

	/* request the hypervisor to bind this region to somewhere in memory */
	rc = drc_pmem_bind(p);

	/* If phyp says drc memory is still bound then force an unbind and retry */
	if (rc == H_OVERLAP)
		rc = drc_pmem_query_n_bind(p);

	if (rc != H_SUCCESS) {
		dev_err(&p->pdev->dev, "bind err: %d\n", rc);
		rc = -ENXIO;
		goto err;
	}

	/* setup the resource for the newly bound range */
	p->res.start = p->bound_addr;
	p->res.end   = p->bound_addr + p->blocks * p->block_size - 1;
	p->res.name  = pdev->name;
	p->res.flags = IORESOURCE_MEM;

	/* Try retrieving the stat buffer and see if it's supported */
	stat_size = drc_pmem_query_stats(p, NULL, 0);
	if (stat_size > 0) {
		p->stat_buffer_len = stat_size;
		dev_dbg(&p->pdev->dev, "Max perf-stat size %lu-bytes\n",
			p->stat_buffer_len);
	}

	rc = papr_scm_nvdimm_init(p);
	if (rc)
		goto err2;

	platform_set_drvdata(pdev, p);

	return 0;

err2:	drc_pmem_unbind(p);
err:	kfree(p);
	return rc;
}

static int papr_scm_remove(struct platform_device *pdev)
{
	struct papr_scm_priv *p = platform_get_drvdata(pdev);

	mutex_lock(&papr_ndr_lock);
	list_del(&p->region_list);
	mutex_unlock(&papr_ndr_lock);

	nvdimm_bus_unregister(p->bus);
	drc_pmem_unbind(p);
	kfree(p->bus_desc.provider_name);
	kfree(p);

	return 0;
}

static const struct of_device_id papr_scm_match[] = {
	{ .compatible = "ibm,pmemory" },
	{ .compatible = "ibm,pmemory-v2" },
	{ },
};

static struct platform_driver papr_scm_driver = {
	.probe = papr_scm_probe,
	.remove = papr_scm_remove,
	.driver = {
		.name = "papr_scm",
		.of_match_table = papr_scm_match,
	},
};

static int __init papr_scm_init(void)
{
	int ret;

	ret = platform_driver_register(&papr_scm_driver);
	if (!ret)
		mce_register_notifier(&mce_ue_nb);

	return ret;
}
module_init(papr_scm_init);

static void __exit papr_scm_exit(void)
{
	mce_unregister_notifier(&mce_ue_nb);
	platform_driver_unregister(&papr_scm_driver);
}
module_exit(papr_scm_exit);

MODULE_DEVICE_TABLE(of, papr_scm_match);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");
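MODULE_DESCRIPTION("PAPR Storage Class Memory interface driver");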