Given a DRC Index, collect the performance statistics for the NVDIMM and copy
them to the resultBuffer.
+**H_SCM_FLUSH**
+
+| Input: *drcIndex, continue-token*
+| Out: *continue-token*
+| Return Value: *H_Success, H_Parameter, H_P2, H_Busy*
+
+Given a DRC Index, flush the data to the backend NVDIMM device.
+
+The hcall returns H_BUSY when the flush takes a long time to complete, and the
+hcall then needs to be issued multiple times in order to be completely
+serviced. The *continue-token* returned in the output is to be passed in the
+argument list of subsequent hcalls to the hypervisor until the hcall is
+completely serviced, at which point H_SUCCESS or another error is returned by
+the hypervisor.
+
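In condensed form, the retry protocol looks roughly like the sketch below. This is a minimal illustration, not part of the patch: it assumes a plpar_hcall()-style wrapper and a hypothetical drc_index variable for the region being flushed. The driver change further down implements the same loop, with additional long-busy handling.

	/* Hypothetical caller-side sketch of the H_SCM_FLUSH retry protocol */
	unsigned long ret_buf[PLPAR_HCALL_BUFSIZE], token = 0;
	long rc;

	do {
		rc = plpar_hcall(H_SCM_FLUSH, ret_buf, drc_index, token);
		/* the hypervisor hands back a continue-token for the next attempt */
		token = ret_buf[0];
	} while (rc == H_BUSY);		/* H_SUCCESS or an error ends the loop */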
References
==========
.. [1] "Power Architecture Platform Reference"
#define H_SCM_HEALTH 0x400
#define H_SCM_PERFORMANCE_STATS 0x418
#define H_RPT_INVALIDATE 0x448
-#define MAX_HCALL_OPCODE H_RPT_INVALIDATE
+#define H_SCM_FLUSH 0x44C
+#define MAX_HCALL_OPCODE H_SCM_FLUSH
/* Scope args for H_SCM_UNBIND_ALL */
#define H_UNBIND_SCOPE_ALL (0x1)
uint64_t block_size;
int metadata_size;
bool is_volatile;
+ bool hcall_flush_required;
uint64_t bound_addr;
size_t stat_buffer_len;
};
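+/*
+ * Issue H_SCM_FLUSH for the region's DRC index, retrying with the returned
+ * continue-token while the hypervisor reports the request as busy.
+ */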
+static int papr_scm_pmem_flush(struct nd_region *nd_region,
+ struct bio *bio __maybe_unused)
+{
+ struct papr_scm_priv *p = nd_region_provider_data(nd_region);
+ unsigned long ret_buf[PLPAR_HCALL_BUFSIZE], token = 0;
+ long rc;
+
+ dev_dbg(&p->pdev->dev, "flush drc 0x%x", p->drc_index);
+
+ do {
+ rc = plpar_hcall(H_SCM_FLUSH, ret_buf, p->drc_index, token);
+ token = ret_buf[0];
+
+ /* Check if we are stalled for some time */
+ if (H_IS_LONG_BUSY(rc)) {
+ msleep(get_longbusy_msecs(rc));
+ rc = H_BUSY;
+ } else if (rc == H_BUSY) {
+ cond_resched();
+ }
+ } while (rc == H_BUSY);
+
+ if (rc) {
+ dev_err(&p->pdev->dev, "flush error: %ld", rc);
+ rc = -EIO;
+ } else {
+ dev_dbg(&p->pdev->dev, "flush drc 0x%x complete", p->drc_index);
+ }
+
+ return rc;
+}
+
static LIST_HEAD(papr_nd_regions);
static DEFINE_MUTEX(papr_ndr_lock);
ndr_desc.num_mappings = 1;
ndr_desc.nd_set = &p->nd_set;
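+ /* Platform needs a hypervisor-assisted flush; mark the region async and hook the callback */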
+ if (p->hcall_flush_required) {
+ set_bit(ND_REGION_ASYNC, &ndr_desc.flags);
+ ndr_desc.flush = papr_scm_pmem_flush;
+ }
+
if (p->is_volatile)
p->region = nvdimm_volatile_region_create(p->bus, &ndr_desc);
else {
p->block_size = block_size;
p->blocks = blocks;
p->is_volatile = !of_property_read_bool(dn, "ibm,cache-flush-required");
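+ /* "ibm,hcall-flush-required" indicates writes must be flushed via H_SCM_FLUSH */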
+ p->hcall_flush_required = of_property_read_bool(dn, "ibm,hcall-flush-required");
/* We just need to ensure that set cookies are unique across */
uuid_parse(uuid_str, (uuid_t *) uuid);