Merge branch 'for-5.7/libnvdimm' into libnvdimm-for-next
[linux-2.6-microblaze.git] / arch / powerpc / platforms / pseries / papr_scm.c
1 // SPDX-License-Identifier: GPL-2.0
2
3 #define pr_fmt(fmt)     "papr-scm: " fmt
4
5 #include <linux/of.h>
6 #include <linux/kernel.h>
7 #include <linux/module.h>
8 #include <linux/ioport.h>
9 #include <linux/slab.h>
10 #include <linux/ndctl.h>
11 #include <linux/sched.h>
12 #include <linux/libnvdimm.h>
13 #include <linux/platform_device.h>
14 #include <linux/delay.h>
15
16 #include <asm/plpar_wrappers.h>
17
18 #define BIND_ANY_ADDR (~0ul)
19
20 #define PAPR_SCM_DIMM_CMD_MASK \
21         ((1ul << ND_CMD_GET_CONFIG_SIZE) | \
22          (1ul << ND_CMD_GET_CONFIG_DATA) | \
23          (1ul << ND_CMD_SET_CONFIG_DATA))
24
/* Per-device driver state for one SCM (storage class memory) region. */
struct papr_scm_priv {
	struct platform_device *pdev;		/* backing platform device */
	struct device_node *dn;			/* the "ibm,pmemory" device-tree node */
	uint32_t drc_index;			/* DRC index identifying the region in hcalls */
	uint64_t blocks;			/* region size in blocks ("ibm,number-of-blocks") */
	uint64_t block_size;			/* bytes per block ("ibm,block-size") */
	int metadata_size;			/* label/metadata area size; may be zero */
	bool is_volatile;			/* set when "ibm,cache-flush-required" is absent */

	uint64_t bound_addr;			/* guest physical base address after bind */

	struct nvdimm_bus_descriptor bus_desc;	/* descriptor passed to nvdimm_bus_register() */
	struct nvdimm_bus *bus;			/* registered nvdimm bus */
	struct nvdimm *nvdimm;			/* the single dimm object on that bus */
	struct resource res;			/* resource covering the bound range */
	struct nd_region *region;		/* region registered over @res */
	struct nd_interleave_set nd_set;	/* cookies derived from the unit-guid */
};
43
44 static int drc_pmem_bind(struct papr_scm_priv *p)
45 {
46         unsigned long ret[PLPAR_HCALL_BUFSIZE];
47         uint64_t saved = 0;
48         uint64_t token;
49         int64_t rc;
50
51         /*
52          * When the hypervisor cannot map all the requested memory in a single
53          * hcall it returns H_BUSY and we call again with the token until
54          * we get H_SUCCESS. Aborting the retry loop before getting H_SUCCESS
55          * leave the system in an undefined state, so we wait.
56          */
57         token = 0;
58
59         do {
60                 rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0,
61                                 p->blocks, BIND_ANY_ADDR, token);
62                 token = ret[0];
63                 if (!saved)
64                         saved = ret[1];
65                 cond_resched();
66         } while (rc == H_BUSY);
67
68         if (rc)
69                 return rc;
70
71         p->bound_addr = saved;
72         dev_dbg(&p->pdev->dev, "bound drc 0x%x to 0x%lx\n",
73                 p->drc_index, (unsigned long)saved);
74         return rc;
75 }
76
77 static void drc_pmem_unbind(struct papr_scm_priv *p)
78 {
79         unsigned long ret[PLPAR_HCALL_BUFSIZE];
80         uint64_t token = 0;
81         int64_t rc;
82
83         dev_dbg(&p->pdev->dev, "unbind drc 0x%x\n", p->drc_index);
84
85         /* NB: unbind has the same retry requirements as drc_pmem_bind() */
86         do {
87
88                 /* Unbind of all SCM resources associated with drcIndex */
89                 rc = plpar_hcall(H_SCM_UNBIND_ALL, ret, H_UNBIND_SCOPE_DRC,
90                                  p->drc_index, token);
91                 token = ret[0];
92
93                 /* Check if we are stalled for some time */
94                 if (H_IS_LONG_BUSY(rc)) {
95                         msleep(get_longbusy_msecs(rc));
96                         rc = H_BUSY;
97                 } else if (rc == H_BUSY) {
98                         cond_resched();
99                 }
100
101         } while (rc == H_BUSY);
102
103         if (rc)
104                 dev_err(&p->pdev->dev, "unbind error: %lld\n", rc);
105         else
106                 dev_dbg(&p->pdev->dev, "unbind drc 0x%x complete\n",
107                         p->drc_index);
108
109         return;
110 }
111
112 static int drc_pmem_query_n_bind(struct papr_scm_priv *p)
113 {
114         unsigned long start_addr;
115         unsigned long end_addr;
116         unsigned long ret[PLPAR_HCALL_BUFSIZE];
117         int64_t rc;
118
119
120         rc = plpar_hcall(H_SCM_QUERY_BLOCK_MEM_BINDING, ret,
121                          p->drc_index, 0);
122         if (rc)
123                 goto err_out;
124         start_addr = ret[0];
125
126         /* Make sure the full region is bound. */
127         rc = plpar_hcall(H_SCM_QUERY_BLOCK_MEM_BINDING, ret,
128                          p->drc_index, p->blocks - 1);
129         if (rc)
130                 goto err_out;
131         end_addr = ret[0];
132
133         if ((end_addr - start_addr) != ((p->blocks - 1) * p->block_size))
134                 goto err_out;
135
136         p->bound_addr = start_addr;
137         dev_dbg(&p->pdev->dev, "bound drc 0x%x to 0x%lx\n", p->drc_index, start_addr);
138         return rc;
139
140 err_out:
141         dev_info(&p->pdev->dev,
142                  "Failed to query, trying an unbind followed by bind");
143         drc_pmem_unbind(p);
144         return drc_pmem_bind(p);
145 }
146
147
148 static int papr_scm_meta_get(struct papr_scm_priv *p,
149                              struct nd_cmd_get_config_data_hdr *hdr)
150 {
151         unsigned long data[PLPAR_HCALL_BUFSIZE];
152         unsigned long offset, data_offset;
153         int len, read;
154         int64_t ret;
155
156         if ((hdr->in_offset + hdr->in_length) > p->metadata_size)
157                 return -EINVAL;
158
159         for (len = hdr->in_length; len; len -= read) {
160
161                 data_offset = hdr->in_length - len;
162                 offset = hdr->in_offset + data_offset;
163
164                 if (len >= 8)
165                         read = 8;
166                 else if (len >= 4)
167                         read = 4;
168                 else if (len >= 2)
169                         read = 2;
170                 else
171                         read = 1;
172
173                 ret = plpar_hcall(H_SCM_READ_METADATA, data, p->drc_index,
174                                   offset, read);
175
176                 if (ret == H_PARAMETER) /* bad DRC index */
177                         return -ENODEV;
178                 if (ret)
179                         return -EINVAL; /* other invalid parameter */
180
181                 switch (read) {
182                 case 8:
183                         *(uint64_t *)(hdr->out_buf + data_offset) = be64_to_cpu(data[0]);
184                         break;
185                 case 4:
186                         *(uint32_t *)(hdr->out_buf + data_offset) = be32_to_cpu(data[0] & 0xffffffff);
187                         break;
188
189                 case 2:
190                         *(uint16_t *)(hdr->out_buf + data_offset) = be16_to_cpu(data[0] & 0xffff);
191                         break;
192
193                 case 1:
194                         *(uint8_t *)(hdr->out_buf + data_offset) = (data[0] & 0xff);
195                         break;
196                 }
197         }
198         return 0;
199 }
200
201 static int papr_scm_meta_set(struct papr_scm_priv *p,
202                              struct nd_cmd_set_config_hdr *hdr)
203 {
204         unsigned long offset, data_offset;
205         int len, wrote;
206         unsigned long data;
207         __be64 data_be;
208         int64_t ret;
209
210         if ((hdr->in_offset + hdr->in_length) > p->metadata_size)
211                 return -EINVAL;
212
213         for (len = hdr->in_length; len; len -= wrote) {
214
215                 data_offset = hdr->in_length - len;
216                 offset = hdr->in_offset + data_offset;
217
218                 if (len >= 8) {
219                         data = *(uint64_t *)(hdr->in_buf + data_offset);
220                         data_be = cpu_to_be64(data);
221                         wrote = 8;
222                 } else if (len >= 4) {
223                         data = *(uint32_t *)(hdr->in_buf + data_offset);
224                         data &= 0xffffffff;
225                         data_be = cpu_to_be32(data);
226                         wrote = 4;
227                 } else if (len >= 2) {
228                         data = *(uint16_t *)(hdr->in_buf + data_offset);
229                         data &= 0xffff;
230                         data_be = cpu_to_be16(data);
231                         wrote = 2;
232                 } else {
233                         data_be = *(uint8_t *)(hdr->in_buf + data_offset);
234                         data_be &= 0xff;
235                         wrote = 1;
236                 }
237
238                 ret = plpar_hcall_norets(H_SCM_WRITE_METADATA, p->drc_index,
239                                          offset, data_be, wrote);
240                 if (ret == H_PARAMETER) /* bad DRC index */
241                         return -ENODEV;
242                 if (ret)
243                         return -EINVAL; /* other invalid parameter */
244         }
245
246         return 0;
247 }
248
249 int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
250                 unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
251 {
252         struct nd_cmd_get_config_size *get_size_hdr;
253         struct papr_scm_priv *p;
254
255         /* Only dimm-specific calls are supported atm */
256         if (!nvdimm)
257                 return -EINVAL;
258
259         p = nvdimm_provider_data(nvdimm);
260
261         switch (cmd) {
262         case ND_CMD_GET_CONFIG_SIZE:
263                 get_size_hdr = buf;
264
265                 get_size_hdr->status = 0;
266                 get_size_hdr->max_xfer = 8;
267                 get_size_hdr->config_size = p->metadata_size;
268                 *cmd_rc = 0;
269                 break;
270
271         case ND_CMD_GET_CONFIG_DATA:
272                 *cmd_rc = papr_scm_meta_get(p, buf);
273                 break;
274
275         case ND_CMD_SET_CONFIG_DATA:
276                 *cmd_rc = papr_scm_meta_set(p, buf);
277                 break;
278
279         default:
280                 return -EINVAL;
281         }
282
283         dev_dbg(&p->pdev->dev, "returned with cmd_rc = %d\n", *cmd_rc);
284
285         return 0;
286 }
287
288 static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
289 {
290         struct device *dev = &p->pdev->dev;
291         struct nd_mapping_desc mapping;
292         struct nd_region_desc ndr_desc;
293         unsigned long dimm_flags;
294         int target_nid, online_nid;
295
296         p->bus_desc.ndctl = papr_scm_ndctl;
297         p->bus_desc.module = THIS_MODULE;
298         p->bus_desc.of_node = p->pdev->dev.of_node;
299         p->bus_desc.provider_name = kstrdup(p->pdev->name, GFP_KERNEL);
300
301         if (!p->bus_desc.provider_name)
302                 return -ENOMEM;
303
304         p->bus = nvdimm_bus_register(NULL, &p->bus_desc);
305         if (!p->bus) {
306                 dev_err(dev, "Error creating nvdimm bus %pOF\n", p->dn);
307                 kfree(p->bus_desc.provider_name);
308                 return -ENXIO;
309         }
310
311         dimm_flags = 0;
312         set_bit(NDD_LABELING, &dimm_flags);
313
314         p->nvdimm = nvdimm_create(p->bus, p, NULL, dimm_flags,
315                                   PAPR_SCM_DIMM_CMD_MASK, 0, NULL);
316         if (!p->nvdimm) {
317                 dev_err(dev, "Error creating DIMM object for %pOF\n", p->dn);
318                 goto err;
319         }
320
321         if (nvdimm_bus_check_dimm_count(p->bus, 1))
322                 goto err;
323
324         /* now add the region */
325
326         memset(&mapping, 0, sizeof(mapping));
327         mapping.nvdimm = p->nvdimm;
328         mapping.start = 0;
329         mapping.size = p->blocks * p->block_size; // XXX: potential overflow?
330
331         memset(&ndr_desc, 0, sizeof(ndr_desc));
332         target_nid = dev_to_node(&p->pdev->dev);
333         online_nid = numa_map_to_online_node(target_nid);
334         ndr_desc.numa_node = online_nid;
335         ndr_desc.target_node = target_nid;
336         ndr_desc.res = &p->res;
337         ndr_desc.of_node = p->dn;
338         ndr_desc.provider_data = p;
339         ndr_desc.mapping = &mapping;
340         ndr_desc.num_mappings = 1;
341         ndr_desc.nd_set = &p->nd_set;
342
343         if (p->is_volatile)
344                 p->region = nvdimm_volatile_region_create(p->bus, &ndr_desc);
345         else {
346                 set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc.flags);
347                 p->region = nvdimm_pmem_region_create(p->bus, &ndr_desc);
348         }
349         if (!p->region) {
350                 dev_err(dev, "Error registering region %pR from %pOF\n",
351                                 ndr_desc.res, p->dn);
352                 goto err;
353         }
354         if (target_nid != online_nid)
355                 dev_info(dev, "Region registered with target node %d and online node %d",
356                          target_nid, online_nid);
357
358         return 0;
359
360 err:    nvdimm_bus_unregister(p->bus);
361         kfree(p->bus_desc.provider_name);
362         return -ENXIO;
363 }
364
365 static int papr_scm_probe(struct platform_device *pdev)
366 {
367         struct device_node *dn = pdev->dev.of_node;
368         u32 drc_index, metadata_size;
369         u64 blocks, block_size;
370         struct papr_scm_priv *p;
371         const char *uuid_str;
372         u64 uuid[2];
373         int rc;
374
375         /* check we have all the required DT properties */
376         if (of_property_read_u32(dn, "ibm,my-drc-index", &drc_index)) {
377                 dev_err(&pdev->dev, "%pOF: missing drc-index!\n", dn);
378                 return -ENODEV;
379         }
380
381         if (of_property_read_u64(dn, "ibm,block-size", &block_size)) {
382                 dev_err(&pdev->dev, "%pOF: missing block-size!\n", dn);
383                 return -ENODEV;
384         }
385
386         if (of_property_read_u64(dn, "ibm,number-of-blocks", &blocks)) {
387                 dev_err(&pdev->dev, "%pOF: missing number-of-blocks!\n", dn);
388                 return -ENODEV;
389         }
390
391         if (of_property_read_string(dn, "ibm,unit-guid", &uuid_str)) {
392                 dev_err(&pdev->dev, "%pOF: missing unit-guid!\n", dn);
393                 return -ENODEV;
394         }
395
396
397         p = kzalloc(sizeof(*p), GFP_KERNEL);
398         if (!p)
399                 return -ENOMEM;
400
401         /* optional DT properties */
402         of_property_read_u32(dn, "ibm,metadata-size", &metadata_size);
403
404         p->dn = dn;
405         p->drc_index = drc_index;
406         p->block_size = block_size;
407         p->blocks = blocks;
408         p->is_volatile = !of_property_read_bool(dn, "ibm,cache-flush-required");
409
410         /* We just need to ensure that set cookies are unique across */
411         uuid_parse(uuid_str, (uuid_t *) uuid);
412         /*
413          * cookie1 and cookie2 are not really little endian
414          * we store a little endian representation of the
415          * uuid str so that we can compare this with the label
416          * area cookie irrespective of the endian config with which
417          * the kernel is built.
418          */
419         p->nd_set.cookie1 = cpu_to_le64(uuid[0]);
420         p->nd_set.cookie2 = cpu_to_le64(uuid[1]);
421
422         /* might be zero */
423         p->metadata_size = metadata_size;
424         p->pdev = pdev;
425
426         /* request the hypervisor to bind this region to somewhere in memory */
427         rc = drc_pmem_bind(p);
428
429         /* If phyp says drc memory still bound then force unbound and retry */
430         if (rc == H_OVERLAP)
431                 rc = drc_pmem_query_n_bind(p);
432
433         if (rc != H_SUCCESS) {
434                 dev_err(&p->pdev->dev, "bind err: %d\n", rc);
435                 rc = -ENXIO;
436                 goto err;
437         }
438
439         /* setup the resource for the newly bound range */
440         p->res.start = p->bound_addr;
441         p->res.end   = p->bound_addr + p->blocks * p->block_size - 1;
442         p->res.name  = pdev->name;
443         p->res.flags = IORESOURCE_MEM;
444
445         rc = papr_scm_nvdimm_init(p);
446         if (rc)
447                 goto err2;
448
449         platform_set_drvdata(pdev, p);
450
451         return 0;
452
453 err2:   drc_pmem_unbind(p);
454 err:    kfree(p);
455         return rc;
456 }
457
/*
 * Undo papr_scm_probe(): unregister from libnvdimm first (so nothing
 * can still reach the region), then release the hypervisor binding and
 * free the private data. Teardown order mirrors probe in reverse.
 */
static int papr_scm_remove(struct platform_device *pdev)
{
	struct papr_scm_priv *p = platform_get_drvdata(pdev);

	nvdimm_bus_unregister(p->bus);
	drc_pmem_unbind(p);
	kfree(p->bus_desc.provider_name);
	kfree(p);

	return 0;
}
469
/* Device-tree match table: one "ibm,pmemory" node per SCM region */
static const struct of_device_id papr_scm_match[] = {
	{ .compatible = "ibm,pmemory" },
	{ },
};
474
/* Platform driver glue and module boilerplate */
static struct platform_driver papr_scm_driver = {
	.probe = papr_scm_probe,
	.remove = papr_scm_remove,
	.driver = {
		.name = "papr_scm",
		.of_match_table = papr_scm_match,
	},
};

module_platform_driver(papr_scm_driver);
MODULE_DEVICE_TABLE(of, papr_scm_match);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");