acpi/nfit: Add support for Intel DSM 1.8 commands
drivers/acpi/nfit/core.c
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/sysfs.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include <asm/cacheflush.h>
#include <acpi/nfit.h>
#include "intel.h"
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

static bool disable_vendor_specific;
module_param(disable_vendor_specific, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vendor_specific,
		"Limit commands to the publicly specified set");

static unsigned long override_dsm_mask;
module_param(override_dsm_mask, ulong, S_IRUGO);
MODULE_PARM_DESC(override_dsm_mask, "Bitmask of allowed NVDIMM DSM functions");

static int default_dsm_family = -1;
module_param(default_dsm_family, int, S_IRUGO);
MODULE_PARM_DESC(default_dsm_family,
		"Try this DSM type first when identifying NVDIMM family");

static bool no_init_ars;
module_param(no_init_ars, bool, 0644);
MODULE_PARM_DESC(no_init_ars, "Skip ARS run at nfit init time");

LIST_HEAD(acpi_descs);
DEFINE_MUTEX(acpi_desc_lock);

static struct workqueue_struct *nfit_wq;

struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static guid_t nfit_uuid[NFIT_UUID_MAX];

const guid_t *to_nfit_uuid(enum nfit_uuids id)
{
	return &nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static struct acpi_nfit_desc *to_acpi_nfit_desc(
		struct nvdimm_bus_descriptor *nd_desc)
{
	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}

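/*
 * Translate the firmware status of a bus-scope command (ARS
 * capability/start/status and error clearing) into a Linux errno.
 * Any other command with a non-zero status maps to -EIO.
 */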
static int xlat_bus_status(void *buf, unsigned int cmd, u32 status)
{
	struct nd_cmd_clear_error *clear_err;
	struct nd_cmd_ars_status *ars_status;
	u16 flags;

	switch (cmd) {
	case ND_CMD_ARS_CAP:
		if ((status & 0xffff) == NFIT_ARS_CAP_NONE)
			return -ENOTTY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;

		/* No supported scan types for this range */
		flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
		if ((status >> 16 & flags) == 0)
			return -ENOTTY;
		return 0;
	case ND_CMD_ARS_START:
		/* ARS is in progress */
		if ((status & 0xffff) == NFIT_ARS_START_BUSY)
			return -EBUSY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		return 0;
	case ND_CMD_ARS_STATUS:
		ars_status = buf;
		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		/* Check extended status (upper two bytes) */
		if (status == NFIT_ARS_STATUS_DONE)
			return 0;

		/* ARS is in progress */
		if (status == NFIT_ARS_STATUS_BUSY)
			return -EBUSY;

		/* No ARS performed for the current boot */
		if (status == NFIT_ARS_STATUS_NONE)
			return -EAGAIN;

		/*
		 * ARS interrupted, either we overflowed or some other
		 * agent wants the scan to stop.  If we didn't overflow
		 * then just continue with the returned results.
		 */
		if (status == NFIT_ARS_STATUS_INTR) {
			if (ars_status->out_length >= 40 && (ars_status->flags
						& NFIT_ARS_F_OVERFLOW))
				return -ENOSPC;
			return 0;
		}

		/* Unknown status */
		if (status >> 16)
			return -EIO;
		return 0;
	case ND_CMD_CLEAR_ERROR:
		clear_err = buf;
		if (status & 0xffff)
			return -EIO;
		if (!clear_err->cleared)
			return -EIO;
		if (clear_err->length > clear_err->cleared)
			return clear_err->cleared;
		return 0;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}

#define ACPI_LABELS_LOCKED 3

static int xlat_nvdimm_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
		u32 status)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	switch (cmd) {
	case ND_CMD_GET_CONFIG_SIZE:
		/*
		 * In the _LSI, _LSR, _LSW case the locked status is
		 * communicated via the read/write commands.
		 */
		if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags))
			break;

		if (status >> 16 & ND_CONFIG_LOCKED)
			return -EACCES;
		break;
	case ND_CMD_GET_CONFIG_DATA:
		if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
				&& status == ACPI_LABELS_LOCKED)
			return -EACCES;
		break;
	case ND_CMD_SET_CONFIG_DATA:
		if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags)
				&& status == ACPI_LABELS_LOCKED)
			return -EACCES;
		break;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}

static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
		u32 status)
{
	if (!nvdimm)
		return xlat_bus_status(buf, cmd, status);
	return xlat_nvdimm_status(nvdimm, buf, cmd, status);
}

/* convert _LS{I,R} packages to the buffer object acpi_nfit_ctl expects */
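/*
 * For example, a _LSR result of the (illustrative) form
 * Package () { Status (Integer), Data (Buffer) } is flattened into a
 * single buffer object: the low 32 bits of the status integer followed
 * by the raw data bytes.
 */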
static union acpi_object *pkg_to_buf(union acpi_object *pkg)
{
	int i;
	void *dst;
	size_t size = 0;
	union acpi_object *buf = NULL;

	if (pkg->type != ACPI_TYPE_PACKAGE) {
		WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
				pkg->type);
		goto err;
	}

	for (i = 0; i < pkg->package.count; i++) {
		union acpi_object *obj = &pkg->package.elements[i];

		if (obj->type == ACPI_TYPE_INTEGER)
			size += 4;
		else if (obj->type == ACPI_TYPE_BUFFER)
			size += obj->buffer.length;
		else {
			WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
					obj->type);
			goto err;
		}
	}

	buf = ACPI_ALLOCATE(sizeof(*buf) + size);
	if (!buf)
		goto err;

	dst = buf + 1;
	buf->type = ACPI_TYPE_BUFFER;
	buf->buffer.length = size;
	buf->buffer.pointer = dst;
	for (i = 0; i < pkg->package.count; i++) {
		union acpi_object *obj = &pkg->package.elements[i];

		if (obj->type == ACPI_TYPE_INTEGER) {
			memcpy(dst, &obj->integer.value, 4);
			dst += 4;
		} else if (obj->type == ACPI_TYPE_BUFFER) {
			memcpy(dst, obj->buffer.pointer, obj->buffer.length);
			dst += obj->buffer.length;
		}
	}
err:
	ACPI_FREE(pkg);
	return buf;
}

static union acpi_object *int_to_buf(union acpi_object *integer)
{
	union acpi_object *buf = NULL;
	void *dst = NULL;

	/*
	 * Validate the element type before allocating, so a BIOS bug
	 * returns NULL instead of an uninitialized buffer object.
	 */
	if (integer->type != ACPI_TYPE_INTEGER) {
		WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
				integer->type);
		goto err;
	}

	buf = ACPI_ALLOCATE(sizeof(*buf) + 4);
	if (!buf)
		goto err;

	dst = buf + 1;
	buf->type = ACPI_TYPE_BUFFER;
	buf->buffer.length = 4;
	buf->buffer.pointer = dst;
	memcpy(dst, &integer->integer.value, 4);
err:
	ACPI_FREE(integer);
	return buf;
}

static union acpi_object *acpi_label_write(acpi_handle handle, u32 offset,
		u32 len, void *data)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_object_list input = {
		.count = 3,
		.pointer = (union acpi_object []) {
			[0] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = offset,
			},
			[1] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = len,
			},
			[2] = {
				.buffer.type = ACPI_TYPE_BUFFER,
				.buffer.pointer = data,
				.buffer.length = len,
			},
		},
	};

	rc = acpi_evaluate_object(handle, "_LSW", &input, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return int_to_buf(buf.pointer);
}

static union acpi_object *acpi_label_read(acpi_handle handle, u32 offset,
		u32 len)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_object_list input = {
		.count = 2,
		.pointer = (union acpi_object []) {
			[0] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = offset,
			},
			[1] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = len,
			},
		},
	};

	rc = acpi_evaluate_object(handle, "_LSR", &input, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return pkg_to_buf(buf.pointer);
}

static union acpi_object *acpi_label_info(acpi_handle handle)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };

	rc = acpi_evaluate_object(handle, "_LSI", NULL, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return pkg_to_buf(buf.pointer);
}

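/*
 * The Intel firmware-update and security commands are marshaled at DSM
 * revision id 2 (per the Intel DSM 1.8 document this change tracks);
 * every other family/function pair defaults to revision id 1.
 */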
static u8 nfit_dsm_revid(unsigned family, unsigned func)
{
	static const u8 revid_table[NVDIMM_FAMILY_MAX+1][32] = {
		[NVDIMM_FAMILY_INTEL] = {
			[NVDIMM_INTEL_GET_MODES] = 2,
			[NVDIMM_INTEL_GET_FWINFO] = 2,
			[NVDIMM_INTEL_START_FWUPDATE] = 2,
			[NVDIMM_INTEL_SEND_FWUPDATE] = 2,
			[NVDIMM_INTEL_FINISH_FWUPDATE] = 2,
			[NVDIMM_INTEL_QUERY_FWUPDATE] = 2,
			[NVDIMM_INTEL_SET_THRESHOLD] = 2,
			[NVDIMM_INTEL_INJECT_ERROR] = 2,
			[NVDIMM_INTEL_GET_SECURITY_STATE] = 2,
			[NVDIMM_INTEL_SET_PASSPHRASE] = 2,
			[NVDIMM_INTEL_DISABLE_PASSPHRASE] = 2,
			[NVDIMM_INTEL_UNLOCK_UNIT] = 2,
			[NVDIMM_INTEL_FREEZE_LOCK] = 2,
			[NVDIMM_INTEL_SECURE_ERASE] = 2,
			[NVDIMM_INTEL_OVERWRITE] = 2,
			[NVDIMM_INTEL_QUERY_OVERWRITE] = 2,
		},
	};
	u8 id;

	if (family > NVDIMM_FAMILY_MAX)
		return 0;
	if (func > 31)
		return 0;
	id = revid_table[family][func];
	if (id == 0)
		return 1; /* default */
	return id;
}

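/*
 * Avoid dumping security command payloads, which may carry passphrase
 * material, to the kernel log unless CONFIG_NFIT_SECURITY_DEBUG is
 * explicitly enabled.
 */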
static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem && nfit_mem->family == NVDIMM_FAMILY_INTEL
			&& func >= NVDIMM_INTEL_GET_SECURITY_STATE
			&& func <= NVDIMM_INTEL_MASTER_SECURE_ERASE)
		return IS_ENABLED(CONFIG_NFIT_SECURITY_DEBUG);
	return true;
}

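/**
 * acpi_nfit_ctl - dispatch an NVDIMM command to platform firmware
 * @nd_desc: bus descriptor embedded in an acpi_nfit_desc
 * @nvdimm: target dimm, or NULL for a bus-scope command
 * @cmd: ND_CMD_* command number
 * @buf: command payload, output fields are filled in place after the input
 * @buf_len: total size of @buf
 * @cmd_rc: out parameter for the firmware status translated by xlat_status()
 *
 * Prefers the named label methods (_LSI/_LSR/_LSW) over _DSM when the
 * dimm advertises them.  Returns 0 on success, the positive number of
 * unfilled output bytes when the result is smaller than @buf_len, or a
 * negative errno on failure.
 */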
int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
		unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	union acpi_object in_obj, in_buf, *out_obj;
	const struct nd_cmd_desc *desc = NULL;
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_pkg *call_pkg = NULL;
	const char *cmd_name, *dimm_name;
	unsigned long cmd_mask, dsm_mask;
	u32 offset, fw_status = 0;
	acpi_handle handle;
	unsigned int func;
	const guid_t *guid;
	int rc, i;

	if (cmd_rc)
		*cmd_rc = -EINVAL;
	func = cmd;
	if (cmd == ND_CMD_CALL) {
		call_pkg = buf;
		func = call_pkg->nd_command;

		for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
			if (call_pkg->nd_reserved2[i])
				return -EINVAL;
	}

	if (nvdimm) {
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;
		if (call_pkg && nfit_mem->family != call_pkg->nd_family)
			return -ENOTTY;

		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		cmd_mask = nvdimm_cmd_mask(nvdimm);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		guid = to_nfit_uuid(nfit_mem->family);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		cmd_mask = nd_desc->cmd_mask;
		dsm_mask = cmd_mask;
		if (cmd == ND_CMD_CALL)
			dsm_mask = nd_desc->bus_dsm_mask;
		desc = nd_cmd_bus_desc(cmd);
		guid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (call_pkg) {
		/* skip over package wrapper */
		in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
		in_buf.buffer.length = call_pkg->nd_size_in;
	}

	dev_dbg(dev, "%s cmd: %d: func: %d input length: %d\n",
		dimm_name, cmd, func, in_buf.buffer.length);
	if (payload_dumpable(nvdimm, func))
		print_hex_dump_debug("nvdimm in  ", DUMP_PREFIX_OFFSET, 4, 4,
				in_buf.buffer.pointer,
				min_t(u32, 256, in_buf.buffer.length), true);

	/* call the BIOS, prefer the named methods over _DSM if available */
	if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE
			&& test_bit(NFIT_MEM_LSR, &nfit_mem->flags))
		out_obj = acpi_label_info(handle);
	else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA
			&& test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
		struct nd_cmd_get_config_data_hdr *p = buf;

		out_obj = acpi_label_read(handle, p->in_offset, p->in_length);
	} else if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA
			&& test_bit(NFIT_MEM_LSW, &nfit_mem->flags)) {
		struct nd_cmd_set_config_hdr *p = buf;

		out_obj = acpi_label_write(handle, p->in_offset, p->in_length,
				p->in_buf);
	} else {
		u8 revid;

		if (nvdimm)
			revid = nfit_dsm_revid(nfit_mem->family, func);
		else
			revid = 1;
		out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
	}

	if (!out_obj) {
		dev_dbg(dev, "%s _DSM failed cmd: %s\n", dimm_name, cmd_name);
		return -EINVAL;
	}

	if (call_pkg) {
		call_pkg->nd_fw_size = out_obj->buffer.length;
		memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
			out_obj->buffer.pointer,
			min(call_pkg->nd_fw_size, call_pkg->nd_size_out));

		ACPI_FREE(out_obj);
		/*
		 * A pass-through command may target a FW function whose
		 * output size is not known in advance, so the caller
		 * determines the required size from nd_fw_size.  If we
		 * returned an error here (as elsewhere) the caller could
		 * not rely on the returned data to make that calculation.
		 */
		if (cmd_rc)
			*cmd_rc = 0;
		return 0;
	}

	if (out_obj->type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
				dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
			cmd_name, out_obj->buffer.length);
	print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
			out_obj->buffer.pointer,
			min_t(u32, 128, out_obj->buffer.length), true);

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer,
				out_obj->buffer.length - offset);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s output object underflow cmd: %s field: %d\n",
					dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s output overrun cmd: %s field: %d\n",
					dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}

	/*
	 * Set fw_status for all the commands with a known format to be
	 * later interpreted by xlat_status().
	 */
	if (i >= 1 && ((!nvdimm && cmd >= ND_CMD_ARS_CAP
					&& cmd <= ND_CMD_CLEAR_ERROR)
				|| (nvdimm && cmd >= ND_CMD_SMART
					&& cmd <= ND_CMD_VENDOR)))
		fw_status = *(u32 *) out_obj->buffer.pointer;

	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
			if (cmd_rc)
				*cmd_rc = xlat_status(nvdimm, buf, cmd,
						fw_status);
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else {
		rc = 0;
		if (cmd_rc)
			*cmd_rc = xlat_status(nvdimm, buf, cmd, fw_status);
	}

 out:
	ACPI_FREE(out_obj);

	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_ctl);

static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",
	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}

int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (guid_equal(to_nfit_uuid(i), (guid_t *)&spa->range_guid))
			return i;
	return -1;
}

static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_system_address *spa)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	if (spa->header.length != sizeof(*spa))
		return false;

	list_for_each_entry(nfit_spa, &prev->spas, list) {
		if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return true;
		}
	}

	nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa),
			GFP_KERNEL);
	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	memcpy(nfit_spa->spa, spa, sizeof(*spa));
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "spa index: %d type: %s\n",
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}

static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_memory_map *memdev)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev;

	if (memdev->header.length != sizeof(*memdev))
		return false;

	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
		if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
			return true;
		}

	nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev),
			GFP_KERNEL);
	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev));
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "memdev handle: %#x spa: %d dcr: %d flags: %#x\n",
			memdev->device_handle, memdev->range_index,
			memdev->region_index, memdev->flags);
	return true;
}

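/**
 * nfit_get_smbios_id - map an NFIT device handle to its SMBIOS handle
 * @device_handle: NFIT device handle to search for
 * @flags: out parameter for the matching memdev flags
 *
 * Walks every registered NFIT descriptor looking for a dimm with a
 * matching device handle.  Returns the SMBIOS physical id on success,
 * or -ENODEV if no dimm matches.
 */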
int nfit_get_smbios_id(u32 device_handle, u16 *flags)
{
	struct acpi_nfit_memory_map *memdev;
	struct acpi_nfit_desc *acpi_desc;
	struct nfit_mem *nfit_mem;

	mutex_lock(&acpi_desc_lock);
	list_for_each_entry(acpi_desc, &acpi_descs, list) {
		mutex_lock(&acpi_desc->init_mutex);
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
			memdev = __to_nfit_memdev(nfit_mem);
			if (memdev->device_handle == device_handle) {
				mutex_unlock(&acpi_desc->init_mutex);
				mutex_unlock(&acpi_desc_lock);
				*flags = memdev->flags;
				return memdev->physical_id;
			}
		}
		mutex_unlock(&acpi_desc->init_mutex);
	}
	mutex_unlock(&acpi_desc_lock);

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(nfit_get_smbios_id);

/*
 * An implementation may provide a truncated control region if no block windows
 * are defined.
 */
static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr)
{
	if (dcr->header.length < offsetof(struct acpi_nfit_control_region,
				window_size))
		return 0;
	if (dcr->windows)
		return sizeof(*dcr);
	return offsetof(struct acpi_nfit_control_region, window_size);
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_control_region *dcr)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr;

	if (!sizeof_dcr(dcr))
		return false;

	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
		if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) {
			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
			return true;
		}

	nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr),
			GFP_KERNEL);
	if (!nfit_dcr)
		return false;
	INIT_LIST_HEAD(&nfit_dcr->list);
	memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr));
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "dcr index: %d windows: %d\n",
			dcr->region_index, dcr->windows);
	return true;
}

static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_data_region *bdw)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw;

	if (bdw->header.length != sizeof(*bdw))
		return false;
	list_for_each_entry(nfit_bdw, &prev->bdws, list)
		if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
			return true;
		}

	nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw),
			GFP_KERNEL);
	if (!nfit_bdw)
		return false;
	INIT_LIST_HEAD(&nfit_bdw->list);
	memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw));
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "bdw dcr: %d windows: %d\n",
			bdw->region_index, bdw->windows);
	return true;
}

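/*
 * The interleave and flush-hint table definitions embed one trailing
 * array element in the struct itself, hence the 'count - 1' terms in
 * the size calculations below.
 */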
static size_t sizeof_idt(struct acpi_nfit_interleave *idt)
{
	if (idt->header.length < sizeof(*idt))
		return 0;
	return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1);
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_interleave *idt)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_idt *nfit_idt;

	if (!sizeof_idt(idt))
		return false;

	list_for_each_entry(nfit_idt, &prev->idts, list) {
		if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt))
			continue;

		if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) {
			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
			return true;
		}
	}

	nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt),
			GFP_KERNEL);
	if (!nfit_idt)
		return false;
	INIT_LIST_HEAD(&nfit_idt->list);
	memcpy(nfit_idt->idt, idt, sizeof_idt(idt));
	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
	dev_dbg(dev, "idt index: %d num_lines: %d\n",
			idt->interleave_index, idt->line_count);
	return true;
}

static size_t sizeof_flush(struct acpi_nfit_flush_address *flush)
{
	if (flush->header.length < sizeof(*flush))
		return 0;
	return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1);
}

static bool add_flush(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_flush_address *flush)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_flush *nfit_flush;

	if (!sizeof_flush(flush))
		return false;

	list_for_each_entry(nfit_flush, &prev->flushes, list) {
		if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush))
			continue;

		if (memcmp(nfit_flush->flush, flush,
					sizeof_flush(flush)) == 0) {
			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
			return true;
		}
	}

	nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush)
			+ sizeof_flush(flush), GFP_KERNEL);
	if (!nfit_flush)
		return false;
	INIT_LIST_HEAD(&nfit_flush->list);
	memcpy(nfit_flush->flush, flush, sizeof_flush(flush));
	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
	dev_dbg(dev, "nfit_flush handle: %d hint_count: %d\n",
			flush->device_handle, flush->hint_count);
	return true;
}

static bool add_platform_cap(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_capabilities *pcap)
{
	struct device *dev = acpi_desc->dev;
	u32 mask;

	mask = (1 << (pcap->highest_capability + 1)) - 1;
	acpi_desc->platform_cap = pcap->capabilities & mask;
	dev_dbg(dev, "cap: %#x\n", acpi_desc->platform_cap);
	return true;
}

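/*
 * Parse one NFIT sub-table, coalescing it with any identical table
 * from the previous scan, and return a pointer to the next table.
 * Returns NULL at the end of the NFIT and ERR_PTR(-ENOMEM) if an
 * allocation fails.
 */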
static void *add_table(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev, void *table, const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	if (!hdr->length) {
		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
			hdr->type);
		return NULL;
	}

	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "smbios\n");
		break;
	case ACPI_NFIT_TYPE_CAPABILITIES:
		if (!add_platform_cap(acpi_desc, table))
			return err;
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}

static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)
			continue;

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
				continue;
			if (nfit_memdev->memdev->device_handle != device_handle)
				continue;
			if (nfit_memdev->memdev->region_index != dcr)
				continue;

			nfit_mem->spa_bdw = nfit_spa->spa;
			return;
		}
	}

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;
}

static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_memdev *nfit_memdev;
	struct nfit_bdw *nfit_bdw;
	struct nfit_idt *nfit_idt;
	u16 idt_idx, range_index;

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
			continue;
		nfit_mem->bdw = nfit_bdw->bdw;
		break;
	}

	if (!nfit_mem->bdw)
		return;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

	if (!nfit_mem->spa_bdw)
		return;

	range_index = nfit_mem->spa_bdw->range_index;
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		if (nfit_memdev->memdev->range_index != range_index ||
				nfit_memdev->memdev->region_index != dcr)
			continue;
		nfit_mem->memdev_bdw = nfit_memdev->memdev;
		idt_idx = nfit_memdev->memdev->interleave_index;
		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
			if (nfit_idt->idt->interleave_index != idt_idx)
				continue;
			nfit_mem->idt_bdw = nfit_idt->idt;
			break;
		}
		break;
	}
}

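/*
 * Build or extend nfit_mem objects for every dimm mapped into @spa, or,
 * when @spa is NULL, for dimms that have no SPA association at all.
 */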
static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = spa ? nfit_spa_type(spa) : 0;

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		if (spa)
			return 0;
	}

	/*
	 * This loop runs in two modes, when a dimm is mapped the loop
	 * adds memdev associations to an existing dimm, or creates a
	 * dimm. In the unmapped dimm case this loop sweeps for memdev
	 * instances with an invalid / zero range_index and adds those
	 * dimms without spa associations.
	 */
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct nfit_flush *nfit_flush;
		struct nfit_dcr *nfit_dcr;
		u32 device_handle;
		u16 dcr;

		if (spa && nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		if (!spa && nfit_memdev->memdev->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		device_handle = nfit_memdev->memdev->device_handle;
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->device_handle
					== device_handle) {
				found = nfit_mem;
				break;
			}

		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
			nfit_mem->acpi_desc = acpi_desc;
			list_add(&nfit_mem->list, &acpi_desc->dimms);
		}

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != dcr)
				continue;
			/*
			 * Record the control region for the dimm.  For
			 * the ACPI 6.1 case, where there are separate
			 * control regions for the pmem vs blk
			 * interfaces, be sure to record the extended
			 * blk details.
			 */
			if (!nfit_mem->dcr)
				nfit_mem->dcr = nfit_dcr->dcr;
			else if (nfit_mem->dcr->windows == 0
					&& nfit_dcr->dcr->windows)
				nfit_mem->dcr = nfit_dcr->dcr;
			break;
		}

		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
			struct acpi_nfit_flush_address *flush;
			u16 i;

			if (nfit_flush->flush->device_handle != device_handle)
				continue;
			nfit_mem->nfit_flush = nfit_flush;
			flush = nfit_flush->flush;
			nfit_mem->flush_wpq = devm_kcalloc(acpi_desc->dev,
					flush->hint_count,
					sizeof(struct resource),
					GFP_KERNEL);
			if (!nfit_mem->flush_wpq)
				return -ENOMEM;
			for (i = 0; i < flush->hint_count; i++) {
				struct resource *res = &nfit_mem->flush_wpq[i];

				res->start = flush->hint_address[i];
				res->end = res->start + 8 - 1;
			}
			break;
		}

		if (dcr && !nfit_mem->dcr) {
			dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
					spa->range_index, dcr);
			return -ENODEV;
		}

		if (type == NFIT_SPA_DCR) {
			struct nfit_idt *nfit_idt;
			u16 idt_idx;

			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
			idt_idx = nfit_memdev->memdev->interleave_index;
			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
				if (nfit_idt->idt->interleave_index != idt_idx)
					continue;
				nfit_mem->idt_dcr = nfit_idt->idt;
				break;
			}
			nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
		} else if (type == NFIT_SPA_PM) {
			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges, record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		} else
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
	}

	return 0;
}

static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
		return -1;
	else if (handleA > handleB)
		return 1;
	return 0;
}

static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;
	int rc;

	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s).  From each MEMDEV find the
	 * corresponding DCR.  Then, if we're operating on a SPA-DCR,
	 * try to find a SPA-BDW and a corresponding BDW that references
	 * the DCR.  Throw it all into an nfit_mem object.  Note, that
	 * BDWs are optional.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		rc = __nfit_mem_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	/*
	 * If a DIMM has failed to be mapped into SPA there will be no
	 * SPA entries above. Find and register all the unmapped DIMMs
	 * for reporting and recovery purposes.
	 */
	rc = __nfit_mem_init(acpi_desc, NULL);
	if (rc)
		return rc;

	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

	return 0;
}

static ssize_t bus_dsm_mask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);

	return sprintf(buf, "%#lx\n", nd_desc->bus_dsm_mask);
}
static struct device_attribute dev_attr_bus_dsm_mask =
		__ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL);

static ssize_t revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);

static ssize_t hw_error_scrub_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->scrub_mode);
}

/*
 * The 'hw_error_scrub' attribute can have the following values written to it:
 * '0': Switch to the default mode where an exception will only insert
 *      the address of the memory error into the poison and badblocks lists.
 * '1': Enable a full scrub to happen if an exception for a memory error is
 *      received.
 */
static ssize_t hw_error_scrub_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc;
	long val;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		switch (val) {
		case HW_ERROR_SCRUB_ON:
			acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON;
			break;
		case HW_ERROR_SCRUB_OFF:
			acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF;
			break;
		default:
			rc = -EINVAL;
			break;
		}
	}
	device_unlock(dev);
	if (rc)
		return rc;
	return size;
}
static DEVICE_ATTR_RW(hw_error_scrub);

/*
 * This shows the number of full Address Range Scrubs that have been
 * completed since driver load time. Userspace can wait on this using
 * select/poll etc. A '+' at the end indicates an ARS is in progress.
 */
static ssize_t scrub_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc = -ENXIO;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		mutex_lock(&acpi_desc->init_mutex);
		rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
				acpi_desc->scrub_busy
				&& !acpi_desc->cancel ? "+\n" : "\n");
		mutex_unlock(&acpi_desc->init_mutex);
	}
	device_unlock(dev);
	return rc;
}

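/* Writing '1' to the 'scrub' attribute triggers an on-demand ARS rescan. */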
static ssize_t scrub_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc;
	long val;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;
	if (val != 1)
		return -EINVAL;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		rc = acpi_nfit_ars_rescan(acpi_desc, 0);
	}
	device_unlock(dev);
	if (rc)
		return rc;
	return size;
}
static DEVICE_ATTR_RW(scrub);

static bool ars_supported(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START
		| 1 << ND_CMD_ARS_STATUS;

	return (nd_desc->cmd_mask & mask) == mask;
}

static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus))
		return 0;
	return a->mode;
}

static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	&dev_attr_scrub.attr,
	&dev_attr_hw_error_scrub.attr,
	&dev_attr_bus_dsm_mask.attr,
	NULL,
};

static const struct attribute_group acpi_nfit_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_attributes,
	.is_visible = nfit_visible,
};

static const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&acpi_nfit_attribute_group,
	NULL,
};

static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return nfit_mem->dcr;
}

static ssize_t handle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id));
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id));
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id));
}
static DEVICE_ATTR_RO(device);

static ssize_t subsystem_vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id));
}
static DEVICE_ATTR_RO(subsystem_vendor);

static ssize_t subsystem_rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n",
			be16_to_cpu(dcr->subsystem_revision_id));
}
static DEVICE_ATTR_RO(subsystem_rev_id);

static ssize_t subsystem_device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id));
}
static DEVICE_ATTR_RO(subsystem_device);

static int num_nvdimm_formats(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	int formats = 0;

	if (nfit_mem->memdev_pmem)
		formats++;
	if (nfit_mem->memdev_bdw)
		formats++;
	return formats;
}

static ssize_t format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
}
static DEVICE_ATTR_RO(format);

static ssize_t format1_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 handle;
	ssize_t rc = -ENXIO;
	struct nfit_mem *nfit_mem;
	struct nfit_memdev *nfit_memdev;
	struct acpi_nfit_desc *acpi_desc;
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	nfit_mem = nvdimm_provider_data(nvdimm);
	acpi_desc = nfit_mem->acpi_desc;
	handle = to_nfit_memdev(dev)->device_handle;

	/* assumes DIMMs have at most 2 published interface codes */
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nfit_dcr *nfit_dcr;

		if (memdev->device_handle != handle)
			continue;

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != memdev->region_index)
				continue;
1517                         if (nfit_dcr->dcr->code == dcr->code)
1518                                 continue;
1519                         rc = sprintf(buf, "0x%04x\n",
1520                                         le16_to_cpu(nfit_dcr->dcr->code));
1521                         break;
1522                 }
                if (rc != -ENXIO)
1524                         break;
1525         }
1526         mutex_unlock(&acpi_desc->init_mutex);
1527         return rc;
1528 }
1529 static DEVICE_ATTR_RO(format1);
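
/*
 * Illustration with hypothetical interface codes: a DIMM that
 * publishes both a PMEM code (e.g. 0x0201) and a BDW code (e.g.
 * 0x0301) shows one via 'format' and the other via 'format1', and
 * 'formats' reads 2.  With a single published code, format1_show()
 * leaves rc at -ENXIO and acpi_nfit_dimm_attr_visible() hides the
 * attribute.
 */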
1530
1531 static ssize_t formats_show(struct device *dev,
1532                 struct device_attribute *attr, char *buf)
1533 {
1534         struct nvdimm *nvdimm = to_nvdimm(dev);
1535
1536         return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm));
1537 }
1538 static DEVICE_ATTR_RO(formats);
1539
1540 static ssize_t serial_show(struct device *dev,
1541                 struct device_attribute *attr, char *buf)
1542 {
1543         struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1544
1545         return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number));
1546 }
1547 static DEVICE_ATTR_RO(serial);
1548
1549 static ssize_t family_show(struct device *dev,
1550                 struct device_attribute *attr, char *buf)
1551 {
1552         struct nvdimm *nvdimm = to_nvdimm(dev);
1553         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1554
1555         if (nfit_mem->family < 0)
1556                 return -ENXIO;
1557         return sprintf(buf, "%d\n", nfit_mem->family);
1558 }
1559 static DEVICE_ATTR_RO(family);
1560
1561 static ssize_t dsm_mask_show(struct device *dev,
1562                 struct device_attribute *attr, char *buf)
1563 {
1564         struct nvdimm *nvdimm = to_nvdimm(dev);
1565         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1566
1567         if (nfit_mem->family < 0)
1568                 return -ENXIO;
1569         return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask);
1570 }
1571 static DEVICE_ATTR_RO(dsm_mask);
1572
1573 static ssize_t flags_show(struct device *dev,
1574                 struct device_attribute *attr, char *buf)
1575 {
1576         struct nvdimm *nvdimm = to_nvdimm(dev);
1577         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1578         u16 flags = __to_nfit_memdev(nfit_mem)->flags;
1579
1580         if (test_bit(NFIT_MEM_DIRTY, &nfit_mem->flags))
1581                 flags |= ACPI_NFIT_MEM_FLUSH_FAILED;
1582
1583         return sprintf(buf, "%s%s%s%s%s%s%s\n",
1584                 flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
1585                 flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
1586                 flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
1587                 flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
1588                 flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "",
1589                 flags & ACPI_NFIT_MEM_MAP_FAILED ? "map_fail " : "",
1590                 flags & ACPI_NFIT_MEM_HEALTH_ENABLED ? "smart_notify " : "");
1591 }
1592 static DEVICE_ATTR_RO(flags);
1593
1594 static ssize_t id_show(struct device *dev,
1595                 struct device_attribute *attr, char *buf)
1596 {
1597         struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1598
1599         if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
1600                 return sprintf(buf, "%04x-%02x-%04x-%08x\n",
1601                                 be16_to_cpu(dcr->vendor_id),
1602                                 dcr->manufacturing_location,
1603                                 be16_to_cpu(dcr->manufacturing_date),
1604                                 be32_to_cpu(dcr->serial_number));
1605         else
1606                 return sprintf(buf, "%04x-%08x\n",
1607                                 be16_to_cpu(dcr->vendor_id),
1608                                 be32_to_cpu(dcr->serial_number));
1609 }
1610 static DEVICE_ATTR_RO(id);
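
/*
 * Example outputs with hypothetical field values: vendor 0x8089,
 * location 0x01, date 0x2017, serial 0x12345678 renders as
 * "8089-01-2017-12345678" when manufacturing info is valid, and as
 * "8089-12345678" otherwise.
 */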
1611
1612 static ssize_t dirty_shutdown_show(struct device *dev,
1613                 struct device_attribute *attr, char *buf)
1614 {
1615         struct nvdimm *nvdimm = to_nvdimm(dev);
1616         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1617
1618         return sprintf(buf, "%d\n", nfit_mem->dirty_shutdown);
1619 }
1620 static DEVICE_ATTR_RO(dirty_shutdown);
1621
1622 static struct attribute *acpi_nfit_dimm_attributes[] = {
1623         &dev_attr_handle.attr,
1624         &dev_attr_phys_id.attr,
1625         &dev_attr_vendor.attr,
1626         &dev_attr_device.attr,
1627         &dev_attr_rev_id.attr,
1628         &dev_attr_subsystem_vendor.attr,
1629         &dev_attr_subsystem_device.attr,
1630         &dev_attr_subsystem_rev_id.attr,
1631         &dev_attr_format.attr,
1632         &dev_attr_formats.attr,
1633         &dev_attr_format1.attr,
1634         &dev_attr_serial.attr,
1635         &dev_attr_flags.attr,
1636         &dev_attr_id.attr,
1637         &dev_attr_family.attr,
1638         &dev_attr_dsm_mask.attr,
1639         &dev_attr_dirty_shutdown.attr,
1640         NULL,
1641 };
1642
1643 static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
1644                 struct attribute *a, int n)
1645 {
1646         struct device *dev = container_of(kobj, struct device, kobj);
1647         struct nvdimm *nvdimm = to_nvdimm(dev);
1648         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1649
1650         if (!to_nfit_dcr(dev)) {
1651                 /* Without a dcr only the memdev attributes can be surfaced */
1652                 if (a == &dev_attr_handle.attr || a == &dev_attr_phys_id.attr
1653                                 || a == &dev_attr_flags.attr
1654                                 || a == &dev_attr_family.attr
1655                                 || a == &dev_attr_dsm_mask.attr)
1656                         return a->mode;
1657                 return 0;
1658         }
1659
1660         if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)
1661                 return 0;
1662
1663         if (!test_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags)
1664                         && a == &dev_attr_dirty_shutdown.attr)
1665                 return 0;
1666
1667         return a->mode;
1668 }
1669
1670 static const struct attribute_group acpi_nfit_dimm_attribute_group = {
1671         .name = "nfit",
1672         .attrs = acpi_nfit_dimm_attributes,
1673         .is_visible = acpi_nfit_dimm_attr_visible,
1674 };
1675
1676 static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
1677         &nvdimm_attribute_group,
1678         &nd_device_attribute_group,
1679         &acpi_nfit_dimm_attribute_group,
1680         NULL,
1681 };
1682
1683 static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
1684                 u32 device_handle)
1685 {
1686         struct nfit_mem *nfit_mem;
1687
1688         list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
1689                 if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
1690                         return nfit_mem->nvdimm;
1691
1692         return NULL;
1693 }
1694
1695 void __acpi_nvdimm_notify(struct device *dev, u32 event)
1696 {
1697         struct nfit_mem *nfit_mem;
1698         struct acpi_nfit_desc *acpi_desc;
1699
1700         dev_dbg(dev->parent, "%s: event: %d\n", dev_name(dev),
1701                         event);
1702
1703         if (event != NFIT_NOTIFY_DIMM_HEALTH) {
1704                 dev_dbg(dev->parent, "%s: unknown event: %d\n", dev_name(dev),
1705                                 event);
1706                 return;
1707         }
1708
1709         acpi_desc = dev_get_drvdata(dev->parent);
1710         if (!acpi_desc)
1711                 return;
1712
1713         /*
1714          * If we successfully retrieved acpi_desc, then we know nfit_mem data
1715          * is still valid.
1716          */
1717         nfit_mem = dev_get_drvdata(dev);
1718         if (nfit_mem && nfit_mem->flags_attr)
1719                 sysfs_notify_dirent(nfit_mem->flags_attr);
1720 }
1721 EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify);
1722
1723 static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
1724 {
1725         struct acpi_device *adev = data;
1726         struct device *dev = &adev->dev;
1727
1728         device_lock(dev->parent);
1729         __acpi_nvdimm_notify(dev, event);
1730         device_unlock(dev->parent);
1731 }
1732
1733 static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method)
1734 {
1735         acpi_handle handle;
1736         acpi_status status;
1737
1738         status = acpi_get_handle(adev->handle, method, &handle);
1739
1740         if (ACPI_SUCCESS(status))
1741                 return true;
1742         return false;
1743 }
1744
1745 __weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
1746 {
1747         struct nd_intel_smart smart = { 0 };
1748         union acpi_object in_buf = {
1749                 .type = ACPI_TYPE_BUFFER,
1750                 .buffer.pointer = (char *) &smart,
1751                 .buffer.length = sizeof(smart),
1752         };
1753         union acpi_object in_obj = {
1754                 .type = ACPI_TYPE_PACKAGE,
1755                 .package.count = 1,
1756                 .package.elements = &in_buf,
1757         };
1758         const u8 func = ND_INTEL_SMART;
1759         const guid_t *guid = to_nfit_uuid(nfit_mem->family);
1760         u8 revid = nfit_dsm_revid(nfit_mem->family, func);
1761         struct acpi_device *adev = nfit_mem->adev;
1762         acpi_handle handle = adev->handle;
1763         union acpi_object *out_obj;
1764
1765         if ((nfit_mem->dsm_mask & (1 << func)) == 0)
1766                 return;
1767
        out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
        if (!out_obj)
                return;

        /*
         * The DSM returns its payload in out_obj; the local 'smart'
         * buffer only seeds the input package, so validate the output
         * and copy it back before parsing any flags.
         */
        if (out_obj->type != ACPI_TYPE_BUFFER
                        || out_obj->buffer.length < sizeof(smart)) {
                ACPI_FREE(out_obj);
                return;
        }
        memcpy(&smart, out_obj->buffer.pointer, sizeof(smart));
        ACPI_FREE(out_obj);

        if (smart.flags & ND_INTEL_SMART_SHUTDOWN_VALID) {
                if (smart.shutdown_state)
                        set_bit(NFIT_MEM_DIRTY, &nfit_mem->flags);
        }

        if (smart.flags & ND_INTEL_SMART_SHUTDOWN_COUNT_VALID) {
                set_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags);
                nfit_mem->dirty_shutdown = smart.shutdown_count;
        }
1782 }
1783
1784 static void populate_shutdown_status(struct nfit_mem *nfit_mem)
1785 {
1786         /*
1787          * For DIMMs that provide a dynamic facility to retrieve a
1788          * dirty-shutdown status and/or a dirty-shutdown count, cache
1789          * these values in nfit_mem.
1790          */
1791         if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
1792                 nfit_intel_shutdown_status(nfit_mem);
1793 }
1794
1795 static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
1796                 struct nfit_mem *nfit_mem, u32 device_handle)
1797 {
1798         struct acpi_device *adev, *adev_dimm;
1799         struct device *dev = acpi_desc->dev;
1800         unsigned long dsm_mask, label_mask;
1801         const guid_t *guid;
1802         int i;
1803         int family = -1;
1804
1805         /* nfit test assumes 1:1 relationship between commands and dsms */
1806         nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
1807         nfit_mem->family = NVDIMM_FAMILY_INTEL;
1808         adev = to_acpi_dev(acpi_desc);
1809         if (!adev) {
1810                 /* unit test case */
1811                 populate_shutdown_status(nfit_mem);
1812                 return 0;
1813         }
1814
1815         adev_dimm = acpi_find_child_device(adev, device_handle, false);
1816         nfit_mem->adev = adev_dimm;
1817         if (!adev_dimm) {
1818                 dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
1819                                 device_handle);
1820                 return force_enable_dimms ? 0 : -ENODEV;
1821         }
1822
1823         if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle,
1824                 ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) {
1825                 dev_err(dev, "%s: notification registration failed\n",
1826                                 dev_name(&adev_dimm->dev));
1827                 return -ENXIO;
1828         }
1829         /*
1830          * Record nfit_mem for the notification path to track back to
1831          * the nfit sysfs attributes for this dimm device object.
1832          */
1833         dev_set_drvdata(&adev_dimm->dev, nfit_mem);
1834
1835         /*
         * Until standardization materializes, we need to consider 4
         * different command sets.  Note that checking for function0 (bit0)
1838          * tells us if any commands are reachable through this GUID.
1839          */
1840         for (i = 0; i <= NVDIMM_FAMILY_MAX; i++)
1841                 if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
1842                         if (family < 0 || i == default_dsm_family)
1843                                 family = i;
1844
1845         /* limit the supported commands to those that are publicly documented */
1846         nfit_mem->family = family;
1847         if (override_dsm_mask && !disable_vendor_specific)
1848                 dsm_mask = override_dsm_mask;
1849         else if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
1850                 dsm_mask = NVDIMM_INTEL_CMDMASK;
1851                 if (disable_vendor_specific)
1852                         dsm_mask &= ~(1 << ND_CMD_VENDOR);
1853         } else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) {
1854                 dsm_mask = 0x1c3c76;
1855         } else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) {
1856                 dsm_mask = 0x1fe;
1857                 if (disable_vendor_specific)
1858                         dsm_mask &= ~(1 << 8);
1859         } else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) {
1860                 dsm_mask = 0xffffffff;
1861         } else {
1862                 dev_dbg(dev, "unknown dimm command family\n");
1863                 nfit_mem->family = -1;
1864                 /* DSMs are optional, continue loading the driver... */
1865                 return 0;
1866         }
1867
1868         guid = to_nfit_uuid(nfit_mem->family);
1869         for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
1870                 if (acpi_check_dsm(adev_dimm->handle, guid,
1871                                         nfit_dsm_revid(nfit_mem->family, i),
1872                                         1ULL << i))
1873                         set_bit(i, &nfit_mem->dsm_mask);
1874
1875         /*
1876          * Prefer the NVDIMM_FAMILY_INTEL label read commands if present
         * due to their better semantics for handling locked capacity.
1878          */
1879         label_mask = 1 << ND_CMD_GET_CONFIG_SIZE | 1 << ND_CMD_GET_CONFIG_DATA
1880                 | 1 << ND_CMD_SET_CONFIG_DATA;
1881         if (family == NVDIMM_FAMILY_INTEL
1882                         && (dsm_mask & label_mask) == label_mask)
1883                 return 0;
1884
1885         if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
1886                         && acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
1887                 dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
1888                 set_bit(NFIT_MEM_LSR, &nfit_mem->flags);
1889         }
1890
1891         if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
1892                         && acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
1893                 dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
1894                 set_bit(NFIT_MEM_LSW, &nfit_mem->flags);
1895         }
1896
1897         populate_shutdown_status(nfit_mem);
1898
1899         return 0;
1900 }
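
/*
 * Family selection sketch: if a DIMM answers _DSM function 0 for both
 * the NVDIMM_FAMILY_INTEL and NVDIMM_FAMILY_MSFT GUIDs, the probe
 * loop above keeps the lowest-numbered family unless the
 * default_dsm_family module parameter names one of them explicitly.
 * The per-function acpi_check_dsm() pass then narrows the family's
 * candidate mask to what the firmware implements, e.g. a candidate
 * mask of 0x1fe with only functions 1-4 implemented yields
 * nfit_mem->dsm_mask == 0x1e.
 */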
1901
1902 static void shutdown_dimm_notify(void *data)
1903 {
1904         struct acpi_nfit_desc *acpi_desc = data;
1905         struct nfit_mem *nfit_mem;
1906
1907         mutex_lock(&acpi_desc->init_mutex);
1908         /*
1909          * Clear out the nfit_mem->flags_attr and shut down dimm event
1910          * notifications.
1911          */
1912         list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
1913                 struct acpi_device *adev_dimm = nfit_mem->adev;
1914
1915                 if (nfit_mem->flags_attr) {
1916                         sysfs_put(nfit_mem->flags_attr);
1917                         nfit_mem->flags_attr = NULL;
1918                 }
1919                 if (adev_dimm) {
1920                         acpi_remove_notify_handler(adev_dimm->handle,
1921                                         ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
1922                         dev_set_drvdata(&adev_dimm->dev, NULL);
1923                 }
1924         }
1925         mutex_unlock(&acpi_desc->init_mutex);
1926 }
1927
1928 static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
1929 {
1930         struct nfit_mem *nfit_mem;
1931         int dimm_count = 0, rc;
1932         struct nvdimm *nvdimm;
1933
1934         list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
1935                 struct acpi_nfit_flush_address *flush;
1936                 unsigned long flags = 0, cmd_mask;
1937                 struct nfit_memdev *nfit_memdev;
1938                 u32 device_handle;
1939                 u16 mem_flags;
1940
1941                 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
1942                 nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
1943                 if (nvdimm) {
1944                         dimm_count++;
1945                         continue;
1946                 }
1947
1948                 if (nfit_mem->bdw && nfit_mem->memdev_pmem)
1949                         set_bit(NDD_ALIASING, &flags);
1950
1951                 /* collate flags across all memdevs for this dimm */
1952                 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
1953                         struct acpi_nfit_memory_map *dimm_memdev;
1954
1955                         dimm_memdev = __to_nfit_memdev(nfit_mem);
1956                         if (dimm_memdev->device_handle
1957                                         != nfit_memdev->memdev->device_handle)
1958                                 continue;
1959                         dimm_memdev->flags |= nfit_memdev->memdev->flags;
1960                 }
1961
1962                 mem_flags = __to_nfit_memdev(nfit_mem)->flags;
1963                 if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
1964                         set_bit(NDD_UNARMED, &flags);
1965
1966                 rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
1967                 if (rc)
1968                         continue;
1969
1970                 /*
1971                  * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
1972                  * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
1973                  * userspace interface.
1974                  */
1975                 cmd_mask = 1UL << ND_CMD_CALL;
1976                 if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
1977                         /*
1978                          * These commands have a 1:1 correspondence
1979                          * between DSM payload and libnvdimm ioctl
1980                          * payload format.
1981                          */
1982                         cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK;
1983                 }
1984
1985                 if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
1986                         set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
1987                         set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
1988                 }
1989                 if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags))
1990                         set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
1991
1992                 flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush
1993                         : NULL;
1994                 nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
1995                                 acpi_nfit_dimm_attribute_groups,
1996                                 flags, cmd_mask, flush ? flush->hint_count : 0,
1997                                 nfit_mem->flush_wpq);
1998                 if (!nvdimm)
1999                         return -ENOMEM;
2000
2001                 nfit_mem->nvdimm = nvdimm;
2002                 dimm_count++;
2003
2004                 if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
2005                         continue;
2006
2007                 dev_info(acpi_desc->dev, "%s flags:%s%s%s%s%s\n",
2008                                 nvdimm_name(nvdimm),
2009                   mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
2010                   mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
2011                   mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
2012                   mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "",
2013                   mem_flags & ACPI_NFIT_MEM_MAP_FAILED ? " map_fail" : "");
2014
2015         }
2016
2017         rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
2018         if (rc)
2019                 return rc;
2020
2021         /*
         * Now that dimms are successfully registered and async registration
2023          * is flushed, attempt to enable event notification.
2024          */
2025         list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
2026                 struct kernfs_node *nfit_kernfs;
2027
2028                 nvdimm = nfit_mem->nvdimm;
2029                 if (!nvdimm)
2030                         continue;
2031
2032                 nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
2033                 if (nfit_kernfs)
2034                         nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
2035                                         "flags");
2036                 sysfs_put(nfit_kernfs);
2037                 if (!nfit_mem->flags_attr)
2038                         dev_warn(acpi_desc->dev, "%s: notifications disabled\n",
2039                                         nvdimm_name(nvdimm));
2040         }
2041
2042         return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify,
2043                         acpi_desc);
2044 }
2045
2046 /*
2047  * These constants are private because there are no kernel consumers of
2048  * these commands.
2049  */
2050 enum nfit_aux_cmds {
2051         NFIT_CMD_TRANSLATE_SPA = 5,
2052         NFIT_CMD_ARS_INJECT_SET = 7,
2053         NFIT_CMD_ARS_INJECT_CLEAR = 8,
2054         NFIT_CMD_ARS_INJECT_GET = 9,
2055 };
2056
2057 static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
2058 {
2059         struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2060         const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS);
2061         struct acpi_device *adev;
2062         unsigned long dsm_mask;
2063         int i;
2064
2065         nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
2066         nd_desc->bus_dsm_mask = acpi_desc->bus_nfit_cmd_force_en;
2067         adev = to_acpi_dev(acpi_desc);
2068         if (!adev)
2069                 return;
2070
2071         for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
2072                 if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
2073                         set_bit(i, &nd_desc->cmd_mask);
2074         set_bit(ND_CMD_CALL, &nd_desc->cmd_mask);
2075
2076         dsm_mask =
2077                 (1 << ND_CMD_ARS_CAP) |
2078                 (1 << ND_CMD_ARS_START) |
2079                 (1 << ND_CMD_ARS_STATUS) |
2080                 (1 << ND_CMD_CLEAR_ERROR) |
2081                 (1 << NFIT_CMD_TRANSLATE_SPA) |
2082                 (1 << NFIT_CMD_ARS_INJECT_SET) |
2083                 (1 << NFIT_CMD_ARS_INJECT_CLEAR) |
2084                 (1 << NFIT_CMD_ARS_INJECT_GET);
2085         for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
2086                 if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
2087                         set_bit(i, &nd_desc->bus_dsm_mask);
2088 }
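
/*
 * Worked example, assuming the ndctl.h encodings (ND_CMD_ARS_CAP == 1
 * through ND_CMD_CLEAR_ERROR == 4): the candidate mask above covers
 * bits 1-5 and 7-9, i.e. 0x3be.  A platform implementing only the
 * four public ARS commands ends up with bus_dsm_mask == 0x1e, while
 * cmd_mask also carries ND_CMD_CALL (bit 10, 0x400).
 */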
2089
2090 static ssize_t range_index_show(struct device *dev,
2091                 struct device_attribute *attr, char *buf)
2092 {
2093         struct nd_region *nd_region = to_nd_region(dev);
2094         struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);
2095
2096         return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
2097 }
2098 static DEVICE_ATTR_RO(range_index);
2099
2100 static struct attribute *acpi_nfit_region_attributes[] = {
2101         &dev_attr_range_index.attr,
2102         NULL,
2103 };
2104
2105 static const struct attribute_group acpi_nfit_region_attribute_group = {
2106         .name = "nfit",
2107         .attrs = acpi_nfit_region_attributes,
2108 };
2109
2110 static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
2111         &nd_region_attribute_group,
2112         &nd_mapping_attribute_group,
2113         &nd_device_attribute_group,
2114         &nd_numa_attribute_group,
2115         &acpi_nfit_region_attribute_group,
2116         NULL,
2117 };
2118
2119 /* enough info to uniquely specify an interleave set */
2120 struct nfit_set_info {
2121         struct nfit_set_info_map {
2122                 u64 region_offset;
2123                 u32 serial_number;
2124                 u32 pad;
        } mapping[];
2126 };
2127
2128 struct nfit_set_info2 {
2129         struct nfit_set_info_map2 {
2130                 u64 region_offset;
2131                 u32 serial_number;
2132                 u16 vendor_id;
2133                 u16 manufacturing_date;
2134                 u8  manufacturing_location;
2135                 u8  reserved[31];
        } mapping[];
2137 };
2138
2139 static size_t sizeof_nfit_set_info(int num_mappings)
2140 {
2141         return sizeof(struct nfit_set_info)
2142                 + num_mappings * sizeof(struct nfit_set_info_map);
2143 }
2144
2145 static size_t sizeof_nfit_set_info2(int num_mappings)
2146 {
2147         return sizeof(struct nfit_set_info2)
2148                 + num_mappings * sizeof(struct nfit_set_info_map2);
2149 }
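
/*
 * Size check, assuming no padding beyond the layout shown: each
 * nfit_set_info_map is 16 bytes and each nfit_set_info_map2 is 48
 * bytes, so a two-way interleave set needs sizeof_nfit_set_info(2) ==
 * 32 bytes and sizeof_nfit_set_info2(2) == 96 bytes.
 */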
2150
2151 static int cmp_map_compat(const void *m0, const void *m1)
2152 {
2153         const struct nfit_set_info_map *map0 = m0;
2154         const struct nfit_set_info_map *map1 = m1;
2155
2156         return memcmp(&map0->region_offset, &map1->region_offset,
2157                         sizeof(u64));
2158 }
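
/*
 * Why a separate "compat" sort exists: memcmp() on a little-endian
 * u64 compares the least-significant byte first, so region offsets
 * 0x100 (bytes 00 01 00 ...) and 0x2 (bytes 02 00 00 ...) order as
 * 0x100 < 0x2.  cmp_map()/cmp_map2() below compare numerically; the
 * memcmp() order is retained only to validate cookies of v1.1
 * namespaces created with the old sort.
 */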
2159
2160 static int cmp_map(const void *m0, const void *m1)
2161 {
2162         const struct nfit_set_info_map *map0 = m0;
2163         const struct nfit_set_info_map *map1 = m1;
2164
2165         if (map0->region_offset < map1->region_offset)
2166                 return -1;
2167         else if (map0->region_offset > map1->region_offset)
2168                 return 1;
2169         return 0;
2170 }
2171
2172 static int cmp_map2(const void *m0, const void *m1)
2173 {
2174         const struct nfit_set_info_map2 *map0 = m0;
2175         const struct nfit_set_info_map2 *map1 = m1;
2176
2177         if (map0->region_offset < map1->region_offset)
2178                 return -1;
2179         else if (map0->region_offset > map1->region_offset)
2180                 return 1;
2181         return 0;
2182 }
2183
2184 /* Retrieve the nth entry referencing this spa */
2185 static struct acpi_nfit_memory_map *memdev_from_spa(
2186                 struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
2187 {
2188         struct nfit_memdev *nfit_memdev;
2189
2190         list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
2191                 if (nfit_memdev->memdev->range_index == range_index)
2192                         if (n-- == 0)
2193                                 return nfit_memdev->memdev;
2194         return NULL;
2195 }
2196
2197 static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
2198                 struct nd_region_desc *ndr_desc,
2199                 struct acpi_nfit_system_address *spa)
2200 {
2201         struct device *dev = acpi_desc->dev;
2202         struct nd_interleave_set *nd_set;
2203         u16 nr = ndr_desc->num_mappings;
2204         struct nfit_set_info2 *info2;
2205         struct nfit_set_info *info;
2206         int i;
2207
2208         nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
2209         if (!nd_set)
2210                 return -ENOMEM;
2211         ndr_desc->nd_set = nd_set;
2212         guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid);
2213
2214         info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
2215         if (!info)
2216                 return -ENOMEM;
2217
2218         info2 = devm_kzalloc(dev, sizeof_nfit_set_info2(nr), GFP_KERNEL);
2219         if (!info2)
2220                 return -ENOMEM;
2221
2222         for (i = 0; i < nr; i++) {
2223                 struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
2224                 struct nfit_set_info_map *map = &info->mapping[i];
2225                 struct nfit_set_info_map2 *map2 = &info2->mapping[i];
2226                 struct nvdimm *nvdimm = mapping->nvdimm;
2227                 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
2228                 struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
2229                                 spa->range_index, i);
2230                 struct acpi_nfit_control_region *dcr = nfit_mem->dcr;
2231
2232                 if (!memdev || !nfit_mem->dcr) {
2233                         dev_err(dev, "%s: failed to find DCR\n", __func__);
2234                         return -ENODEV;
2235                 }
2236
2237                 map->region_offset = memdev->region_offset;
2238                 map->serial_number = dcr->serial_number;
2239
2240                 map2->region_offset = memdev->region_offset;
2241                 map2->serial_number = dcr->serial_number;
2242                 map2->vendor_id = dcr->vendor_id;
2243                 map2->manufacturing_date = dcr->manufacturing_date;
2244                 map2->manufacturing_location = dcr->manufacturing_location;
2245         }
2246
2247         /* v1.1 namespaces */
2248         sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
2249                         cmp_map, NULL);
2250         nd_set->cookie1 = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
2251
2252         /* v1.2 namespaces */
2253         sort(&info2->mapping[0], nr, sizeof(struct nfit_set_info_map2),
2254                         cmp_map2, NULL);
2255         nd_set->cookie2 = nd_fletcher64(info2, sizeof_nfit_set_info2(nr), 0);
2256
2257         /* support v1.1 namespaces created with the wrong sort order */
2258         sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
2259                         cmp_map_compat, NULL);
2260         nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
2261
2262         /* record the result of the sort for the mapping position */
2263         for (i = 0; i < nr; i++) {
2264                 struct nfit_set_info_map2 *map2 = &info2->mapping[i];
2265                 int j;
2266
2267                 for (j = 0; j < nr; j++) {
2268                         struct nd_mapping_desc *mapping = &ndr_desc->mapping[j];
2269                         struct nvdimm *nvdimm = mapping->nvdimm;
2270                         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
2271                         struct acpi_nfit_control_region *dcr = nfit_mem->dcr;
2272
2273                         if (map2->serial_number == dcr->serial_number &&
2274                             map2->vendor_id == dcr->vendor_id &&
2275                             map2->manufacturing_date == dcr->manufacturing_date &&
2276                             map2->manufacturing_location
2277                                     == dcr->manufacturing_location) {
2278                                 mapping->position = i;
2279                                 break;
2280                         }
2281                 }
2282         }
2283
2285         devm_kfree(dev, info);
2286         devm_kfree(dev, info2);
2287
2288         return 0;
2289 }
2290
2291 static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
2292 {
2293         struct acpi_nfit_interleave *idt = mmio->idt;
2294         u32 sub_line_offset, line_index, line_offset;
2295         u64 line_no, table_skip_count, table_offset;
2296
2297         line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
2298         table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
2299         line_offset = idt->line_offset[line_index]
2300                 * mmio->line_size;
2301         table_offset = table_skip_count * mmio->table_size;
2302
2303         return mmio->base_offset + line_offset + table_offset + sub_line_offset;
2304 }
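
/*
 * Worked example with a hypothetical geometry: line_size = 256,
 * num_lines = 2, table_size = 1024, idt->line_offset = { 0, 2 }.
 * For offset = 0x500: line_no = 5 and sub_line_offset = 0, then
 * line_index = 1 and table_skip_count = 2, so the result is
 * base_offset + 2 * 256 + 2 * 1024 + 0 = base_offset + 0xa00.
 */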
2305
2306 static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
2307 {
2308         struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
2309         u64 offset = nfit_blk->stat_offset + mmio->size * bw;
2310         const u32 STATUS_MASK = 0x80000037;
2311
2312         if (mmio->num_lines)
2313                 offset = to_interleave_offset(offset, mmio);
2314
2315         return readl(mmio->addr.base + offset) & STATUS_MASK;
2316 }
2317
2318 static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
2319                 resource_size_t dpa, unsigned int len, unsigned int write)
2320 {
2321         u64 cmd, offset;
2322         struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
2323
2324         enum {
2325                 BCW_OFFSET_MASK = (1ULL << 48)-1,
2326                 BCW_LEN_SHIFT = 48,
2327                 BCW_LEN_MASK = (1ULL << 8) - 1,
2328                 BCW_CMD_SHIFT = 56,
2329         };
2330
2331         cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
2332         len = len >> L1_CACHE_SHIFT;
2333         cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
2334         cmd |= ((u64) write) << BCW_CMD_SHIFT;
2335
2336         offset = nfit_blk->cmd_offset + mmio->size * bw;
2337         if (mmio->num_lines)
2338                 offset = to_interleave_offset(offset, mmio);
2339
2340         writeq(cmd, mmio->addr.base + offset);
2341         nvdimm_flush(nfit_blk->nd_region);
2342
2343         if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
2344                 readq(mmio->addr.base + offset);
2345 }
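
/*
 * Encoding example, assuming L1_CACHE_SHIFT == 6: a write (write ==
 * 1) of len = 512 bytes at dpa = 0x2000 yields cmd = 0x2000 >> 6 =
 * 0x80 and len >> 6 = 8, so the value latched into the block control
 * window is (1ULL << 56) | (8ULL << 48) | 0x80 = 0x0108000000000080.
 */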
2346
2347 static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
2348                 resource_size_t dpa, void *iobuf, size_t len, int rw,
2349                 unsigned int lane)
2350 {
2351         struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
2352         unsigned int copied = 0;
2353         u64 base_offset;
2354         int rc;
2355
2356         base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
2357                 + lane * mmio->size;
2358         write_blk_ctl(nfit_blk, lane, dpa, len, rw);
2359         while (len) {
2360                 unsigned int c;
2361                 u64 offset;
2362
2363                 if (mmio->num_lines) {
2364                         u32 line_offset;
2365
2366                         offset = to_interleave_offset(base_offset + copied,
2367                                         mmio);
2368                         div_u64_rem(offset, mmio->line_size, &line_offset);
2369                         c = min_t(size_t, len, mmio->line_size - line_offset);
2370                 } else {
                        /* base_offset already accounts for bdw_offset */
                        offset = base_offset + copied;
2372                         c = len;
2373                 }
2374
                if (rw) {
                        memcpy_flushcache(mmio->addr.aperture + offset,
                                        iobuf + copied, c);
                } else {
                        if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
                                arch_invalidate_pmem((void __force *)
                                        mmio->addr.aperture + offset, c);

                        memcpy(iobuf + copied, mmio->addr.aperture + offset, c);
                }
2384
2385                 copied += c;
2386                 len -= c;
2387         }
2388
2389         if (rw)
2390                 nvdimm_flush(nfit_blk->nd_region);
2391
2392         rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
2393         return rc;
2394 }
2395
2396 static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
2397                 resource_size_t dpa, void *iobuf, u64 len, int rw)
2398 {
2399         struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
2400         struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
2401         struct nd_region *nd_region = nfit_blk->nd_region;
2402         unsigned int lane, copied = 0;
2403         int rc = 0;
2404
2405         lane = nd_region_acquire_lane(nd_region);
2406         while (len) {
2407                 u64 c = min(len, mmio->size);
2408
2409                 rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
2410                                 iobuf + copied, c, rw, lane);
2411                 if (rc)
2412                         break;
2413
2414                 copied += c;
2415                 len -= c;
2416         }
2417         nd_region_release_lane(nd_region, lane);
2418
2419         return rc;
2420 }
2421
2422 static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
2423                 struct acpi_nfit_interleave *idt, u16 interleave_ways)
2424 {
2425         if (idt) {
2426                 mmio->num_lines = idt->line_count;
2427                 mmio->line_size = idt->line_size;
2428                 if (interleave_ways == 0)
2429                         return -ENXIO;
2430                 mmio->table_size = mmio->num_lines * interleave_ways
2431                         * mmio->line_size;
2432         }
2433
2434         return 0;
2435 }
2436
2437 static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
2438                 struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
2439 {
2440         struct nd_cmd_dimm_flags flags;
2441         int rc;
2442
2443         memset(&flags, 0, sizeof(flags));
2444         rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
2445                         sizeof(flags), NULL);
2446
2447         if (rc >= 0 && flags.status == 0)
2448                 nfit_blk->dimm_flags = flags.flags;
2449         else if (rc == -ENOTTY) {
2450                 /* fall back to a conservative default */
2451                 nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
2452                 rc = 0;
2453         } else
2454                 rc = -ENXIO;
2455
2456         return rc;
2457 }
2458
2459 static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
2460                 struct device *dev)
2461 {
2462         struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
2463         struct nd_blk_region *ndbr = to_nd_blk_region(dev);
2464         struct nfit_blk_mmio *mmio;
2465         struct nfit_blk *nfit_blk;
2466         struct nfit_mem *nfit_mem;
2467         struct nvdimm *nvdimm;
2468         int rc;
2469
2470         nvdimm = nd_blk_region_to_dimm(ndbr);
2471         nfit_mem = nvdimm_provider_data(nvdimm);
2472         if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
2473                 dev_dbg(dev, "missing%s%s%s\n",
2474                                 nfit_mem ? "" : " nfit_mem",
2475                                 (nfit_mem && nfit_mem->dcr) ? "" : " dcr",
2476                                 (nfit_mem && nfit_mem->bdw) ? "" : " bdw");
2477                 return -ENXIO;
2478         }
2479
2480         nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
2481         if (!nfit_blk)
2482                 return -ENOMEM;
2483         nd_blk_region_set_provider_data(ndbr, nfit_blk);
2484         nfit_blk->nd_region = to_nd_region(dev);
2485
2486         /* map block aperture memory */
2487         nfit_blk->bdw_offset = nfit_mem->bdw->offset;
2488         mmio = &nfit_blk->mmio[BDW];
2489         mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address,
2490                         nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr));
2491         if (!mmio->addr.base) {
2492                 dev_dbg(dev, "%s failed to map bdw\n",
2493                                 nvdimm_name(nvdimm));
2494                 return -ENOMEM;
2495         }
2496         mmio->size = nfit_mem->bdw->size;
2497         mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
2498         mmio->idt = nfit_mem->idt_bdw;
2499         mmio->spa = nfit_mem->spa_bdw;
2500         rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
2501                         nfit_mem->memdev_bdw->interleave_ways);
2502         if (rc) {
2503                 dev_dbg(dev, "%s failed to init bdw interleave\n",
2504                                 nvdimm_name(nvdimm));
2505                 return rc;
2506         }
2507
2508         /* map block control memory */
2509         nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
2510         nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
2511         mmio = &nfit_blk->mmio[DCR];
2512         mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address,
2513                         nfit_mem->spa_dcr->length);
2514         if (!mmio->addr.base) {
2515                 dev_dbg(dev, "%s failed to map dcr\n",
2516                                 nvdimm_name(nvdimm));
2517                 return -ENOMEM;
2518         }
2519         mmio->size = nfit_mem->dcr->window_size;
2520         mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
2521         mmio->idt = nfit_mem->idt_dcr;
2522         mmio->spa = nfit_mem->spa_dcr;
2523         rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
2524                         nfit_mem->memdev_dcr->interleave_ways);
2525         if (rc) {
2526                 dev_dbg(dev, "%s failed to init dcr interleave\n",
2527                                 nvdimm_name(nvdimm));
2528                 return rc;
2529         }
2530
2531         rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
2532         if (rc < 0) {
                dev_dbg(dev, "%s failed to get DIMM flags\n",
2534                                 nvdimm_name(nvdimm));
2535                 return rc;
2536         }
2537
2538         if (nvdimm_has_flush(nfit_blk->nd_region) < 0)
2539                 dev_warn(dev, "unable to guarantee persistence of writes\n");
2540
2541         if (mmio->line_size == 0)
2542                 return 0;
2543
2544         if ((u32) nfit_blk->cmd_offset % mmio->line_size
2545                         + 8 > mmio->line_size) {
2546                 dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
2547                 return -ENXIO;
2548         } else if ((u32) nfit_blk->stat_offset % mmio->line_size
2549                         + 8 > mmio->line_size) {
2550                 dev_dbg(dev, "stat_offset crosses interleave boundary\n");
2551                 return -ENXIO;
2552         }
2553
2554         return 0;
2555 }
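
/*
 * Interleave-boundary example: with line_size = 256 and cmd_offset =
 * 0x1fc, 0x1fc % 256 + 8 = 260 > 256, so an 8-byte access to the
 * command register would straddle two interleave lines and the
 * region is rejected with -ENXIO above.  The status offset is
 * checked the same way.
 */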
2556
2557 static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
2558                 struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
2559 {
2560         struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2561         struct acpi_nfit_system_address *spa = nfit_spa->spa;
2562         int cmd_rc, rc;
2563
2564         cmd->address = spa->address;
2565         cmd->length = spa->length;
2566         rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
2567                         sizeof(*cmd), &cmd_rc);
2568         if (rc < 0)
2569                 return rc;
2570         return cmd_rc;
2571 }
2572
2573 static int ars_start(struct acpi_nfit_desc *acpi_desc,
2574                 struct nfit_spa *nfit_spa, enum nfit_ars_state req_type)
2575 {
2576         int rc;
2577         int cmd_rc;
2578         struct nd_cmd_ars_start ars_start;
2579         struct acpi_nfit_system_address *spa = nfit_spa->spa;
2580         struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2581
2582         memset(&ars_start, 0, sizeof(ars_start));
2583         ars_start.address = spa->address;
2584         ars_start.length = spa->length;
2585         if (req_type == ARS_REQ_SHORT)
2586                 ars_start.flags = ND_ARS_RETURN_PREV_DATA;
2587         if (nfit_spa_type(spa) == NFIT_SPA_PM)
2588                 ars_start.type = ND_ARS_PERSISTENT;
2589         else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
2590                 ars_start.type = ND_ARS_VOLATILE;
2591         else
2592                 return -ENOTTY;
2593
2594         rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
2595                         sizeof(ars_start), &cmd_rc);
2596
2597         if (rc < 0)
2598                 return rc;
2599         return cmd_rc;
2600 }
2601
2602 static int ars_continue(struct acpi_nfit_desc *acpi_desc)
2603 {
2604         int rc, cmd_rc;
2605         struct nd_cmd_ars_start ars_start;
2606         struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2607         struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2608
2609         memset(&ars_start, 0, sizeof(ars_start));
2610         ars_start.address = ars_status->restart_address;
2611         ars_start.length = ars_status->restart_length;
2612         ars_start.type = ars_status->type;
2613         ars_start.flags = acpi_desc->ars_start_flags;
2614         rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
2615                         sizeof(ars_start), &cmd_rc);
2616         if (rc < 0)
2617                 return rc;
2618         return cmd_rc;
2619 }
2620
2621 static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
2622 {
2623         struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2624         struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2625         int rc, cmd_rc;
2626
2627         rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
2628                         acpi_desc->max_ars, &cmd_rc);
2629         if (rc < 0)
2630                 return rc;
2631         return cmd_rc;
2632 }
2633
2634 static void ars_complete(struct acpi_nfit_desc *acpi_desc,
2635                 struct nfit_spa *nfit_spa)
2636 {
2637         struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2638         struct acpi_nfit_system_address *spa = nfit_spa->spa;
2639         struct nd_region *nd_region = nfit_spa->nd_region;
2640         struct device *dev;
2641
2642         lockdep_assert_held(&acpi_desc->init_mutex);
2643         /*
2644          * Only advance the ARS state for ARS runs initiated by the
         * kernel; ignore ARS results from BIOS-initiated runs for scrub
2646          * completion tracking.
2647          */
2648         if (acpi_desc->scrub_spa != nfit_spa)
2649                 return;
2650
2651         if ((ars_status->address >= spa->address && ars_status->address
2652                                 < spa->address + spa->length)
2653                         || (ars_status->address < spa->address)) {
2654                 /*
2655                  * Assume that if a scrub starts at an offset from the
2656                  * start of nfit_spa that we are in the continuation
2657                  * case.
2658                  *
2659                  * Otherwise, if the scrub covers the spa range, mark
2660                  * any pending request complete.
2661                  */
2662                 if (ars_status->address + ars_status->length
2663                                 >= spa->address + spa->length)
                        /* complete */;
2665                 else
2666                         return;
2667         } else
2668                 return;
2669
2670         acpi_desc->scrub_spa = NULL;
2671         if (nd_region) {
2672                 dev = nd_region_dev(nd_region);
2673                 nvdimm_region_notify(nd_region, NVDIMM_REVALIDATE_POISON);
2674         } else
2675                 dev = acpi_desc->dev;
2676         dev_dbg(dev, "ARS: range %d complete\n", spa->range_index);
2677 }
2678
2679 static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc)
2680 {
2681         struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus;
2682         struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2683         int rc;
2684         u32 i;
2685
2686         /*
         * The first record starts at a 44-byte offset from the start of the
2688          * payload.
2689          */
2690         if (ars_status->out_length < 44)
2691                 return 0;
2692         for (i = 0; i < ars_status->num_records; i++) {
2693                 /* only process full records */
2694                 if (ars_status->out_length
2695                                 < 44 + sizeof(struct nd_ars_record) * (i + 1))
2696                         break;
2697                 rc = nvdimm_bus_add_badrange(nvdimm_bus,
2698                                 ars_status->records[i].err_address,
2699                                 ars_status->records[i].length);
2700                 if (rc)
2701                         return rc;
2702         }
2703         if (i < ars_status->num_records)
2704                 dev_warn(acpi_desc->dev, "detected truncated ars results\n");
2705
2706         return 0;
2707 }
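
/*
 * Payload sizing example, assuming the 24-byte nd_ars_record layout
 * (u32 handle, u32 reserved, u64 err_address, u64 length): an
 * out_length of 92 bytes holds the 44-byte header plus two complete
 * records.  If num_records claimed three, the loop stops after two
 * and the truncation warning fires.
 */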
2708
2709 static void acpi_nfit_remove_resource(void *data)
2710 {
2711         struct resource *res = data;
2712
2713         remove_resource(res);
2714 }
2715
2716 static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
2717                 struct nd_region_desc *ndr_desc)
2718 {
2719         struct resource *res, *nd_res = ndr_desc->res;
2720         int is_pmem, ret;
2721
2722         /* No operation if the region is already registered as PMEM */
2723         is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
2724                                 IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
2725         if (is_pmem == REGION_INTERSECTS)
2726                 return 0;
2727
2728         res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
2729         if (!res)
2730                 return -ENOMEM;
2731
2732         res->name = "Persistent Memory";
2733         res->start = nd_res->start;
2734         res->end = nd_res->end;
2735         res->flags = IORESOURCE_MEM;
2736         res->desc = IORES_DESC_PERSISTENT_MEMORY;
2737
2738         ret = insert_resource(&iomem_resource, res);
2739         if (ret)
2740                 return ret;
2741
2742         ret = devm_add_action_or_reset(acpi_desc->dev,
2743                                         acpi_nfit_remove_resource,
2744                                         res);
2745         if (ret)
2746                 return ret;
2747
2748         return 0;
2749 }
2750
2751 static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
2752                 struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc,
2753                 struct acpi_nfit_memory_map *memdev,
2754                 struct nfit_spa *nfit_spa)
2755 {
2756         struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
2757                         memdev->device_handle);
2758         struct acpi_nfit_system_address *spa = nfit_spa->spa;
2759         struct nd_blk_region_desc *ndbr_desc;
2760         struct nfit_mem *nfit_mem;
2761         int rc;
2762
2763         if (!nvdimm) {
2764                 dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
2765                                 spa->range_index, memdev->device_handle);
2766                 return -ENODEV;
2767         }
2768
2769         mapping->nvdimm = nvdimm;
2770         switch (nfit_spa_type(spa)) {
2771         case NFIT_SPA_PM:
2772         case NFIT_SPA_VOLATILE:
2773                 mapping->start = memdev->address;
2774                 mapping->size = memdev->region_size;
2775                 break;
2776         case NFIT_SPA_DCR:
2777                 nfit_mem = nvdimm_provider_data(nvdimm);
2778                 if (!nfit_mem || !nfit_mem->bdw) {
2779                         dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
2780                                         spa->range_index, nvdimm_name(nvdimm));
2781                         break;
2782                 }
2783
2784                 mapping->size = nfit_mem->bdw->capacity;
2785                 mapping->start = nfit_mem->bdw->start_address;
2786                 ndr_desc->num_lanes = nfit_mem->bdw->windows;
2787                 ndr_desc->mapping = mapping;
2788                 ndr_desc->num_mappings = 1;
2789                 ndbr_desc = to_blk_region_desc(ndr_desc);
2790                 ndbr_desc->enable = acpi_nfit_blk_region_enable;
2791                 ndbr_desc->do_io = acpi_desc->blk_do_io;
2792                 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
2793                 if (rc)
2794                         return rc;
2795                 nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
2796                                 ndr_desc);
2797                 if (!nfit_spa->nd_region)
2798                         return -ENOMEM;
2799                 break;
2800         }
2801
2802         return 0;
2803 }
2804
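/*
 * Two overlapping SPA-type predicates: "virtual" covers the disk/CD
 * ranges in both volatile and persistent flavors, while "volatile"
 * covers the volatile disk/CD ranges plus plain volatile memory.
 * VDISK and VCD therefore satisfy both.
 */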
2805 static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa)
2806 {
2807         return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
2808                 nfit_spa_type(spa) == NFIT_SPA_VCD   ||
2809                 nfit_spa_type(spa) == NFIT_SPA_PDISK ||
2810                 nfit_spa_type(spa) == NFIT_SPA_PCD);
2811 }
2812
2813 static bool nfit_spa_is_volatile(struct acpi_nfit_system_address *spa)
2814 {
2815         return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
2816                 nfit_spa_type(spa) == NFIT_SPA_VCD   ||
2817                 nfit_spa_type(spa) == NFIT_SPA_VOLATILE);
2818 }
2819
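/*
 * Build an nd_region for one SPA range: gather every memdev whose
 * range_index matches into a mapping array, compute the interleave-set
 * cookie, then create a pmem or volatile region as the type dictates
 * (DCR ranges get their BLK region from acpi_nfit_init_mapping()
 * instead).  Since the volatile check runs before the virtual one,
 * VDISK/VCD ranges register as volatile regions and only PDISK/PCD take
 * the virtual pmem path.
 */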
2820 static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
2821                 struct nfit_spa *nfit_spa)
2822 {
2823         static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS];
2824         struct acpi_nfit_system_address *spa = nfit_spa->spa;
2825         struct nd_blk_region_desc ndbr_desc;
2826         struct nd_region_desc *ndr_desc;
2827         struct nfit_memdev *nfit_memdev;
2828         struct nvdimm_bus *nvdimm_bus;
2829         struct resource res;
2830         int count = 0, rc;
2831
2832         if (nfit_spa->nd_region)
2833                 return 0;
2834
2835         if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) {
2836                 dev_dbg(acpi_desc->dev, "detected invalid spa index\n");
2837                 return 0;
2838         }
2839
2840         memset(&res, 0, sizeof(res));
2841         memset(&mappings, 0, sizeof(mappings));
2842         memset(&ndbr_desc, 0, sizeof(ndbr_desc));
2843         res.start = spa->address;
2844         res.end = res.start + spa->length - 1;
2845         ndr_desc = &ndbr_desc.ndr_desc;
2846         ndr_desc->res = &res;
2847         ndr_desc->provider_data = nfit_spa;
2848         ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
2849         if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
2850                 ndr_desc->numa_node = acpi_map_pxm_to_online_node(
2851                                                 spa->proximity_domain);
2852         else
2853                 ndr_desc->numa_node = NUMA_NO_NODE;
2854
2855         /*
2856          * Persistence domain bits are hierarchical: if
2857          * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set, then
2858          * ACPI_NFIT_CAPABILITY_MEM_FLUSH is implied.
2859          */
2860         if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH)
2861                 set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags);
2862         else if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH)
2863                 set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags);
2864
2865         list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
2866                 struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
2867                 struct nd_mapping_desc *mapping;
2868
2869                 if (memdev->range_index != spa->range_index)
2870                         continue;
2871                 if (count >= ND_MAX_MAPPINGS) {
2872                         dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
2873                                         spa->range_index, ND_MAX_MAPPINGS);
2874                         return -ENXIO;
2875                 }
2876                 mapping = &mappings[count++];
2877                 rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc,
2878                                 memdev, nfit_spa);
2879                 if (rc)
2880                         goto out;
2881         }
2882
2883         ndr_desc->mapping = mappings;
2884         ndr_desc->num_mappings = count;
2885         rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
2886         if (rc)
2887                 goto out;
2888
2889         nvdimm_bus = acpi_desc->nvdimm_bus;
2890         if (nfit_spa_type(spa) == NFIT_SPA_PM) {
2891                 rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
2892                 if (rc) {
2893                         dev_warn(acpi_desc->dev,
2894                                 "failed to insert pmem resource to iomem: %d\n",
2895                                 rc);
2896                         goto out;
2897                 }
2898
2899                 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
2900                                 ndr_desc);
2901                 if (!nfit_spa->nd_region)
2902                         rc = -ENOMEM;
2903         } else if (nfit_spa_is_volatile(spa)) {
2904                 nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
2905                                 ndr_desc);
2906                 if (!nfit_spa->nd_region)
2907                         rc = -ENOMEM;
2908         } else if (nfit_spa_is_virtual(spa)) {
2909                 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
2910                                 ndr_desc);
2911                 if (!nfit_spa->nd_region)
2912                         rc = -ENOMEM;
2913         }
2914
2915  out:
2916         if (rc)
2917                 dev_err(acpi_desc->dev, "failed to register spa range %d\n",
2918                                 nfit_spa->spa->range_index);
2919         return rc;
2920 }
2921
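/*
 * The single ars_status buffer is sized to max_ars, the largest
 * max_ars_out reported by any range (see acpi_nfit_init_ars()), and is
 * zeroed and reused across queries rather than reallocated.
 */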
2922 static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc)
2923 {
2924         struct device *dev = acpi_desc->dev;
2925         struct nd_cmd_ars_status *ars_status;
2926
2927         if (acpi_desc->ars_status) {
2928                 memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
2929                 return 0;
2930         }
2931
2932         ars_status = devm_kzalloc(dev, acpi_desc->max_ars, GFP_KERNEL);
2933         if (!ars_status)
2934                 return -ENOMEM;
2935         acpi_desc->ars_status = ars_status;
2936         return 0;
2937 }
2938
2939 static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc)
2940 {
2941         int rc;
2942
2943         if (ars_status_alloc(acpi_desc))
2944                 return -ENOMEM;
2945
2946         rc = ars_get_status(acpi_desc);
2947
2948         if (rc < 0 && rc != -ENOSPC)
2949                 return rc;
2950
2951         if (ars_status_process_records(acpi_desc))
2952                 dev_err(acpi_desc->dev, "Failed to process ARS records\n");
2953
2954         return rc;
2955 }
2956
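/*
 * Register a PM/volatile range and, unless disabled or unsupported,
 * seed its error list: both short and long requests are queued, a short
 * ARS is attempted inline when the engine is idle, and -EBUSY/-ENOMEM/
 * -ENOSPC leave the queued requests for the scrub worker to service.
 * Any other failure marks the range ARS_FAILED.  Registration proceeds
 * regardless of the ARS outcome.
 */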
2957 static int ars_register(struct acpi_nfit_desc *acpi_desc,
2958                 struct nfit_spa *nfit_spa)
2959 {
2960         int rc;
2961
2962         if (no_init_ars || test_bit(ARS_FAILED, &nfit_spa->ars_state))
2963                 return acpi_nfit_register_region(acpi_desc, nfit_spa);
2964
2965         set_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
2966         set_bit(ARS_REQ_LONG, &nfit_spa->ars_state);
2967
2968         switch (acpi_nfit_query_poison(acpi_desc)) {
2969         case 0:
2970         case -EAGAIN:
2971                 rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT);
2972                 /* ars_start() busy is unexpected here; leave the request bits set and retry later */
2973                 if (rc == -EBUSY)
2974                         break;
2975                 if (rc) {
2976                         set_bit(ARS_FAILED, &nfit_spa->ars_state);
2977                         break;
2978                 }
2979                 clear_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
2980                 rc = acpi_nfit_query_poison(acpi_desc);
2981                 if (rc)
2982                         break;
2983                 acpi_desc->scrub_spa = nfit_spa;
2984                 ars_complete(acpi_desc, nfit_spa);
2985                 /*
2986                  * If ars_complete() says we didn't complete the
2987                  * short scrub, we'll try again with a long
2988                  * request.
2989                  */
2990                 acpi_desc->scrub_spa = NULL;
2991                 break;
2992         case -EBUSY:
2993         case -ENOMEM:
2994         case -ENOSPC:
2995                 /*
2996                  * BIOS was using ARS, wait for it to complete (or
2997                  * resources to become available) and then perform our
2998                  * own scrubs.
2999                  */
3000                 break;
3001         default:
3002                 set_bit(ARS_FAILED, &nfit_spa->ars_state);
3003                 break;
3004         }
3005
3006         return acpi_nfit_register_region(acpi_desc, nfit_spa);
3007 }
3008
3009 static void ars_complete_all(struct acpi_nfit_desc *acpi_desc)
3010 {
3011         struct nfit_spa *nfit_spa;
3012
3013         list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
3014                 if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
3015                         continue;
3016                 ars_complete(acpi_desc, nfit_spa);
3017         }
3018 }
3019
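/*
 * One pass of the scrub state machine, called with init_mutex held.
 * Returns the delay in seconds before the worker should run again, or
 * 0 when nothing is outstanding: -EBUSY from the status query backs off
 * exponentially (capped at 30 minutes), -ENOSPC means more records are
 * pending so the ARS is continued and re-polled in one second, and
 * otherwise the next queued short or long request is started, short
 * requests first.
 */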
3020 static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
3021                 int query_rc)
3022 {
3023         unsigned int tmo = acpi_desc->scrub_tmo;
3024         struct device *dev = acpi_desc->dev;
3025         struct nfit_spa *nfit_spa;
3026
3027         lockdep_assert_held(&acpi_desc->init_mutex);
3028
3029         if (acpi_desc->cancel)
3030                 return 0;
3031
3032         if (query_rc == -EBUSY) {
3033                 dev_dbg(dev, "ARS: ARS busy\n");
3034                 return min(30U * 60U, tmo * 2);
3035         }
3036         if (query_rc == -ENOSPC) {
3037                 dev_dbg(dev, "ARS: ARS continue\n");
3038                 ars_continue(acpi_desc);
3039                 return 1;
3040         }
3041         if (query_rc && query_rc != -EAGAIN) {
3042                 unsigned long long addr, end;
3043
3044                 addr = acpi_desc->ars_status->address;
3045                 end = addr + acpi_desc->ars_status->length;
3046                 dev_dbg(dev, "ARS: %llx-%llx failed (%d)\n", addr, end,
3047                                 query_rc);
3048         }
3049
3050         ars_complete_all(acpi_desc);
3051         list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
3052                 enum nfit_ars_state req_type;
3053                 int rc;
3054
3055                 if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
3056                         continue;
3057
3058                 /* service short ARS requests before long ones */
3059                 if (test_bit(ARS_REQ_SHORT, &nfit_spa->ars_state))
3060                         req_type = ARS_REQ_SHORT;
3061                 else if (test_bit(ARS_REQ_LONG, &nfit_spa->ars_state))
3062                         req_type = ARS_REQ_LONG;
3063                 else
3064                         continue;
3065                 rc = ars_start(acpi_desc, nfit_spa, req_type);
3066
3067                 dev = nd_region_dev(nfit_spa->nd_region);
3068                 dev_dbg(dev, "ARS: range %d ARS start %s (%d)\n",
3069                                 nfit_spa->spa->range_index,
3070                                 req_type == ARS_REQ_SHORT ? "short" : "long",
3071                                 rc);
3072                 /*
3073                  * Hmm, we raced someone else starting ARS? Try again in
3074                  * a bit.
3075                  */
3076                 if (rc == -EBUSY)
3077                         return 1;
3078                 if (rc == 0) {
3079                         dev_WARN_ONCE(dev, acpi_desc->scrub_spa,
3080                                         "scrub start while range %d active\n",
3081                                         acpi_desc->scrub_spa->spa->range_index);
3082                         clear_bit(req_type, &nfit_spa->ars_state);
3083                         acpi_desc->scrub_spa = nfit_spa;
3084                         /*
3085                          * Consider this spa last for future scrub
3086                          * requests
3087                          */
3088                         list_move_tail(&nfit_spa->list, &acpi_desc->spas);
3089                         return 1;
3090                 }
3091
3092                 dev_err(dev, "ARS: range %d ARS failed (%d)\n",
3093                                 nfit_spa->spa->range_index, rc);
3094                 set_bit(ARS_FAILED, &nfit_spa->ars_state);
3095         }
3096         return 0;
3097 }
3098
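/* (Re)arm the scrub worker; tmo is in seconds, 0 queues immediately. */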
3099 static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo)
3100 {
3101         lockdep_assert_held(&acpi_desc->init_mutex);
3102
3103         acpi_desc->scrub_busy = 1;
3104         /* note: scrub_tmo should only be set from within the workqueue */
3105         if (tmo)
3106                 acpi_desc->scrub_tmo = tmo;
3107         queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
3108 }
3109
3110 static void sched_ars(struct acpi_nfit_desc *acpi_desc)
3111 {
3112         __sched_ars(acpi_desc, 0);
3113 }
3114
3115 static void notify_ars_done(struct acpi_nfit_desc *acpi_desc)
3116 {
3117         lockdep_assert_held(&acpi_desc->init_mutex);
3118
3119         acpi_desc->scrub_busy = 0;
3120         acpi_desc->scrub_count++;
3121         if (acpi_desc->scrub_count_state)
3122                 sysfs_notify_dirent(acpi_desc->scrub_count_state);
3123 }
3124
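/*
 * Delayed-work entry point: poll ARS status under init_mutex, advance
 * the state machine, then either re-arm with the returned timeout or
 * bump the sysfs-visible scrub count to signal completion.
 */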
3125 static void acpi_nfit_scrub(struct work_struct *work)
3126 {
3127         struct acpi_nfit_desc *acpi_desc;
3128         unsigned int tmo;
3129         int query_rc;
3130
3131         acpi_desc = container_of(work, typeof(*acpi_desc), dwork.work);
3132         mutex_lock(&acpi_desc->init_mutex);
3133         query_rc = acpi_nfit_query_poison(acpi_desc);
3134         tmo = __acpi_nfit_scrub(acpi_desc, query_rc);
3135         if (tmo)
3136                 __sched_ars(acpi_desc, tmo);
3137         else
3138                 notify_ars_done(acpi_desc);
3139         memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
3140         mutex_unlock(&acpi_desc->init_mutex);
3141 }
3142
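/*
 * Probe ARS capability for a range.  The range starts out ARS_FAILED
 * and is only cleared once ars_get_cap() succeeds and the scrub types
 * advertised in bits 16-31 of the status match the SPA type.
 */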
3143 static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc,
3144                 struct nfit_spa *nfit_spa)
3145 {
3146         int type = nfit_spa_type(nfit_spa->spa);
3147         struct nd_cmd_ars_cap ars_cap;
3148         int rc;
3149
3150         set_bit(ARS_FAILED, &nfit_spa->ars_state);
3151         memset(&ars_cap, 0, sizeof(ars_cap));
3152         rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
3153         if (rc < 0)
3154                 return;
3155         /* check that the supported scrub types match the spa type */
3156         if (type == NFIT_SPA_VOLATILE && ((ars_cap.status >> 16)
3157                                 & ND_ARS_VOLATILE) == 0)
3158                 return;
3159         if (type == NFIT_SPA_PM && ((ars_cap.status >> 16)
3160                                 & ND_ARS_PERSISTENT) == 0)
3161                 return;
3162
3163         nfit_spa->max_ars = ars_cap.max_ars_out;
3164         nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
3165         acpi_desc->max_ars = max(nfit_spa->max_ars, acpi_desc->max_ars);
3166         clear_bit(ARS_FAILED, &nfit_spa->ars_state);
3167 }
3168
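/*
 * Two passes over the SPA list: first size ARS capability for every
 * PM/volatile range so max_ars reflects the largest buffer needed, then
 * register regions (kicking off initial ARS where supported) and
 * schedule the scrub worker once at the end.
 */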
3169 static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
3170 {
3171         struct nfit_spa *nfit_spa;
3172         int rc;
3173
3174         list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
3175                 switch (nfit_spa_type(nfit_spa->spa)) {
3176                 case NFIT_SPA_VOLATILE:
3177                 case NFIT_SPA_PM:
3178                         acpi_nfit_init_ars(acpi_desc, nfit_spa);
3179                         break;
3180                 }
3181         }
3182
3183         list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
3184                 switch (nfit_spa_type(nfit_spa->spa)) {
3185                 case NFIT_SPA_VOLATILE:
3186                 case NFIT_SPA_PM:
3187                         /* register regions and kick off initial ARS run */
3188                         rc = ars_register(acpi_desc, nfit_spa);
3189                         if (rc)
3190                                 return rc;
3191                         break;
3192                 case NFIT_SPA_BDW:
3193                         /* nothing to register */
3194                         break;
3195                 case NFIT_SPA_DCR:
3196                 case NFIT_SPA_VDISK:
3197                 case NFIT_SPA_VCD:
3198                 case NFIT_SPA_PDISK:
3199                 case NFIT_SPA_PCD:
3200                         /* register known regions that don't support ARS */
3201                         rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
3202                         if (rc)
3203                                 return rc;
3204                         break;
3205                 default:
3206                         /* don't register unknown regions */
3207                         break;
3208                 }
3209
3210         sched_ars(acpi_desc);
3211         return 0;
3212 }
3213
3214 static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
3215                 struct nfit_table_prev *prev)
3216 {
3217         struct device *dev = acpi_desc->dev;
3218
3219         if (!list_empty(&prev->spas) ||
3220                         !list_empty(&prev->memdevs) ||
3221                         !list_empty(&prev->dcrs) ||
3222                         !list_empty(&prev->bdws) ||
3223                         !list_empty(&prev->idts) ||
3224                         !list_empty(&prev->flushes)) {
3225                 dev_err(dev, "new nfit deletes entries (unsupported)\n");
3226                 return -ENXIO;
3227         }
3228         return 0;
3229 }
3230
3231 static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc)
3232 {
3233         struct device *dev = acpi_desc->dev;
3234         struct kernfs_node *nfit;
3235         struct device *bus_dev;
3236
3237         if (!ars_supported(acpi_desc->nvdimm_bus))
3238                 return 0;
3239
3240         bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
3241         nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit");
3242         if (!nfit) {
3243                 dev_err(dev, "sysfs_get_dirent 'nfit' failed\n");
3244                 return -ENODEV;
3245         }
3246         acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub");
3247         sysfs_put(nfit);
3248         if (!acpi_desc->scrub_count_state) {
3249                 dev_err(dev, "sysfs_get_dirent 'scrub' failed\n");
3250                 return -ENODEV;
3251         }
3252
3253         return 0;
3254 }
3255
3256 static void acpi_nfit_unregister(void *data)
3257 {
3258         struct acpi_nfit_desc *acpi_desc = data;
3259
3260         nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
3261 }
3262
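/*
 * Parse an NFIT buffer and register (or re-register) the resulting bus,
 * DIMMs, and regions.  The function supports incremental updates: all
 * previously seen sub-tables are moved to 'prev' lists and add_table()
 * re-links any entry it encounters again, so only deletions (entries
 * left stranded on a prev list) are rejected.
 *
 * A minimal sketch of a caller, modeled on acpi_nfit_add() below; the
 * 'my_dev', 'nfit_buf', and 'nfit_size' names are illustrative, not
 * part of this file:
 *
 *	struct acpi_nfit_desc *acpi_desc;
 *	int rc;
 *
 *	acpi_desc = devm_kzalloc(my_dev, sizeof(*acpi_desc), GFP_KERNEL);
 *	if (!acpi_desc)
 *		return -ENOMEM;
 *	acpi_nfit_desc_init(acpi_desc, my_dev);
 *	rc = acpi_nfit_init(acpi_desc, nfit_buf, nfit_size);
 *	if (rc)
 *		return rc;
 *	return devm_add_action_or_reset(my_dev, acpi_nfit_shutdown,
 *			acpi_desc);
 */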
3263 int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
3264 {
3265         struct device *dev = acpi_desc->dev;
3266         struct nfit_table_prev prev;
3267         const void *end;
3268         int rc;
3269
3270         if (!acpi_desc->nvdimm_bus) {
3271                 acpi_nfit_init_dsms(acpi_desc);
3272
3273                 acpi_desc->nvdimm_bus = nvdimm_bus_register(dev,
3274                                 &acpi_desc->nd_desc);
3275                 if (!acpi_desc->nvdimm_bus)
3276                         return -ENOMEM;
3277
3278                 rc = devm_add_action_or_reset(dev, acpi_nfit_unregister,
3279                                 acpi_desc);
3280                 if (rc)
3281                         return rc;
3282
3283                 rc = acpi_nfit_desc_init_scrub_attr(acpi_desc);
3284                 if (rc)
3285                         return rc;
3286
3287                 /* register this acpi_desc for mce notifications */
3288                 mutex_lock(&acpi_desc_lock);
3289                 list_add_tail(&acpi_desc->list, &acpi_descs);
3290                 mutex_unlock(&acpi_desc_lock);
3291         }
3292
3293         mutex_lock(&acpi_desc->init_mutex);
3294
3295         INIT_LIST_HEAD(&prev.spas);
3296         INIT_LIST_HEAD(&prev.memdevs);
3297         INIT_LIST_HEAD(&prev.dcrs);
3298         INIT_LIST_HEAD(&prev.bdws);
3299         INIT_LIST_HEAD(&prev.idts);
3300         INIT_LIST_HEAD(&prev.flushes);
3301
3302         list_cut_position(&prev.spas, &acpi_desc->spas,
3303                                 acpi_desc->spas.prev);
3304         list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
3305                                 acpi_desc->memdevs.prev);
3306         list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
3307                                 acpi_desc->dcrs.prev);
3308         list_cut_position(&prev.bdws, &acpi_desc->bdws,
3309                                 acpi_desc->bdws.prev);
3310         list_cut_position(&prev.idts, &acpi_desc->idts,
3311                                 acpi_desc->idts.prev);
3312         list_cut_position(&prev.flushes, &acpi_desc->flushes,
3313                                 acpi_desc->flushes.prev);
3314
3315         end = data + sz;
3316         while (!IS_ERR_OR_NULL(data))
3317                 data = add_table(acpi_desc, &prev, data, end);
3318
3319         if (IS_ERR(data)) {
3320                 dev_dbg(dev, "nfit table parsing error: %ld\n", PTR_ERR(data));
3321                 rc = PTR_ERR(data);
3322                 goto out_unlock;
3323         }
3324
3325         rc = acpi_nfit_check_deletions(acpi_desc, &prev);
3326         if (rc)
3327                 goto out_unlock;
3328
3329         rc = nfit_mem_init(acpi_desc);
3330         if (rc)
3331                 goto out_unlock;
3332
3333         rc = acpi_nfit_register_dimms(acpi_desc);
3334         if (rc)
3335                 goto out_unlock;
3336
3337         rc = acpi_nfit_register_regions(acpi_desc);
3338
3339  out_unlock:
3340         mutex_unlock(&acpi_desc->init_mutex);
3341         return rc;
3342 }
3343 EXPORT_SYMBOL_GPL(acpi_nfit_init);
3344
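/*
 * 'flush_probe' callback: bouncing the device lock and init_mutex in
 * turn guarantees that any acpi_nfit_add()/acpi_nfit_notify() work and
 * any in-flight acpi_nfit_init() have finished before this returns.
 */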
3345 static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
3346 {
3347         struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
3348         struct device *dev = acpi_desc->dev;
3349
3350         /* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
3351         device_lock(dev);
3352         device_unlock(dev);
3353
3354         /* Bounce the init_mutex to complete initial registration */
3355         mutex_lock(&acpi_desc->init_mutex);
3356         mutex_unlock(&acpi_desc->init_mutex);
3357
3358         return 0;
3359 }
3360
3361 static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
3362                 struct nvdimm *nvdimm, unsigned int cmd)
3363 {
3364         struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
3365
3366         if (nvdimm)
3367                 return 0;
3368         if (cmd != ND_CMD_ARS_START)
3369                 return 0;
3370
3371         /*
3372          * The kernel and userspace may race to initiate a scrub, but
3373          * the scrub thread is prepared to lose that initial race.  It
3374          * just needs guarantees that any ARS it initiates are not
3375          * interrupted by any intervening start requests from userspace.
3376          */
3377         if (work_busy(&acpi_desc->dwork.work))
3378                 return -EBUSY;
3379
3380         return 0;
3381 }
3382
3383 /* prevent security commands from being issued via ioctl */
3384 static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
3385                 struct nvdimm *nvdimm, unsigned int cmd, void *buf)
3386 {
3387         struct nd_cmd_pkg *call_pkg = buf;
3388         unsigned int func;
3389
3390         if (nvdimm && cmd == ND_CMD_CALL &&
3391                         call_pkg->nd_family == NVDIMM_FAMILY_INTEL) {
3392                 func = call_pkg->nd_command;
3393                 if ((1 << func) & NVDIMM_INTEL_SECURITY_CMDMASK)
3394                         return -EOPNOTSUPP;
3395         }
3396
3397         return __acpi_nfit_clear_to_send(nd_desc, nvdimm, cmd);
3398 }
3399
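/*
 * Queue a short or long ARS for every eligible PM/volatile range.
 * Returns 0 if at least one range was newly scheduled, -EBUSY if the
 * requested scrub type was already pending on every eligible range, and
 * -ENOTTY if no range is ARS-capable.
 */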
3400 int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
3401                 enum nfit_ars_state req_type)
3402 {
3403         struct device *dev = acpi_desc->dev;
3404         int scheduled = 0, busy = 0;
3405         struct nfit_spa *nfit_spa;
3406
3407         mutex_lock(&acpi_desc->init_mutex);
3408         if (acpi_desc->cancel) {
3409                 mutex_unlock(&acpi_desc->init_mutex);
3410                 return 0;
3411         }
3412
3413         list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
3414                 int type = nfit_spa_type(nfit_spa->spa);
3415
3416                 if (type != NFIT_SPA_PM && type != NFIT_SPA_VOLATILE)
3417                         continue;
3418                 if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
3419                         continue;
3420
3421                 if (test_and_set_bit(req_type, &nfit_spa->ars_state))
3422                         busy++;
3423                 else
3424                         scheduled++;
3425         }
3426         if (scheduled) {
3427                 sched_ars(acpi_desc);
3428                 dev_dbg(dev, "ars_scan triggered\n");
3429         }
3430         mutex_unlock(&acpi_desc->init_mutex);
3431
3432         if (scheduled)
3433                 return 0;
3434         if (busy)
3435                 return -EBUSY;
3436         return -ENOTTY;
3437 }
3438
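/*
 * One-time initialization of an acpi_nfit_desc: wire up the bus
 * descriptor callbacks, the sub-table lists, and the delayed scrub
 * work.  Must run before the descriptor is passed to acpi_nfit_init().
 */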
3439 void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
3440 {
3441         struct nvdimm_bus_descriptor *nd_desc;
3442
3443         dev_set_drvdata(dev, acpi_desc);
3444         acpi_desc->dev = dev;
3445         acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
3446         nd_desc = &acpi_desc->nd_desc;
3447         nd_desc->provider_name = "ACPI.NFIT";
3448         nd_desc->module = THIS_MODULE;
3449         nd_desc->ndctl = acpi_nfit_ctl;
3450         nd_desc->flush_probe = acpi_nfit_flush_probe;
3451         nd_desc->clear_to_send = acpi_nfit_clear_to_send;
3452         nd_desc->attr_groups = acpi_nfit_attribute_groups;
3453
3454         INIT_LIST_HEAD(&acpi_desc->spas);
3455         INIT_LIST_HEAD(&acpi_desc->dcrs);
3456         INIT_LIST_HEAD(&acpi_desc->bdws);
3457         INIT_LIST_HEAD(&acpi_desc->idts);
3458         INIT_LIST_HEAD(&acpi_desc->flushes);
3459         INIT_LIST_HEAD(&acpi_desc->memdevs);
3460         INIT_LIST_HEAD(&acpi_desc->dimms);
3461         INIT_LIST_HEAD(&acpi_desc->list);
3462         mutex_init(&acpi_desc->init_mutex);
3463         acpi_desc->scrub_tmo = 1;
3464         INIT_DELAYED_WORK(&acpi_desc->dwork, acpi_nfit_scrub);
3465 }
3466 EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);
3467
3468 static void acpi_nfit_put_table(void *table)
3469 {
3470         acpi_put_table(table);
3471 }
3472
3473 void acpi_nfit_shutdown(void *data)
3474 {
3475         struct acpi_nfit_desc *acpi_desc = data;
3476         struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
3477
3478         /*
3479          * Tear down under acpi_desc_lock so that nfit_handle_mce() does
3480          * not race list removal.
3481          */
3482         mutex_lock(&acpi_desc_lock);
3483         list_del(&acpi_desc->list);
3484         mutex_unlock(&acpi_desc_lock);
3485
3486         mutex_lock(&acpi_desc->init_mutex);
3487         acpi_desc->cancel = 1;
3488         cancel_delayed_work_sync(&acpi_desc->dwork);
3489         mutex_unlock(&acpi_desc->init_mutex);
3490
3491         /*
3492          * Bounce the nvdimm bus lock to make sure any in-flight
3493          * acpi_nfit_ars_rescan() submissions have had a chance to
3494          * either submit or see ->cancel set.
3495          */
3496         device_lock(bus_dev);
3497         device_unlock(bus_dev);
3498
3499         flush_workqueue(nfit_wq);
3500 }
3501 EXPORT_SYMBOL_GPL(acpi_nfit_shutdown);
3502
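/*
 * Driver probe: prefer the dynamic _FIT object, which reflects hotplug
 * updates, over the static NFIT; otherwise parse the ACPI table minus
 * its lead-in header.  A missing NFIT is not fatal since NVDIMMs may be
 * hotplugged later.
 */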
3503 static int acpi_nfit_add(struct acpi_device *adev)
3504 {
3505         struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
3506         struct acpi_nfit_desc *acpi_desc;
3507         struct device *dev = &adev->dev;
3508         struct acpi_table_header *tbl;
3509         acpi_status status = AE_OK;
3510         acpi_size sz;
3511         int rc = 0;
3512
3513         status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl);
3514         if (ACPI_FAILURE(status)) {
3515                 /* This is OK; an NVDIMM may be hotplugged later */
3516                 dev_dbg(dev, "failed to find NFIT at startup\n");
3517                 return 0;
3518         }
3519
3520         rc = devm_add_action_or_reset(dev, acpi_nfit_put_table, tbl);
3521         if (rc)
3522                 return rc;
3523         sz = tbl->length;
3524
3525         acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
3526         if (!acpi_desc)
3527                 return -ENOMEM;
3528         acpi_nfit_desc_init(acpi_desc, &adev->dev);
3529
3530         /* Save the acpi header for exporting the revision via sysfs */
3531         acpi_desc->acpi_header = *tbl;
3532
3533         /* Evaluate _FIT and override with that if present */
3534         status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
3535         if (ACPI_SUCCESS(status) && buf.length > 0) {
3536                 union acpi_object *obj = buf.pointer;
3537
3538                 if (obj->type == ACPI_TYPE_BUFFER)
3539                         rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
3540                                         obj->buffer.length);
3541                 else
3542                         dev_dbg(dev, "invalid type %d, ignoring _FIT\n",
3543                                 (int) obj->type);
3544                 kfree(buf.pointer);
3545         } else
3546                 /* skip over the lead-in header table */
3547                 rc = acpi_nfit_init(acpi_desc, (void *) tbl
3548                                 + sizeof(struct acpi_table_nfit),
3549                                 sz - sizeof(struct acpi_table_nfit));
3550
3551         if (rc)
3552                 return rc;
3553         return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc);
3554 }
3555
3556 static int acpi_nfit_remove(struct acpi_device *adev)
3557 {
3558         /* see acpi_nfit_unregister */
3559         return 0;
3560 }
3561
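/*
 * NFIT update notification: re-evaluate _FIT and merge the new tables
 * through acpi_nfit_init(), allocating a fresh descriptor if the
 * initial probe never found an NFIT.  Pending registration work is
 * flushed first so updates are applied in order.
 */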
3562 static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle)
3563 {
3564         struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
3565         struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
3566         union acpi_object *obj;
3567         acpi_status status;
3568         int ret;
3569
3570         if (!dev->driver) {
3571                 /* dev->driver may be null if we're being removed */
3572                 dev_dbg(dev, "no driver found for dev\n");
3573                 return;
3574         }
3575
3576         if (!acpi_desc) {
3577                 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
3578                 if (!acpi_desc)
3579                         return;
3580                 acpi_nfit_desc_init(acpi_desc, dev);
3581         } else {
3582                 /*
3583                  * Finish previous registration before considering new
3584                  * regions.
3585                  */
3586                 flush_workqueue(nfit_wq);
3587         }
3588
3589         /* Evaluate _FIT */
3590         status = acpi_evaluate_object(handle, "_FIT", NULL, &buf);
3591         if (ACPI_FAILURE(status)) {
3592                 dev_err(dev, "failed to evaluate _FIT\n");
3593                 return;
3594         }
3595
3596         obj = buf.pointer;
3597         if (obj->type == ACPI_TYPE_BUFFER) {
3598                 ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
3599                                 obj->buffer.length);
3600                 if (ret)
3601                         dev_err(dev, "failed to merge updated NFIT\n");
3602         } else
3603                 dev_err(dev, "Invalid _FIT\n");
3604         kfree(buf.pointer);
3605 }
3606
3607 static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle)
3608 {
3609         struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
3610
3611         if (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON)
3612                 acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
3613         else
3614                 acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_SHORT);
3615 }
3616
3617 void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
3618 {
3619         dev_dbg(dev, "event: 0x%x\n", event);
3620
3621         switch (event) {
3622         case NFIT_NOTIFY_UPDATE:
3623                 return acpi_nfit_update_notify(dev, handle);
3624         case NFIT_NOTIFY_UC_MEMORY_ERROR:
3625                 return acpi_nfit_uc_error_notify(dev, handle);
3626         default:
3627                 return;
3628         }
3629 }
3630 EXPORT_SYMBOL_GPL(__acpi_nfit_notify);
3631
3632 static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
3633 {
3634         device_lock(&adev->dev);
3635         __acpi_nfit_notify(&adev->dev, adev->handle, event);
3636         device_unlock(&adev->dev);
3637 }
3638
3639 static const struct acpi_device_id acpi_nfit_ids[] = {
3640         { "ACPI0012", 0 },
3641         { "", 0 },
3642 };
3643 MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);
3644
3645 static struct acpi_driver acpi_nfit_driver = {
3646         .name = KBUILD_MODNAME,
3647         .ids = acpi_nfit_ids,
3648         .ops = {
3649                 .add = acpi_nfit_add,
3650                 .remove = acpi_nfit_remove,
3651                 .notify = acpi_nfit_notify,
3652         },
3653 };
3654
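/*
 * The BUILD_BUG_ON()s below pin the ACPICA sub-table sizes this parser
 * was written against; a size change would silently skew the offsets
 * used while walking the firmware tables.
 */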
3655 static __init int nfit_init(void)
3656 {
3657         int ret;
3658
3659         BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
3660         BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
3661         BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
3662         BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
3663         BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
3664         BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
3665         BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);
3666         BUILD_BUG_ON(sizeof(struct acpi_nfit_capabilities) != 16);
3667
3668         guid_parse(UUID_VOLATILE_MEMORY, &nfit_uuid[NFIT_SPA_VOLATILE]);
3669         guid_parse(UUID_PERSISTENT_MEMORY, &nfit_uuid[NFIT_SPA_PM]);
3670         guid_parse(UUID_CONTROL_REGION, &nfit_uuid[NFIT_SPA_DCR]);
3671         guid_parse(UUID_DATA_REGION, &nfit_uuid[NFIT_SPA_BDW]);
3672         guid_parse(UUID_VOLATILE_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_VDISK]);
3673         guid_parse(UUID_VOLATILE_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_VCD]);
3674         guid_parse(UUID_PERSISTENT_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_PDISK]);
3675         guid_parse(UUID_PERSISTENT_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_PCD]);
3676         guid_parse(UUID_NFIT_BUS, &nfit_uuid[NFIT_DEV_BUS]);
3677         guid_parse(UUID_NFIT_DIMM, &nfit_uuid[NFIT_DEV_DIMM]);
3678         guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
3679         guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
3680         guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
3681
3682         nfit_wq = create_singlethread_workqueue("nfit");
3683         if (!nfit_wq)
3684                 return -ENOMEM;
3685
3686         nfit_mce_register();
3687         ret = acpi_bus_register_driver(&acpi_nfit_driver);
3688         if (ret) {
3689                 nfit_mce_unregister();
3690                 destroy_workqueue(nfit_wq);
3691         }
3692
3693         return ret;
3695 }
3696
3697 static __exit void nfit_exit(void)
3698 {
3699         nfit_mce_unregister();
3700         acpi_bus_unregister_driver(&acpi_nfit_driver);
3701         destroy_workqueue(nfit_wq);
3702         WARN_ON(!list_empty(&acpi_descs));
3703 }
3704
3705 module_init(nfit_init);
3706 module_exit(nfit_exit);
3707 MODULE_LICENSE("GPL v2");
3708 MODULE_AUTHOR("Intel Corporation");