HMAT: Register attributes for memory hot add
[linux-2.6-microblaze.git] / drivers/acpi/hmat/hmat.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019, Intel Corporation.
 *
 * Heterogeneous Memory Attributes Table (HMAT) representation
 *
 * This program parses and reports the platform's HMAT tables, and registers
 * the applicable attributes with the node's interfaces.
 */

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/memory.h>
#include <linux/mutex.h>
#include <linux/node.h>
#include <linux/sysfs.h>

static u8 hmat_revision;

static LIST_HEAD(targets);
static LIST_HEAD(initiators);
static LIST_HEAD(localities);

static DEFINE_MUTEX(target_lock);

/*
 * The defined enum order is used to prioritize attributes to break ties when
 * selecting the best performing node.
 */
enum locality_types {
	WRITE_LATENCY,
	READ_LATENCY,
	WRITE_BANDWIDTH,
	READ_BANDWIDTH,
};

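/*
 * One slot per enum locality_types entry; each slot points at the locality
 * table currently supplying that attribute, so a later table of the same
 * data type simply takes over the slot.
 */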
static struct memory_locality *localities_types[4];

struct target_cache {
	struct list_head node;
	struct node_cache_attrs cache_attrs;
};

struct memory_target {
	struct list_head node;
	unsigned int memory_pxm;
	unsigned int processor_pxm;
	struct node_hmem_attrs hmem_attrs;
	struct list_head caches;
	bool registered;
};

struct memory_initiator {
	struct list_head node;
	unsigned int processor_pxm;
};

struct memory_locality {
	struct list_head node;
	struct acpi_hmat_locality *hmat_loc;
};

static struct memory_initiator *find_mem_initiator(unsigned int cpu_pxm)
{
	struct memory_initiator *initiator;

	list_for_each_entry(initiator, &initiators, node)
		if (initiator->processor_pxm == cpu_pxm)
			return initiator;
	return NULL;
}

static struct memory_target *find_mem_target(unsigned int mem_pxm)
{
	struct memory_target *target;

	list_for_each_entry(target, &targets, node)
		if (target->memory_pxm == mem_pxm)
			return target;
	return NULL;
}

static __init void alloc_memory_initiator(unsigned int cpu_pxm)
{
	struct memory_initiator *initiator;

	if (pxm_to_node(cpu_pxm) == NUMA_NO_NODE)
		return;

	initiator = find_mem_initiator(cpu_pxm);
	if (initiator)
		return;

	initiator = kzalloc(sizeof(*initiator), GFP_KERNEL);
	if (!initiator)
		return;

	initiator->processor_pxm = cpu_pxm;
	list_add_tail(&initiator->node, &initiators);
}

static __init void alloc_memory_target(unsigned int mem_pxm)
{
	struct memory_target *target;

	if (pxm_to_node(mem_pxm) == NUMA_NO_NODE)
		return;

	target = find_mem_target(mem_pxm);
	if (target)
		return;

	target = kzalloc(sizeof(*target), GFP_KERNEL);
	if (!target)
		return;

	target->memory_pxm = mem_pxm;
	target->processor_pxm = PXM_INVAL;
	list_add_tail(&target->node, &targets);
	INIT_LIST_HEAD(&target->caches);
}
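
/*
 * Both allocators quietly skip proximity domains that SRAT did not map to a
 * NUMA node: pxm_to_node() returning NUMA_NO_NODE filters them out before
 * anything is tracked.
 */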

static __init const char *hmat_data_type(u8 type)
{
	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
		return "Access Latency";
	case ACPI_HMAT_READ_LATENCY:
		return "Read Latency";
	case ACPI_HMAT_WRITE_LATENCY:
		return "Write Latency";
	case ACPI_HMAT_ACCESS_BANDWIDTH:
		return "Access Bandwidth";
	case ACPI_HMAT_READ_BANDWIDTH:
		return "Read Bandwidth";
	case ACPI_HMAT_WRITE_BANDWIDTH:
		return "Write Bandwidth";
	default:
		return "Reserved";
	}
}

static __init const char *hmat_data_type_suffix(u8 type)
{
	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
	case ACPI_HMAT_READ_LATENCY:
	case ACPI_HMAT_WRITE_LATENCY:
		return " nsec";
	case ACPI_HMAT_ACCESS_BANDWIDTH:
	case ACPI_HMAT_READ_BANDWIDTH:
	case ACPI_HMAT_WRITE_BANDWIDTH:
		return " MB/s";
	default:
		return "";
	}
}

static u32 hmat_normalize(u16 entry, u64 base, u8 type)
{
	u32 value;

	/*
	 * Check for invalid and overflow values
	 */
	if (entry == 0xffff || !entry)
		return 0;
	else if (base > (UINT_MAX / (entry)))
		return 0;

	/*
	 * Divide by the base unit for revision 1; for revision 2, convert
	 * latency from picoseconds to nanoseconds.
	 */
	value = entry * base;
	if (hmat_revision == 1) {
		if (value < 10)
			return 0;
		value = DIV_ROUND_UP(value, 10);
	} else if (hmat_revision == 2) {
		switch (type) {
		case ACPI_HMAT_ACCESS_LATENCY:
		case ACPI_HMAT_READ_LATENCY:
		case ACPI_HMAT_WRITE_LATENCY:
			value = DIV_ROUND_UP(value, 1000);
			break;
		default:
			break;
		}
	}
	return value;
}
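
/*
 * Worked example with hypothetical table values: for revision 1, an entry
 * of 15 with an entry_base_unit of 100 yields 15 * 100 = 1500, which the
 * revision-1 rule reduces to DIV_ROUND_UP(1500, 10) = 150. For revision 2
 * latency types the same raw 1500 is treated as picoseconds and reported
 * as DIV_ROUND_UP(1500, 1000) = 2 nanoseconds.
 */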

static void hmat_update_target_access(struct memory_target *target,
				      u8 type, u32 value)
{
	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
		target->hmem_attrs.read_latency = value;
		target->hmem_attrs.write_latency = value;
		break;
	case ACPI_HMAT_READ_LATENCY:
		target->hmem_attrs.read_latency = value;
		break;
	case ACPI_HMAT_WRITE_LATENCY:
		target->hmem_attrs.write_latency = value;
		break;
	case ACPI_HMAT_ACCESS_BANDWIDTH:
		target->hmem_attrs.read_bandwidth = value;
		target->hmem_attrs.write_bandwidth = value;
		break;
	case ACPI_HMAT_READ_BANDWIDTH:
		target->hmem_attrs.read_bandwidth = value;
		break;
	case ACPI_HMAT_WRITE_BANDWIDTH:
		target->hmem_attrs.write_bandwidth = value;
		break;
	default:
		break;
	}
}

static __init void hmat_add_locality(struct acpi_hmat_locality *hmat_loc)
{
	struct memory_locality *loc;

	loc = kzalloc(sizeof(*loc), GFP_KERNEL);
	if (!loc) {
		pr_notice_once("Failed to allocate HMAT locality\n");
		return;
	}

	loc->hmat_loc = hmat_loc;
	list_add_tail(&loc->node, &localities);

	switch (hmat_loc->data_type) {
	case ACPI_HMAT_ACCESS_LATENCY:
		localities_types[READ_LATENCY] = loc;
		localities_types[WRITE_LATENCY] = loc;
		break;
	case ACPI_HMAT_READ_LATENCY:
		localities_types[READ_LATENCY] = loc;
		break;
	case ACPI_HMAT_WRITE_LATENCY:
		localities_types[WRITE_LATENCY] = loc;
		break;
	case ACPI_HMAT_ACCESS_BANDWIDTH:
		localities_types[READ_BANDWIDTH] = loc;
		localities_types[WRITE_BANDWIDTH] = loc;
		break;
	case ACPI_HMAT_READ_BANDWIDTH:
		localities_types[READ_BANDWIDTH] = loc;
		break;
	case ACPI_HMAT_WRITE_BANDWIDTH:
		localities_types[WRITE_BANDWIDTH] = loc;
		break;
	default:
		break;
	}
}

static __init int hmat_parse_locality(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	struct acpi_hmat_locality *hmat_loc = (void *)header;
	struct memory_target *target;
	unsigned int init, targ, total_size, ipds, tpds;
	u32 *inits, *targs, value;
	u16 *entries;
	u8 type, mem_hier;

	if (hmat_loc->header.length < sizeof(*hmat_loc)) {
		pr_notice("HMAT: Unexpected locality header length: %d\n",
			  hmat_loc->header.length);
		return -EINVAL;
	}

	type = hmat_loc->data_type;
	mem_hier = hmat_loc->flags & ACPI_HMAT_MEMORY_HIERARCHY;
	ipds = hmat_loc->number_of_initiator_Pds;
	tpds = hmat_loc->number_of_target_Pds;
	total_size = sizeof(*hmat_loc) + sizeof(*entries) * ipds * tpds +
		     sizeof(*inits) * ipds + sizeof(*targs) * tpds;
	if (hmat_loc->header.length < total_size) {
		pr_notice("HMAT: Unexpected locality header length:%d, minimum required:%d\n",
			  hmat_loc->header.length, total_size);
		return -EINVAL;
	}

	pr_info("HMAT: Locality: Flags:%02x Type:%s Initiator Domains:%d Target Domains:%d Base:%lld\n",
		hmat_loc->flags, hmat_data_type(type), ipds, tpds,
		hmat_loc->entry_base_unit);

	inits = (u32 *)(hmat_loc + 1);
	targs = inits + ipds;
	entries = (u16 *)(targs + tpds);
	for (init = 0; init < ipds; init++) {
		alloc_memory_initiator(inits[init]);
		for (targ = 0; targ < tpds; targ++) {
			value = hmat_normalize(entries[init * tpds + targ],
					       hmat_loc->entry_base_unit,
					       type);
			pr_info("  Initiator-Target[%d-%d]:%d%s\n",
				inits[init], targs[targ], value,
				hmat_data_type_suffix(type));

			if (mem_hier == ACPI_HMAT_MEMORY) {
				target = find_mem_target(targs[targ]);
				if (target && target->processor_pxm == inits[init])
					hmat_update_target_access(target, type, value);
			}
		}
	}

	if (mem_hier == ACPI_HMAT_MEMORY)
		hmat_add_locality(hmat_loc);

	return 0;
}
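
/*
 * The locality structure is followed by three packed arrays. For a
 * hypothetical table with 2 initiator and 2 target proximity domains:
 *
 *   u32 inits[2];        initiator proximity domains
 *   u32 targs[2];        target proximity domains
 *   u16 entries[2 * 2];  row-major matrix of raw values
 *
 * The raw value for initiator inits[i] and target targs[t] is read from
 * entries[i * tpds + t], here entries[i * 2 + t].
 */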

static __init int hmat_parse_cache(union acpi_subtable_headers *header,
				   const unsigned long end)
{
	struct acpi_hmat_cache *cache = (void *)header;
	struct memory_target *target;
	struct target_cache *tcache;
	u32 attrs;

	if (cache->header.length < sizeof(*cache)) {
		pr_notice("HMAT: Unexpected cache header length: %d\n",
			  cache->header.length);
		return -EINVAL;
	}

	attrs = cache->cache_attributes;
	pr_info("HMAT: Cache: Domain:%d Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
		cache->memory_PD, cache->cache_size, attrs,
		cache->number_of_SMBIOShandles);

	target = find_mem_target(cache->memory_PD);
	if (!target)
		return 0;

	tcache = kzalloc(sizeof(*tcache), GFP_KERNEL);
	if (!tcache) {
		pr_notice_once("Failed to allocate HMAT cache info\n");
		return 0;
	}

	tcache->cache_attrs.size = cache->cache_size;
	tcache->cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4;
	tcache->cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16;

	switch ((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8) {
	case ACPI_HMAT_CA_DIRECT_MAPPED:
		tcache->cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
		break;
	case ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING:
		tcache->cache_attrs.indexing = NODE_CACHE_INDEXED;
		break;
	case ACPI_HMAT_CA_NONE:
	default:
		tcache->cache_attrs.indexing = NODE_CACHE_OTHER;
		break;
	}

	switch ((attrs & ACPI_HMAT_WRITE_POLICY) >> 12) {
	case ACPI_HMAT_CP_WB:
		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_BACK;
		break;
	case ACPI_HMAT_CP_WT:
		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH;
		break;
	case ACPI_HMAT_CP_NONE:
	default:
		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER;
		break;
	}
	list_add_tail(&tcache->node, &target->caches);

	return 0;
}
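
/*
 * Example decode of a hypothetical cache_attributes value: attrs of
 * 0x00401211 carries cache level 1 (bits 4-7), complex cache indexing
 * (2 in bits 8-11), a write-back policy (1 in bits 12-15), and a
 * 0x0040 = 64 byte cache line (bits 16-31).
 */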

static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *header,
					      const unsigned long end)
{
	struct acpi_hmat_proximity_domain *p = (void *)header;
	struct memory_target *target = NULL;

	if (p->header.length != sizeof(*p)) {
		pr_notice("HMAT: Unexpected address range header length: %d\n",
			  p->header.length);
		return -EINVAL;
	}

	if (hmat_revision == 1)
		pr_info("HMAT: Memory (%#llx length %#llx) Flags:%04x Processor Domain:%d Memory Domain:%d\n",
			p->reserved3, p->reserved4, p->flags, p->processor_PD,
			p->memory_PD);
	else
		pr_info("HMAT: Memory Flags:%04x Processor Domain:%d Memory Domain:%d\n",
			p->flags, p->processor_PD, p->memory_PD);

	if (p->flags & ACPI_HMAT_MEMORY_PD_VALID) {
		target = find_mem_target(p->memory_PD);
		if (!target) {
			pr_debug("HMAT: Memory Domain missing from SRAT\n");
			return -EINVAL;
		}
	}
	if (target && p->flags & ACPI_HMAT_PROCESSOR_PD_VALID) {
		int p_node = pxm_to_node(p->processor_PD);

		if (p_node == NUMA_NO_NODE) {
			pr_debug("HMAT: Invalid Processor Domain\n");
			return -EINVAL;
		}
		/* Store the proximity domain, not the translated node id */
		target->processor_pxm = p->processor_PD;
	}

	return 0;
}

static int __init hmat_parse_subtable(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	struct acpi_hmat_structure *hdr = (void *)header;

	if (!hdr)
		return -EINVAL;

	switch (hdr->type) {
	case ACPI_HMAT_TYPE_PROXIMITY:
		return hmat_parse_proximity_domain(header, end);
	case ACPI_HMAT_TYPE_LOCALITY:
		return hmat_parse_locality(header, end);
	case ACPI_HMAT_TYPE_CACHE:
		return hmat_parse_cache(header, end);
	default:
		return -EINVAL;
	}
}

static __init int srat_parse_mem_affinity(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	struct acpi_srat_mem_affinity *ma = (void *)header;

	if (!ma)
		return -EINVAL;
	if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
		return 0;
	alloc_memory_target(ma->proximity_domain);
	return 0;
}
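
/*
 * Memory targets are seeded from SRAT memory affinity entries before the
 * HMAT is parsed, so an HMAT proximity domain with no enabled SRAT backing
 * never gets a target and is ignored by the parsers above.
 */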

static u32 hmat_initiator_perf(struct memory_target *target,
			       struct memory_initiator *initiator,
			       struct acpi_hmat_locality *hmat_loc)
{
	unsigned int ipds, tpds, i, idx = 0, tdx = 0;
	u32 *inits, *targs;
	u16 *entries;

	ipds = hmat_loc->number_of_initiator_Pds;
	tpds = hmat_loc->number_of_target_Pds;
	inits = (u32 *)(hmat_loc + 1);
	targs = inits + ipds;
	entries = (u16 *)(targs + tpds);

	for (i = 0; i < ipds; i++) {
		if (inits[i] == initiator->processor_pxm) {
			idx = i;
			break;
		}
	}

	if (i == ipds)
		return 0;

	for (i = 0; i < tpds; i++) {
		if (targs[i] == target->memory_pxm) {
			tdx = i;
			break;
		}
	}
	if (i == tpds)
		return 0;

	return hmat_normalize(entries[idx * tpds + tdx],
			      hmat_loc->entry_base_unit,
			      hmat_loc->data_type);
}

static bool hmat_update_best(u8 type, u32 value, u32 *best)
{
	bool updated = false;

	if (!value)
		return false;

	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
	case ACPI_HMAT_READ_LATENCY:
	case ACPI_HMAT_WRITE_LATENCY:
		if (!*best || *best > value) {
			*best = value;
			updated = true;
		}
		break;
	case ACPI_HMAT_ACCESS_BANDWIDTH:
	case ACPI_HMAT_READ_BANDWIDTH:
	case ACPI_HMAT_WRITE_BANDWIDTH:
		if (!*best || *best < value) {
			*best = value;
			updated = true;
		}
		break;
	}

	return updated;
}
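
/*
 * For example, a read latency of 100 replaces a prior best of 150 (lower
 * wins), while a read bandwidth of 150 replaces a prior best of 100
 * (higher wins). A zero value means "no data" and never updates the best.
 */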

static int initiator_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct memory_initiator *ia;
	struct memory_initiator *ib;
	unsigned long *p_nodes = priv;

	ia = list_entry(a, struct memory_initiator, node);
	ib = list_entry(b, struct memory_initiator, node);

	set_bit(ia->processor_pxm, p_nodes);
	set_bit(ib->processor_pxm, p_nodes);

	return ia->processor_pxm - ib->processor_pxm;
}
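
/*
 * Note the comparison doubles as a visitor: every initiator examined
 * during the sort marks its proximity domain in the candidate bitmap
 * passed in through the priv argument, priming p_nodes for the selection
 * loop below.
 */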

static void hmat_register_target_initiators(struct memory_target *target)
{
	static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
	struct memory_initiator *initiator;
	unsigned int mem_nid, cpu_nid;
	struct memory_locality *loc = NULL;
	u32 best = 0;
	int i;

	mem_nid = pxm_to_node(target->memory_pxm);
	/*
	 * If the Address Range Structure provides a local processor pxm, link
	 * only that one. Otherwise, find the best performance attributes and
	 * register all initiators that match.
	 */
	if (target->processor_pxm != PXM_INVAL) {
		cpu_nid = pxm_to_node(target->processor_pxm);
		register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
		return;
	}

	if (list_empty(&localities))
		return;

	/*
	 * We need the initiator list sorted so we can use bitmap_clear for
	 * previously set initiators when we find a better memory accessor.
	 * We'll also use the sorting to prime the candidate nodes with known
	 * initiators.
	 */
	bitmap_zero(p_nodes, MAX_NUMNODES);
	list_sort(p_nodes, &initiators, initiator_cmp);
	for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
		loc = localities_types[i];
		if (!loc)
			continue;

		best = 0;
		list_for_each_entry(initiator, &initiators, node) {
			u32 value;

			if (!test_bit(initiator->processor_pxm, p_nodes))
				continue;

			value = hmat_initiator_perf(target, initiator, loc->hmat_loc);
			if (hmat_update_best(loc->hmat_loc->data_type, value, &best))
				bitmap_clear(p_nodes, 0, initiator->processor_pxm);
			if (value != best)
				clear_bit(initiator->processor_pxm, p_nodes);
		}
		if (best)
			hmat_update_target_access(target, loc->hmat_loc->data_type, best);
	}

	for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
		cpu_nid = pxm_to_node(i);
		register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
	}
}
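
/*
 * Walkthrough with hypothetical numbers: initiators in domains 0 and 1
 * have read latencies of 150 and 100 to this target. Domain 0 sets the
 * first best (150) and stays set; domain 1 then improves the best to 100,
 * so bitmap_clear() drops every lower domain, leaving only domain 1 to be
 * linked under the target's node.
 */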

static void hmat_register_target_cache(struct memory_target *target)
{
	unsigned int mem_nid = pxm_to_node(target->memory_pxm);
	struct target_cache *tcache;

	list_for_each_entry(tcache, &target->caches, node)
		node_add_cache(mem_nid, &tcache->cache_attrs);
}

static void hmat_register_target_perf(struct memory_target *target)
{
	unsigned int mem_nid = pxm_to_node(target->memory_pxm);

	node_set_perf_attrs(mem_nid, &target->hmem_attrs, 0);
}

static void hmat_register_target(struct memory_target *target)
{
	if (!node_online(pxm_to_node(target->memory_pxm)))
		return;

	mutex_lock(&target_lock);
	if (!target->registered) {
		hmat_register_target_initiators(target);
		hmat_register_target_cache(target);
		hmat_register_target_perf(target);
		target->registered = true;
	}
	mutex_unlock(&target_lock);
}
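
/*
 * target_lock and the registered flag together keep the initial
 * registration pass and a concurrent memory hotplug notification from
 * registering the same target twice.
 */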

static void hmat_register_targets(void)
{
	struct memory_target *target;

	list_for_each_entry(target, &targets, node)
		hmat_register_target(target);
}

static int hmat_callback(struct notifier_block *self,
			 unsigned long action, void *arg)
{
	struct memory_target *target;
	struct memory_notify *mnb = arg;
	int pxm, nid = mnb->status_change_nid;

	if (nid == NUMA_NO_NODE || action != MEM_ONLINE)
		return NOTIFY_OK;

	pxm = node_to_pxm(nid);
	target = find_mem_target(pxm);
	if (!target)
		return NOTIFY_OK;

	hmat_register_target(target);
	return NOTIFY_OK;
}

static struct notifier_block hmat_callback_nb = {
	.notifier_call = hmat_callback,
	.priority = 2,
};

static __init void hmat_free_structures(void)
{
	struct memory_target *target, *tnext;
	struct memory_locality *loc, *lnext;
	struct memory_initiator *initiator, *inext;
	struct target_cache *tcache, *cnext;

	list_for_each_entry_safe(target, tnext, &targets, node) {
		list_for_each_entry_safe(tcache, cnext, &target->caches, node) {
			list_del(&tcache->node);
			kfree(tcache);
		}
		list_del(&target->node);
		kfree(target);
	}

	list_for_each_entry_safe(initiator, inext, &initiators, node) {
		list_del(&initiator->node);
		kfree(initiator);
	}

	list_for_each_entry_safe(loc, lnext, &localities, node) {
		list_del(&loc->node);
		kfree(loc);
	}
}

static __init int hmat_init(void)
{
	struct acpi_table_header *tbl;
	enum acpi_hmat_type i;
	acpi_status status;

	if (srat_disabled())
		return 0;

	status = acpi_get_table(ACPI_SIG_SRAT, 0, &tbl);
	if (ACPI_FAILURE(status))
		return 0;

	if (acpi_table_parse_entries(ACPI_SIG_SRAT,
				sizeof(struct acpi_table_srat),
				ACPI_SRAT_TYPE_MEMORY_AFFINITY,
				srat_parse_mem_affinity, 0) < 0)
		goto out_put;
	acpi_put_table(tbl);

	/* On failure, tbl would still point at the SRAT table put above */
	status = acpi_get_table(ACPI_SIG_HMAT, 0, &tbl);
	if (ACPI_FAILURE(status))
		goto out_free;

	hmat_revision = tbl->revision;
	switch (hmat_revision) {
	case 1:
	case 2:
		break;
	default:
		pr_notice("Ignoring HMAT: Unknown revision:%d\n", hmat_revision);
		goto out_put;
	}

	for (i = ACPI_HMAT_TYPE_PROXIMITY; i < ACPI_HMAT_TYPE_RESERVED; i++) {
		if (acpi_table_parse_entries(ACPI_SIG_HMAT,
					     sizeof(struct acpi_table_hmat), i,
					     hmat_parse_subtable, 0) < 0) {
			pr_notice("Ignoring HMAT: Invalid table\n");
			goto out_put;
		}
	}
	hmat_register_targets();

	/* Keep the table and structures if the notifier may use them */
	if (!register_hotmemory_notifier(&hmat_callback_nb))
		return 0;
out_put:
	acpi_put_table(tbl);
out_free:
	hmat_free_structures();
	return 0;
}
subsys_initcall(hmat_init);