// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019, Intel Corporation.
 *
 * Heterogeneous Memory Attributes Table (HMAT) representation
 *
 * This program parses and reports the platform's HMAT tables, and registers
 * the applicable attributes with the node's interfaces.
 */

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/memory.h>
#include <linux/mutex.h>
#include <linux/node.h>
#include <linux/sysfs.h>

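/*
 * Parsed HMAT state lives in the lists below until the targets have been
 * registered with the node subsystem; unless the memory hotplug notifier
 * still needs them, the structures are freed again at the end of hmat_init().
 */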
static u8 hmat_revision;

static LIST_HEAD(targets);
static LIST_HEAD(initiators);
static LIST_HEAD(localities);

static DEFINE_MUTEX(target_lock);

/*
 * The defined enum order is used to prioritize attributes to break ties when
 * selecting the best performing node.
 */
enum locality_types {
	WRITE_LATENCY,
	READ_LATENCY,
	WRITE_BANDWIDTH,
	READ_BANDWIDTH,
};

static struct memory_locality *localities_types[4];

struct target_cache {
	struct list_head node;
	struct node_cache_attrs cache_attrs;
};

struct memory_target {
	struct list_head node;
	unsigned int memory_pxm;
	unsigned int processor_pxm;
	struct node_hmem_attrs hmem_attrs;
	struct list_head caches;
	struct node_cache_attrs cache_attrs;
	bool registered;
};

struct memory_initiator {
	struct list_head node;
	unsigned int processor_pxm;
};

struct memory_locality {
	struct list_head node;
	struct acpi_hmat_locality *hmat_loc;
};

static struct memory_initiator *find_mem_initiator(unsigned int cpu_pxm)
{
	struct memory_initiator *initiator;

	list_for_each_entry(initiator, &initiators, node)
		if (initiator->processor_pxm == cpu_pxm)
			return initiator;
	return NULL;
}

static struct memory_target *find_mem_target(unsigned int mem_pxm)
{
	struct memory_target *target;

	list_for_each_entry(target, &targets, node)
		if (target->memory_pxm == mem_pxm)
			return target;
	return NULL;
}

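/*
 * Allocate tracking structures the first time a proximity domain is seen;
 * PXMs with no corresponding NUMA node are ignored.
 */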
static __init void alloc_memory_initiator(unsigned int cpu_pxm)
{
	struct memory_initiator *initiator;

	if (pxm_to_node(cpu_pxm) == NUMA_NO_NODE)
		return;

	initiator = find_mem_initiator(cpu_pxm);
	if (initiator)
		return;

	initiator = kzalloc(sizeof(*initiator), GFP_KERNEL);
	if (!initiator)
		return;

	initiator->processor_pxm = cpu_pxm;
	list_add_tail(&initiator->node, &initiators);
}

static __init void alloc_memory_target(unsigned int mem_pxm)
{
	struct memory_target *target;

	if (pxm_to_node(mem_pxm) == NUMA_NO_NODE)
		return;

	target = find_mem_target(mem_pxm);
	if (target)
		return;

	target = kzalloc(sizeof(*target), GFP_KERNEL);
	if (!target)
		return;

	target->memory_pxm = mem_pxm;
	target->processor_pxm = PXM_INVAL;
	list_add_tail(&target->node, &targets);
	INIT_LIST_HEAD(&target->caches);
}

static __init const char *hmat_data_type(u8 type)
{
	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
		return "Access Latency";
	case ACPI_HMAT_READ_LATENCY:
		return "Read Latency";
	case ACPI_HMAT_WRITE_LATENCY:
		return "Write Latency";
	case ACPI_HMAT_ACCESS_BANDWIDTH:
		return "Access Bandwidth";
	case ACPI_HMAT_READ_BANDWIDTH:
		return "Read Bandwidth";
	case ACPI_HMAT_WRITE_BANDWIDTH:
		return "Write Bandwidth";
	default:
		return "Reserved";
	}
}

static __init const char *hmat_data_type_suffix(u8 type)
{
	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
	case ACPI_HMAT_READ_LATENCY:
	case ACPI_HMAT_WRITE_LATENCY:
		return " nsec";
	case ACPI_HMAT_ACCESS_BANDWIDTH:
	case ACPI_HMAT_READ_BANDWIDTH:
	case ACPI_HMAT_WRITE_BANDWIDTH:
		return " MB/s";
	default:
		return "";
	}
}

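/*
 * Convert a raw HMAT matrix entry to common units: nanoseconds for
 * latencies, MB/s for bandwidths. Returns 0 for entries that are
 * unreachable (0xffff), zero, or would overflow a u32.
 */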
static u32 hmat_normalize(u16 entry, u64 base, u8 type)
{
	u32 value;

	/*
	 * Check for invalid and overflow values
	 */
	if (entry == 0xffff || !entry)
		return 0;
	else if (base > (UINT_MAX / (entry)))
		return 0;

	/*
	 * Divide by the base unit for version 1, convert latency from
	 * picoseconds to nanoseconds if revision 2.
	 */
	value = entry * base;
	if (hmat_revision == 1) {
		if (value < 10)
			return 0;
		value = DIV_ROUND_UP(value, 10);
	} else if (hmat_revision == 2) {
		switch (type) {
		case ACPI_HMAT_ACCESS_LATENCY:
		case ACPI_HMAT_READ_LATENCY:
		case ACPI_HMAT_WRITE_LATENCY:
			value = DIV_ROUND_UP(value, 1000);
			break;
		default:
			break;
		}
	}

	return value;
}

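/*
 * Cache the normalized value in the target's node_hmem_attrs; "access"
 * data types update both the read and the write attribute.
 */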
static void hmat_update_target_access(struct memory_target *target,
				      u8 type, u32 value)
{
	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
		target->hmem_attrs.read_latency = value;
		target->hmem_attrs.write_latency = value;
		break;
	case ACPI_HMAT_READ_LATENCY:
		target->hmem_attrs.read_latency = value;
		break;
	case ACPI_HMAT_WRITE_LATENCY:
		target->hmem_attrs.write_latency = value;
		break;
	case ACPI_HMAT_ACCESS_BANDWIDTH:
		target->hmem_attrs.read_bandwidth = value;
		target->hmem_attrs.write_bandwidth = value;
		break;
	case ACPI_HMAT_READ_BANDWIDTH:
		target->hmem_attrs.read_bandwidth = value;
		break;
	case ACPI_HMAT_WRITE_BANDWIDTH:
		target->hmem_attrs.write_bandwidth = value;
		break;
	default:
		break;
	}
}

static __init void hmat_add_locality(struct acpi_hmat_locality *hmat_loc)
{
	struct memory_locality *loc;

	loc = kzalloc(sizeof(*loc), GFP_KERNEL);
	if (!loc) {
		pr_notice_once("Failed to allocate HMAT locality\n");
		return;
	}

	loc->hmat_loc = hmat_loc;
	list_add_tail(&loc->node, &localities);

	switch (hmat_loc->data_type) {
	case ACPI_HMAT_ACCESS_LATENCY:
		localities_types[READ_LATENCY] = loc;
		localities_types[WRITE_LATENCY] = loc;
		break;
	case ACPI_HMAT_READ_LATENCY:
		localities_types[READ_LATENCY] = loc;
		break;
	case ACPI_HMAT_WRITE_LATENCY:
		localities_types[WRITE_LATENCY] = loc;
		break;
	case ACPI_HMAT_ACCESS_BANDWIDTH:
		localities_types[READ_BANDWIDTH] = loc;
		localities_types[WRITE_BANDWIDTH] = loc;
		break;
	case ACPI_HMAT_READ_BANDWIDTH:
		localities_types[READ_BANDWIDTH] = loc;
		break;
	case ACPI_HMAT_WRITE_BANDWIDTH:
		localities_types[WRITE_BANDWIDTH] = loc;
		break;
	default:
		break;
	}
}

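/*
 * A System Locality Latency and Bandwidth Information Structure is
 * followed by an array of initiator PXMs, an array of target PXMs, and
 * an initiator-major matrix of u16 entries, walked below.
 */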
static __init int hmat_parse_locality(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	struct acpi_hmat_locality *hmat_loc = (void *)header;
	struct memory_target *target;
	unsigned int init, targ, total_size, ipds, tpds;
	u32 *inits, *targs, value;
	u16 *entries;
	u8 type, mem_hier;

	if (hmat_loc->header.length < sizeof(*hmat_loc)) {
		pr_notice("HMAT: Unexpected locality header length: %d\n",
			 hmat_loc->header.length);
		return -EINVAL;
	}

	type = hmat_loc->data_type;
	mem_hier = hmat_loc->flags & ACPI_HMAT_MEMORY_HIERARCHY;
	ipds = hmat_loc->number_of_initiator_Pds;
	tpds = hmat_loc->number_of_target_Pds;
	total_size = sizeof(*hmat_loc) + sizeof(*entries) * ipds * tpds +
		     sizeof(*inits) * ipds + sizeof(*targs) * tpds;
	if (hmat_loc->header.length < total_size) {
		pr_notice("HMAT: Unexpected locality header length:%d, minimum required:%d\n",
			 hmat_loc->header.length, total_size);
		return -EINVAL;
	}

	pr_info("HMAT: Locality: Flags:%02x Type:%s Initiator Domains:%d Target Domains:%d Base:%lld\n",
		hmat_loc->flags, hmat_data_type(type), ipds, tpds,
		hmat_loc->entry_base_unit);

	inits = (u32 *)(hmat_loc + 1);
	targs = inits + ipds;
	entries = (u16 *)(targs + tpds);
	for (init = 0; init < ipds; init++) {
		alloc_memory_initiator(inits[init]);
		for (targ = 0; targ < tpds; targ++) {
			value = hmat_normalize(entries[init * tpds + targ],
					       hmat_loc->entry_base_unit,
					       type);
			pr_info("  Initiator-Target[%d-%d]:%d%s\n",
				inits[init], targs[targ], value,
				hmat_data_type_suffix(type));

			if (mem_hier == ACPI_HMAT_MEMORY) {
				target = find_mem_target(targs[targ]);
				if (target && target->processor_pxm == inits[init])
					hmat_update_target_access(target, type, value);
			}
		}
	}

	if (mem_hier == ACPI_HMAT_MEMORY)
		hmat_add_locality(hmat_loc);

	return 0;
}

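/*
 * A Memory Side Cache Information Structure packs the cache level,
 * associativity, write policy, and line size into the attributes dword,
 * decoded into node_cache_attrs below.
 */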
static __init int hmat_parse_cache(union acpi_subtable_headers *header,
				   const unsigned long end)
{
	struct acpi_hmat_cache *cache = (void *)header;
	struct memory_target *target;
	struct target_cache *tcache;
	u32 attrs;

	if (cache->header.length < sizeof(*cache)) {
		pr_notice("HMAT: Unexpected cache header length: %d\n",
			 cache->header.length);
		return -EINVAL;
	}

	attrs = cache->cache_attributes;
	pr_info("HMAT: Cache: Domain:%d Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
		cache->memory_PD, cache->cache_size, attrs,
		cache->number_of_SMBIOShandles);

	target = find_mem_target(cache->memory_PD);
	if (!target)
		return 0;

	tcache = kzalloc(sizeof(*tcache), GFP_KERNEL);
	if (!tcache) {
		pr_notice_once("Failed to allocate HMAT cache info\n");
		return 0;
	}

	tcache->cache_attrs.size = cache->cache_size;
	tcache->cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4;
	tcache->cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16;

	switch ((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8) {
	case ACPI_HMAT_CA_DIRECT_MAPPED:
		tcache->cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
		break;
	case ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING:
		tcache->cache_attrs.indexing = NODE_CACHE_INDEXED;
		break;
	case ACPI_HMAT_CA_NONE:
	default:
		tcache->cache_attrs.indexing = NODE_CACHE_OTHER;
		break;
	}

	switch ((attrs & ACPI_HMAT_WRITE_POLICY) >> 12) {
	case ACPI_HMAT_CP_WB:
		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_BACK;
		break;
	case ACPI_HMAT_CP_WT:
		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH;
		break;
	case ACPI_HMAT_CP_NONE:
	default:
		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER;
		break;
	}
	list_add_tail(&tcache->node, &target->caches);

	return 0;
}

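/*
 * A Memory Proximity Domain Attributes Structure ties a memory domain to
 * its local processor (initiator) domain when the valid flags are set.
 */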
static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *header,
					      const unsigned long end)
{
	struct acpi_hmat_proximity_domain *p = (void *)header;
	struct memory_target *target = NULL;

	if (p->header.length != sizeof(*p)) {
		pr_notice("HMAT: Unexpected address range header length: %d\n",
			 p->header.length);
		return -EINVAL;
	}

	if (hmat_revision == 1)
		pr_info("HMAT: Memory (%#llx length %#llx) Flags:%04x Processor Domain:%d Memory Domain:%d\n",
			p->reserved3, p->reserved4, p->flags, p->processor_PD,
			p->memory_PD);
	else
		pr_info("HMAT: Memory Flags:%04x Processor Domain:%d Memory Domain:%d\n",
			p->flags, p->processor_PD, p->memory_PD);

	if (p->flags & ACPI_HMAT_MEMORY_PD_VALID) {
		target = find_mem_target(p->memory_PD);
		if (!target) {
			pr_debug("HMAT: Memory Domain missing from SRAT\n");
			return -EINVAL;
		}
	}
	if (target && p->flags & ACPI_HMAT_PROCESSOR_PD_VALID) {
		int p_node = pxm_to_node(p->processor_PD);

		if (p_node == NUMA_NO_NODE) {
			pr_debug("HMAT: Invalid Processor Domain\n");
			return -EINVAL;
		}
		/* Store the PXM, not the node id; it is converted with
		 * pxm_to_node() again at registration time. */
		target->processor_pxm = p->processor_PD;
	}

	return 0;
}

static int __init hmat_parse_subtable(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	struct acpi_hmat_structure *hdr = (void *)header;

	if (!hdr)
		return -EINVAL;

	switch (hdr->type) {
	case ACPI_HMAT_TYPE_PROXIMITY:
		return hmat_parse_proximity_domain(header, end);
	case ACPI_HMAT_TYPE_LOCALITY:
		return hmat_parse_locality(header, end);
	case ACPI_HMAT_TYPE_CACHE:
		return hmat_parse_cache(header, end);
	default:
		return -EINVAL;
	}
}

static __init int srat_parse_mem_affinity(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	struct acpi_srat_mem_affinity *ma = (void *)header;

	if (!ma)
		return -EINVAL;
	if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
		return 0;
	alloc_memory_target(ma->proximity_domain);
	return 0;
}

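/*
 * Look up the matrix entry for an initiator/target pair in the given
 * locality subtable; returns 0 if either PXM is not listed.
 */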
static u32 hmat_initiator_perf(struct memory_target *target,
			       struct memory_initiator *initiator,
			       struct acpi_hmat_locality *hmat_loc)
{
	unsigned int ipds, tpds, i, idx = 0, tdx = 0;
	u32 *inits, *targs;
	u16 *entries;

	ipds = hmat_loc->number_of_initiator_Pds;
	tpds = hmat_loc->number_of_target_Pds;
	inits = (u32 *)(hmat_loc + 1);
	targs = inits + ipds;
	entries = (u16 *)(targs + tpds);

	for (i = 0; i < ipds; i++) {
		if (inits[i] == initiator->processor_pxm) {
			idx = i;
			break;
		}
	}

	if (i == ipds)
		return 0;

	for (i = 0; i < tpds; i++) {
		if (targs[i] == target->memory_pxm) {
			tdx = i;
			break;
		}
	}
	if (i == tpds)
		return 0;

	return hmat_normalize(entries[idx * tpds + tdx],
			      hmat_loc->entry_base_unit,
			      hmat_loc->data_type);
}

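/*
 * Lower is better for latencies, higher is better for bandwidths.
 * Returns true if *best was updated.
 */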
static bool hmat_update_best(u8 type, u32 value, u32 *best)
{
	bool updated = false;

	if (!value)
		return false;

	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
	case ACPI_HMAT_READ_LATENCY:
	case ACPI_HMAT_WRITE_LATENCY:
		if (!*best || *best > value) {
			*best = value;
			updated = true;
		}
		break;
	case ACPI_HMAT_ACCESS_BANDWIDTH:
	case ACPI_HMAT_READ_BANDWIDTH:
	case ACPI_HMAT_WRITE_BANDWIDTH:
		if (!*best || *best < value) {
			*best = value;
			updated = true;
		}
		break;
	}

	return updated;
}

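/*
 * list_sort() comparator; setting the bits here primes the candidate
 * nodemask with every known initiator as a side effect.
 */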
static int initiator_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct memory_initiator *ia;
	struct memory_initiator *ib;
	unsigned long *p_nodes = priv;

	ia = list_entry(a, struct memory_initiator, node);
	ib = list_entry(b, struct memory_initiator, node);

	set_bit(ia->processor_pxm, p_nodes);
	set_bit(ib->processor_pxm, p_nodes);

	return ia->processor_pxm - ib->processor_pxm;
}

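/*
 * Link this target to its initiator node(s): either the local processor
 * domain from the Address Range Structure, or whichever initiators share
 * the best performance attributes.
 */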
static void hmat_register_target_initiators(struct memory_target *target)
{
	static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
	struct memory_initiator *initiator;
	unsigned int mem_nid, cpu_nid;
	struct memory_locality *loc = NULL;
	u32 best = 0;
	int i;

	mem_nid = pxm_to_node(target->memory_pxm);
	/*
	 * If the Address Range Structure provides a local processor pxm, link
	 * only that one. Otherwise, find the best performance attributes and
	 * register all initiators that match.
	 */
	if (target->processor_pxm != PXM_INVAL) {
		cpu_nid = pxm_to_node(target->processor_pxm);
		register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
		return;
	}

	if (list_empty(&localities))
		return;

	/*
	 * We need the initiator list sorted so we can use bitmap_clear for
	 * previously set initiators when we find a better memory accessor.
	 * We'll also use the sorting to prime the candidate nodes with known
	 * initiators.
	 */
	bitmap_zero(p_nodes, MAX_NUMNODES);
	list_sort(p_nodes, &initiators, initiator_cmp);
	for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
		loc = localities_types[i];
		if (!loc)
			continue;

		best = 0;
		list_for_each_entry(initiator, &initiators, node) {
			u32 value;

			if (!test_bit(initiator->processor_pxm, p_nodes))
				continue;

			value = hmat_initiator_perf(target, initiator, loc->hmat_loc);
			if (hmat_update_best(loc->hmat_loc->data_type, value, &best))
				bitmap_clear(p_nodes, 0, initiator->processor_pxm);
			if (value != best)
				clear_bit(initiator->processor_pxm, p_nodes);
		}
		if (best)
			hmat_update_target_access(target, loc->hmat_loc->data_type, best);
	}

	for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
		cpu_nid = pxm_to_node(i);
		register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
	}
}

static void hmat_register_target_cache(struct memory_target *target)
{
	unsigned mem_nid = pxm_to_node(target->memory_pxm);
	struct target_cache *tcache;

	list_for_each_entry(tcache, &target->caches, node)
		node_add_cache(mem_nid, &tcache->cache_attrs);
}

static void hmat_register_target_perf(struct memory_target *target)
{
	unsigned mem_nid = pxm_to_node(target->memory_pxm);

	node_set_perf_attrs(mem_nid, &target->hmem_attrs, 0);
}

static void hmat_register_target(struct memory_target *target)
{
	if (!node_online(pxm_to_node(target->memory_pxm)))
		return;

	mutex_lock(&target_lock);
	if (!target->registered) {
		hmat_register_target_initiators(target);
		hmat_register_target_cache(target);
		hmat_register_target_perf(target);
		target->registered = true;
	}
	mutex_unlock(&target_lock);
}

static void hmat_register_targets(void)
{
	struct memory_target *target;

	list_for_each_entry(target, &targets, node)
		hmat_register_target(target);
}

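/*
 * Memory hotplug notifier: register a target's attributes once its node
 * first comes online.
 */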
static int hmat_callback(struct notifier_block *self,
			 unsigned long action, void *arg)
{
	struct memory_target *target;
	struct memory_notify *mnb = arg;
	int pxm, nid = mnb->status_change_nid;

	if (nid == NUMA_NO_NODE || action != MEM_ONLINE)
		return NOTIFY_OK;

	pxm = node_to_pxm(nid);
	target = find_mem_target(pxm);
	if (!target)
		return NOTIFY_OK;

	hmat_register_target(target);
	return NOTIFY_OK;
}

static struct notifier_block hmat_callback_nb = {
	.notifier_call = hmat_callback,
	.priority = 2,
};

static __init void hmat_free_structures(void)
{
	struct memory_target *target, *tnext;
	struct memory_locality *loc, *lnext;
	struct memory_initiator *initiator, *inext;
	struct target_cache *tcache, *cnext;

	list_for_each_entry_safe(target, tnext, &targets, node) {
		list_for_each_entry_safe(tcache, cnext, &target->caches, node) {
			list_del(&tcache->node);
			kfree(tcache);
		}
		list_del(&target->node);
		kfree(target);
	}

	list_for_each_entry_safe(initiator, inext, &initiators, node) {
		list_del(&initiator->node);
		kfree(initiator);
	}

	list_for_each_entry_safe(loc, lnext, &localities, node) {
		list_del(&loc->node);
		kfree(loc);
	}
}

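/*
 * Boot flow: enumerate memory targets from the SRAT, parse each HMAT
 * subtable type, register the resulting attributes, and free the parsed
 * structures unless the hotplug notifier may still need them.
 */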
static __init int hmat_init(void)
{
	struct acpi_table_header *tbl;
	enum acpi_hmat_type i;
	acpi_status status;

	if (srat_disabled())
		return 0;

	status = acpi_get_table(ACPI_SIG_SRAT, 0, &tbl);
	if (ACPI_FAILURE(status))
		return 0;

	if (acpi_table_parse_entries(ACPI_SIG_SRAT,
				sizeof(struct acpi_table_srat),
				ACPI_SRAT_TYPE_MEMORY_AFFINITY,
				srat_parse_mem_affinity, 0) < 0)
		goto out_put;
	acpi_put_table(tbl);

	status = acpi_get_table(ACPI_SIG_HMAT, 0, &tbl);
	if (ACPI_FAILURE(status))
		return 0;

	hmat_revision = tbl->revision;
	switch (hmat_revision) {
	case 1:
	case 2:
		break;
	default:
		pr_notice("Ignoring HMAT: Unknown revision:%d\n", hmat_revision);
		goto out_put;
	}

	for (i = ACPI_HMAT_TYPE_PROXIMITY; i < ACPI_HMAT_TYPE_RESERVED; i++) {
		if (acpi_table_parse_entries(ACPI_SIG_HMAT,
					     sizeof(struct acpi_table_hmat), i,
					     hmat_parse_subtable, 0) < 0) {
			pr_notice("Ignoring HMAT: Invalid table\n");
			goto out_put;
		}
	}
	hmat_register_targets();

	/* Keep the table and structures if the notifier may use them */
	if (!register_hotmemory_notifier(&hmat_callback_nb))
		return 0;
out_put:
	hmat_free_structures();
	acpi_put_table(tbl);
	return 0;
}
subsys_initcall(hmat_init);