// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 */
#define pr_fmt(fmt) "numa: " fmt

#include <linux/threads.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <linux/stop_machine.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/topology.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/setup.h>
#include <asm/vdso.h>
#include <asm/drmem.h>
static int numa_enabled = 1;

static char *cmdline __initdata;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }

int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);
static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
static int form1_affinity;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const __be32 *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
/*
 * Allocate node_to_cpumask_map based on number of available nodes.
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for_each_node(node)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	dbg("Node to cpumask map for %u nodes\n", nr_node_ids);
}
static int __init fake_numa_create_new_node(unsigned long end_pfn,
						unsigned int *nid)
{
	unsigned long long mem;
	char *p = cmdline;
	static unsigned int fake_nid;
	static unsigned long long curr_boundary;

	/*
	 * Modify node id iff we started creating NUMA nodes;
	 * we want to continue from where we left off the last time.
	 */
	if (fake_nid)
		*nid = fake_nid;
	/*
	 * In case there are no more arguments to parse, the
	 * node_id should be the same as the last fake node id
	 * (we've handled this above).
	 */
	if (!p)
		return 0;

	mem = memparse(p, &p);
	if (!mem)
		return 0;

	if (mem < curr_boundary)
		return 0;

	curr_boundary = mem;

	if ((end_pfn << PAGE_SHIFT) > mem) {
		/* Skip commas and spaces */
		while (*p == ',' || *p == ' ' || *p == '\t')
			p++;

		cmdline = p;
		fake_nid++;
		*nid = fake_nid;
		dbg("created new fake_node with id %d\n", fake_nid);
		return 1;
	}
	return 0;
}
static void reset_numa_cpu_lookup_table(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		numa_cpu_lookup_table[cpu] = -1;
}
static void map_cpu_to_node(int cpu, int node)
{
	update_numa_cpu_lookup_table(cpu, node);

	dbg("adding cpu %d to node %d\n", cpu, node);

	if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */
int cpu_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
{
	int dist = 0;
	int i, index;

	for (i = 0; i < distance_ref_points_depth; i++) {
		index = be32_to_cpu(distance_ref_points[i]);
		if (cpu1_assoc[index] == cpu2_assoc[index])
			break;
		dist++;
	}

	return dist;
}
/* must hold reference to node during call */
static const __be32 *of_get_associativity(struct device_node *dev)
{
	return of_get_property(dev, "ibm,associativity", NULL);
}
int __node_distance(int a, int b)
{
	int i;
	int distance = LOCAL_DISTANCE;

	if (!form1_affinity)
		return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);

	for (i = 0; i < distance_ref_points_depth; i++) {
		if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
			break;

		/* Double the distance for each NUMA level */
		distance *= 2;
	}

	return distance;
}
EXPORT_SYMBOL(__node_distance);
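
/*
 * Worked example of the doubling loop above (LOCAL_DISTANCE == 10):
 * with distance_ref_points_depth == 2, two nodes whose lookup-table
 * entries match at the first reference point report a distance of 10,
 * nodes that first match at the second point report 20, and nodes
 * that match at neither report 40.
 */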
static void initialize_distance_lookup_table(int nid,
		const __be32 *associativity)
{
	int i;

	if (!form1_affinity)
		return;

	for (i = 0; i < distance_ref_points_depth; i++) {
		const __be32 *entry;

		entry = &associativity[be32_to_cpu(distance_ref_points[i]) - 1];
		distance_lookup_table[nid][i] = of_read_number(entry, 1);
	}
}
/*
 * Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful NUMA
 * information is found.
 */
static int associativity_to_nid(const __be32 *associativity)
{
	int nid = NUMA_NO_NODE;

	if (!numa_enabled)
		goto out;

	if (of_read_number(associativity, 1) >= min_common_depth)
		nid = of_read_number(&associativity[min_common_depth], 1);

	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= MAX_NUMNODES)
		nid = NUMA_NO_NODE;

	if (nid > 0 &&
	    of_read_number(associativity, 1) >= distance_ref_points_depth) {
		/*
		 * Skip the length field and send start of associativity array
		 */
		initialize_distance_lookup_table(nid, associativity + 1);
	}

out:
	return nid;
}
/*
 * Returns the nid associated with the given device tree node,
 * or -1 if not found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
	int nid = NUMA_NO_NODE;
	const __be32 *tmp;

	tmp = of_get_associativity(device);
	if (tmp)
		nid = associativity_to_nid(tmp);
	return nid;
}
/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
	int nid = NUMA_NO_NODE;

	of_node_get(device);
	while (device) {
		nid = of_node_to_nid_single(device);
		if (nid != -1)
			break;

		device = of_get_next_parent(device);
	}
	of_node_put(device);

	return nid;
}
EXPORT_SYMBOL(of_node_to_nid);
static int __init find_min_common_depth(void)
{
	int depth;
	struct device_node *root;

	if (firmware_has_feature(FW_FEATURE_OPAL))
		root = of_find_node_by_path("/ibm,opal");
	else
		root = of_find_node_by_path("/rtas");
	if (!root)
		root = of_find_node_by_path("/");

	/*
	 * This property is a set of 32-bit integers, each representing
	 * an index into the ibm,associativity nodes.
	 *
	 * With form 0 affinity the first integer is for an SMP configuration
	 * (should be all 0's) and the second is for a normal NUMA
	 * configuration. We have only one level of NUMA.
	 *
	 * With form 1 affinity the first integer is the most significant
	 * NUMA boundary and the following are progressively less significant
	 * boundaries. There can be more than one level of NUMA.
	 */
	distance_ref_points = of_get_property(root,
					"ibm,associativity-reference-points",
					&distance_ref_points_depth);

	if (!distance_ref_points) {
		dbg("NUMA: ibm,associativity-reference-points not found.\n");
		goto err;
	}

	distance_ref_points_depth /= sizeof(int);

	if (firmware_has_feature(FW_FEATURE_OPAL) ||
	    firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
		dbg("Using form 1 affinity\n");
		form1_affinity = 1;
	}

	if (form1_affinity) {
		depth = of_read_number(distance_ref_points, 1);
	} else {
		if (distance_ref_points_depth < 2) {
			printk(KERN_WARNING "NUMA: "
				"short ibm,associativity-reference-points\n");
			goto err;
		}

		depth = of_read_number(&distance_ref_points[1], 1);
	}

	/*
	 * Warn and cap if the hardware supports more than
	 * MAX_DISTANCE_REF_POINTS domains.
	 */
	if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
		printk(KERN_WARNING "NUMA: distance array capped at "
			"%d entries\n", MAX_DISTANCE_REF_POINTS);
		distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
	}

	of_node_put(root);
	return depth;

err:
	of_node_put(root);
	return -1;
}
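
/*
 * Illustration (hypothetical property value): a form 1
 * ibm,associativity-reference-points of <4 4> has depth 2 after the
 * division above. The first entry is returned as the depth, so node
 * ids are then taken from index 4 of each ibm,associativity array.
 */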
static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
	of_node_put(memory);
}
static unsigned long read_n_cells(int n, const __be32 **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | of_read_number(*buf, 1);
		(*buf)++;
	}
	return result;
}
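
/*
 * Example: with n == 2 and cells <0x1 0x20000000>, read_n_cells()
 * returns 0x120000000 and advances *buf past the two cells consumed.
 */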
struct assoc_arrays {
	u32 n_arrays;
	u32 array_sz;
	const __be32 *arrays;
};
/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
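/*
 * For example (hypothetical values), a property of
 * <2 3  0 1 1  0 2 2> describes N = 2 arrays of M = 3 entries each:
 * aa_index 0 selects {0, 1, 1} and aa_index 1 selects {0, 2, 2}.
 */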
static int of_get_assoc_arrays(struct assoc_arrays *aa)
{
	struct device_node *memory;
	const __be32 *prop;
	u32 len;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!memory)
		return -1;

	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
	if (!prop || len < 2 * sizeof(unsigned int)) {
		of_node_put(memory);
		return -1;
	}

	aa->n_arrays = of_read_number(prop++, 1);
	aa->array_sz = of_read_number(prop++, 1);

	of_node_put(memory);

	/* Now that we know the number of arrays and size of each array,
	 * revalidate the size of the property read in.
	 */
	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
		return -1;

	aa->arrays = prop;
	return 0;
}
/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
static int of_drconf_to_nid_single(struct drmem_lmb *lmb)
{
	struct assoc_arrays aa = { .arrays = NULL };
	int default_nid = NUMA_NO_NODE;
	int nid = default_nid;
	int rc, index;

	if ((min_common_depth < 0) || !numa_enabled)
		return default_nid;

	rc = of_get_assoc_arrays(&aa);
	if (rc)
		return default_nid;

	if (min_common_depth <= aa.array_sz &&
	    !(lmb->flags & DRCONF_MEM_AI_INVALID) && lmb->aa_index < aa.n_arrays) {
		index = lmb->aa_index * aa.array_sz + min_common_depth - 1;
		nid = of_read_number(&aa.arrays[index], 1);

		if (nid == 0xffff || nid >= MAX_NUMNODES)
			nid = default_nid;

		if (nid > 0) {
			index = lmb->aa_index * aa.array_sz;
			initialize_distance_lookup_table(nid,
							&aa.arrays[index]);
		}
	}

	return nid;
}
#ifdef CONFIG_PPC_SPLPAR
static int vphn_get_nid(long lcpu)
{
	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
	long rc, hwid;

	/*
	 * On a shared lpar, device tree will not have node associativity.
	 * At this time lppaca, or its __old_status field may not be
	 * updated. Hence kernel cannot detect if its on a shared lpar. So
	 * request an explicit associativity irrespective of whether the
	 * lpar is shared or dedicated. Use the device tree property as a
	 * fallback. cpu_to_phys_id is only valid between
	 * smp_setup_cpu_maps() and smp_setup_pacas().
	 */
	if (firmware_has_feature(FW_FEATURE_VPHN)) {
		if (cpu_to_phys_id)
			hwid = cpu_to_phys_id[lcpu];
		else
			hwid = get_hard_smp_processor_id(lcpu);

		rc = hcall_vphn(hwid, VPHN_FLAG_VCPU, associativity);
		if (rc == H_SUCCESS)
			return associativity_to_nid(associativity);
	}

	return NUMA_NO_NODE;
}
#else
static int vphn_get_nid(long unused)
{
	return NUMA_NO_NODE;
}
#endif  /* CONFIG_PPC_SPLPAR */
/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
	struct device_node *cpu;
	int fcpu = cpu_first_thread_sibling(lcpu);
	int nid = NUMA_NO_NODE;

	/*
	 * If a valid cpu-to-node mapping is already available, use it
	 * directly instead of querying the firmware, since it represents
	 * the most recent mapping notified to us by the platform (eg: VPHN).
	 * The cpu-to-node binding is the same for all threads in a core,
	 * so if a valid mapping is already available for the first thread
	 * in the core, use it.
	 */
	nid = numa_cpu_lookup_table[fcpu];
	if (nid >= 0) {
		map_cpu_to_node(lcpu, nid);
		return nid;
	}

	nid = vphn_get_nid(lcpu);
	if (nid != NUMA_NO_NODE)
		goto out_present;

	cpu = of_get_cpu_node(lcpu, NULL);
	if (!cpu) {
		WARN_ON(1);
		if (cpu_present(lcpu))
			goto out_present;
		else
			goto out;
	}

	nid = of_node_to_nid_single(cpu);
	of_node_put(cpu);

out_present:
	if (nid < 0 || !node_possible(nid))
		nid = first_online_node;

	/*
	 * Update for the first thread of the core. All threads of a core
	 * have to be part of the same node. This not only avoids querying
	 * for every other thread in the core, but always avoids a case
	 * where virtual node associativity change causes subsequent threads
	 * of a core to be associated with different nid. However if the first
	 * thread is already online, expect it to have a valid mapping.
	 */
	if (fcpu != lcpu) {
		WARN_ON(cpu_online(fcpu));
		map_cpu_to_node(fcpu, nid);
	}

	map_cpu_to_node(lcpu, nid);
out:
	return nid;
}
static void verify_cpu_node_mapping(int cpu, int node)
{
	int base, sibling, i;

	/* Verify that all the threads in the core belong to the same node */
	base = cpu_first_thread_sibling(cpu);

	for (i = 0; i < threads_per_core; i++) {
		sibling = base + i;

		if (sibling == cpu || cpu_is_offline(sibling))
			continue;

		if (cpu_to_node(sibling) != node) {
			WARN(1, "CPU thread siblings %d and %d don't belong"
				" to the same node!\n", cpu, sibling);
			break;
		}
	}
}
/* Must run before sched domains notifier. */
static int ppc_numa_cpu_prepare(unsigned int cpu)
{
	int nid;

	nid = numa_setup_cpu(cpu);
	verify_cpu_node_mapping(cpu, nid);
	return 0;
}

static int ppc_numa_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	unmap_cpu_from_node(cpu);
#endif
	return 0;
}
/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit. Also, in the case of
	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
	 */
	if (start + size <= memblock_end_of_DRAM())
		return size;

	if (start >= memblock_end_of_DRAM())
		return 0;

	return memblock_end_of_DRAM() - start;
}
/*
 * Reads the counter for a given entry in
 * linux,drconf-usable-memory property
 */
static inline int __init read_usm_ranges(const __be32 **usm)
{
	/*
	 * For each lmb in ibm,dynamic-memory a corresponding
	 * entry in linux,drconf-usable-memory property contains
	 * a counter followed by that many (base, size) duples.
	 * Read the counter from linux,drconf-usable-memory.
	 */
	return read_n_cells(n_mem_size_cells, usm);
}
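
/*
 * Example (hypothetical values): a linux,drconf-usable-memory entry of
 * <2  base0 size0  base1 size1> means the corresponding LMB contributes
 * two usable (base, size) ranges; read_usm_ranges() returns the
 * leading 2, and the caller reads each range with read_n_cells().
 */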
/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node. This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init numa_setup_drmem_lmb(struct drmem_lmb *lmb,
					const __be32 **usm)
{
	unsigned int ranges, is_kexec_kdump = 0;
	unsigned long base, size, sz;
	int nid;

	/*
	 * Skip this block if the reserved bit is set in flags (0x80)
	 * or if the block is not assigned to this partition (0x8)
	 */
	if ((lmb->flags & DRCONF_MEM_RESERVED)
	    || !(lmb->flags & DRCONF_MEM_ASSIGNED))
		return;

	if (*usm)
		is_kexec_kdump = 1;

	base = lmb->base_addr;
	size = drmem_lmb_size();
	ranges = 1;

	if (is_kexec_kdump) {
		ranges = read_usm_ranges(usm);
		if (!ranges) /* there are no (base, size) duples */
			return;
	}

	do {
		if (is_kexec_kdump) {
			base = read_n_cells(n_mem_addr_cells, usm);
			size = read_n_cells(n_mem_size_cells, usm);
		}

		nid = of_drconf_to_nid_single(lmb);
		fake_numa_create_new_node(((base + size) >> PAGE_SHIFT),
					  &nid);
		node_set_online(nid);
		sz = numa_enforce_memory_limit(base, size);
		if (sz)
			memblock_set_node(base, sz, &memblock.memory, nid);
	} while (--ranges);
}
static int __init parse_numa_properties(void)
{
	struct device_node *memory;
	int default_nid = 0;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	if (min_common_depth < 0) {
		/*
		 * if we fail to parse min_common_depth from device tree
		 * mark the numa disabled, boot with numa disabled.
		 */
		numa_enabled = false;
		return min_common_depth;
	}

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		struct device_node *cpu;
		int nid;

		cpu = of_get_cpu_node(i, NULL);
		BUG_ON(!cpu);
		nid = of_node_to_nid_single(cpu);
		of_node_put(cpu);

		/*
		 * Don't fall back to default_nid yet -- we will plug
		 * cpus into nodes once the memory scan has discovered
		 * the topology.
		 */
		if (nid < 0)
			continue;
		node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

	for_each_node_by_type(memory, "memory") {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const __be32 *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
			"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties. If none, then
		 * everything goes to default_nid.
		 */
		nid = of_node_to_nid_single(memory);
		if (nid < 0)
			nid = default_nid;

		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
		node_set_online(nid);

		size = numa_enforce_memory_limit(start, size);
		if (size)
			memblock_set_node(start, size, &memblock.memory, nid);

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each MEMBLOCK listed in the
	 * ibm,dynamic-memory property in the
	 * ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		walk_drmem_lmbs(memory, numa_setup_drmem_lmb);
		of_node_put(memory);
	}

	return 0;
}
static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int nid = 0;
	struct memblock_region *reg;

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	for_each_memblock(memory, reg) {
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);

		fake_numa_create_new_node(end_pfn, &nid);
		memblock_set_node(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn),
				  &memblock.memory, nid);
		node_set_online(nid);
	}
}
void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (!numa_enabled)
		return;

	for_each_online_node(node) {
		pr_info("Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
			if (cpumask_test_cpu(cpu,
					node_to_cpumask_map[node])) {
				if (count == 0)
					pr_cont(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					pr_cont("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			pr_cont("-%u", nr_cpu_ids - 1);
		pr_cont("\n");
	}
}
/* Initialize NODE_DATA for a node on the local memory */
static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
{
	u64 spanned_pages = end_pfn - start_pfn;
	const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
	u64 nd_pa;
	void *nd;
	int tnid;

	nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
	if (!nd_pa)
		panic("Cannot allocate %zu bytes for node %d data\n",
		      nd_size, nid);

	nd = __va(nd_pa);

	/* report and initialize */
	pr_info("  NODE_DATA [mem %#010Lx-%#010Lx]\n",
		nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		pr_info("    NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
	NODE_DATA(nid)->node_id = nid;
	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = spanned_pages;
}
static void __init find_possible_nodes(void)
{
	struct device_node *rtas;
	u32 numnodes, i;

	if (!numa_enabled)
		return;

	rtas = of_find_node_by_path("/rtas");
	if (!rtas)
		return;

	if (of_property_read_u32_index(rtas,
				"ibm,max-associativity-domains",
				min_common_depth, &numnodes))
		goto out;

	for (i = 0; i < numnodes; i++) {
		if (!node_possible(i))
			node_set(i, node_possible_map);
	}

out:
	of_node_put(rtas);
}
void __init mem_topology_setup(void)
{
	int cpu;

	if (parse_numa_properties())
		setup_nonnuma();

	/*
	 * Modify the set of possible NUMA nodes to reflect information
	 * available about the set of online nodes, and the set of nodes
	 * that we expect to make use of for this platform's affinity
	 * calculations.
	 */
	nodes_and(node_possible_map, node_possible_map, node_online_map);

	find_possible_nodes();

	setup_node_to_cpumask_map();

	reset_numa_cpu_lookup_table();

	for_each_present_cpu(cpu)
		numa_setup_cpu(cpu);
}
void __init initmem_init(void)
{
	int nid;

	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	memblock_dump_all();

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
		setup_node_data(nid, start_pfn, end_pfn);
		sparse_memory_present_with_active_regions(nid);
	}

	sparse_init();

	/*
	 * We need the numa_cpu_lookup_table to be accurate for all CPUs,
	 * even before we online them, so that we can use cpu_to_{node,mem}
	 * early in boot, cf. smp_prepare_cpus().
	 * _nocalls() + manual invocation is used because cpuhp is not yet
	 * initialized for the boot CPU.
	 */
	cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "powerpc/numa:prepare",
				  ppc_numa_cpu_prepare, ppc_numa_cpu_dead);
}
static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	p = strstr(p, "fake=");
	if (p)
		cmdline = p + strlen("fake=");

	return 0;
}
early_param("numa", early_numa);
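
/*
 * Example command lines ("fake=" values are ascending boundaries parsed
 * by memparse(); see fake_numa_create_new_node() above):
 *
 *   numa=off          disable NUMA
 *   numa=debug        enable the dbg() messages in this file
 *   numa=fake=2G,4G   split memory into fake nodes at the 2G and 4G marks
 */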
/*
 * The platform can inform us through one of several mechanisms
 * (post-migration device tree updates, PRRN or VPHN) that the NUMA
 * assignment of a resource has changed. This controls whether we act
 * on that. Disabled by default.
 */
static bool topology_updates_enabled;

static int __init early_topology_updates(char *p)
{
	if (!p)
		return 0;

	if (!strcmp(p, "on")) {
		pr_warn("Caution: enabling topology updates\n");
		topology_updates_enabled = true;
	}

	return 0;
}
early_param("topology_updates", early_topology_updates);
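
/* Usage: boot with "topology_updates=on" to opt in; the default is off. */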
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(unsigned long scn_addr)
{
	struct drmem_lmb *lmb;
	unsigned long lmb_size;
	int nid = NUMA_NO_NODE;

	lmb_size = drmem_lmb_size();

	for_each_drmem_lmb(lmb) {
		/* skip this block if it is reserved or not assigned to
		 * this partition */
		if ((lmb->flags & DRCONF_MEM_RESERVED)
		    || !(lmb->flags & DRCONF_MEM_ASSIGNED))
			continue;

		if ((scn_addr < lmb->base_addr)
		    || (scn_addr >= (lmb->base_addr + lmb_size)))
			continue;

		nid = of_drconf_to_nid_single(lmb);
		break;
	}

	return nid;
}
/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 */
static int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory;
	int nid = NUMA_NO_NODE;

	for_each_node_by_type(memory, "memory") {
		unsigned long start, size;
		int ranges;
		const __be32 *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

		while (ranges--) {
			start = read_n_cells(n_mem_addr_cells, &memcell_buf);
			size = read_n_cells(n_mem_size_cells, &memcell_buf);

			if ((scn_addr < start) || (scn_addr >= (start + size)))
				continue;

			nid = of_node_to_nid_single(memory);
			break;
		}

		if (nid >= 0)
			break;
	}

	of_node_put(memory);

	return nid;
}
/*
 * Find the node associated with a hot added memory section. Section
 * corresponds to a SPARSEMEM section, not a MEMBLOCK. It is assumed that
 * sections are fully contained within a single MEMBLOCK.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid;

	if (!numa_enabled)
		return first_online_node;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		nid = hot_add_drconf_scn_to_nid(scn_addr);
		of_node_put(memory);
	} else {
		nid = hot_add_node_scn_to_nid(scn_addr);
	}

	if (nid < 0 || !node_possible(nid))
		nid = first_online_node;

	return nid;
}
static u64 hot_add_drconf_memory_max(void)
{
	struct device_node *memory = NULL;
	struct device_node *dn = NULL;
	const __be64 *lrdr = NULL;

	dn = of_find_node_by_path("/rtas");
	if (dn) {
		lrdr = of_get_property(dn, "ibm,lrdr-capacity", NULL);
		of_node_put(dn);
		if (lrdr)
			return be64_to_cpup(lrdr);
	}

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		of_node_put(memory);
		return drmem_lmb_memory_max();
	}
	return 0;
}
/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.
 */
u64 memory_hotplug_max(void)
{
	return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */
/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
struct topology_update_data {
	struct topology_update_data *next;
	unsigned int cpu;
	int old_nid;
	int new_nid;
};

#define TOPOLOGY_DEF_TIMER_SECS	60

static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled;
static int prrn_enabled;
static void reset_topology_timer(void);
static int topology_timer_secs = 1;
static int topology_inited;
/*
 * Change polling interval for associativity changes.
 */
int timed_topology_update(int nsecs)
{
	if (vphn_enabled) {
		if (nsecs > 0)
			topology_timer_secs = nsecs;
		else
			topology_timer_secs = TOPOLOGY_DEF_TIMER_SECS;

		reset_topology_timer();
	}
	return 0;
}
/*
 * Store the current values of the associativity change counters in the
 * hypervisor.
 */
static void setup_cpu_associativity_change_counters(void)
{
	int cpu;

	/* The VPHN feature supports a maximum of 8 reference points */
	BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);

	for_each_possible_cpu(cpu) {
		int i;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca_of(cpu).vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++)
			counts[i] = hypervisor_counts[i];
	}
}
/*
 * The hypervisor maintains a set of 8 associativity change counters in
 * the VPA of each cpu that correspond to the associativity levels in the
 * ibm,associativity-reference-points property. When an associativity
 * level changes, the corresponding counter is incremented.
 *
 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
 * node associativity levels have changed.
 *
 * Returns the number of cpus with unhandled associativity changes.
 */
static int update_cpu_associativity_changes_mask(void)
{
	int cpu;
	cpumask_t *changes = &cpu_associativity_changes_mask;

	for_each_possible_cpu(cpu) {
		int i, changed = 0;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca_of(cpu).vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++) {
			if (hypervisor_counts[i] != counts[i]) {
				counts[i] = hypervisor_counts[i];
				changed = 1;
			}
		}
		if (changed) {
			cpumask_or(changes, changes, cpu_sibling_mask(cpu));
			cpu = cpu_last_thread_sibling(cpu);
		}
	}

	return cpumask_weight(changes);
}
/*
 * Retrieve the new associativity information for a virtual processor's
 * home node.
 */
static long vphn_get_associativity(unsigned long cpu,
					__be32 *associativity)
{
	long rc;

	rc = hcall_vphn(get_hard_smp_processor_id(cpu),
				VPHN_FLAG_VCPU, associativity);

	switch (rc) {
	case H_SUCCESS:
		dbg("VPHN hcall succeeded. Reset polling...\n");
		timed_topology_update(0);
		goto out;

	case H_FUNCTION:
		pr_err_ratelimited("VPHN unsupported. Disabling polling...\n");
		break;
	case H_HARDWARE:
		pr_err_ratelimited("hcall_vphn() experienced a hardware fault "
			"preventing VPHN. Disabling polling...\n");
		break;
	case H_PARAMETER:
		pr_err_ratelimited("hcall_vphn() was passed an invalid parameter. "
			"Disabling polling...\n");
		break;
	default:
		pr_err_ratelimited("hcall_vphn() returned %ld. Disabling polling...\n",
			rc);
		break;
	}

	stop_topology_update();
out:
	return rc;
}
int find_and_online_cpu_nid(int cpu)
{
	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
	int new_nid;

	/* Use associativity from first thread for all siblings */
	if (vphn_get_associativity(cpu, associativity))
		return cpu_to_node(cpu);

	new_nid = associativity_to_nid(associativity);
	if (new_nid < 0 || !node_possible(new_nid))
		new_nid = first_online_node;

	if (NODE_DATA(new_nid) == NULL) {
#ifdef CONFIG_MEMORY_HOTPLUG
		/*
		 * Need to ensure that NODE_DATA is initialized for a node from
		 * available memory (see memblock_alloc_try_nid). If unable to
		 * init the node, then default to nearest node that has memory
		 * installed. Skip onlining a node if the subsystems are not
		 * yet initialized.
		 */
		if (!topology_inited || try_online_node(new_nid))
			new_nid = first_online_node;
#else
		/*
		 * Default to using the nearest node that has memory installed.
		 * Otherwise, it would be necessary to patch the kernel MM code
		 * to deal with more memoryless-node error conditions.
		 */
		new_nid = first_online_node;
#endif
	}

	pr_debug("%s:%d cpu %d nid %d\n", __FUNCTION__, __LINE__,
		cpu, new_nid);
	return new_nid;
}
/*
 * Update the CPU maps and sysfs entries for a single CPU when its NUMA
 * characteristics change. This function doesn't perform any locking and is
 * only safe to call from stop_machine().
 */
static int update_cpu_topology(void *data)
{
	struct topology_update_data *update;
	unsigned long cpu;

	if (!data)
		return -EINVAL;

	cpu = smp_processor_id();

	for (update = data; update; update = update->next) {
		int new_nid = update->new_nid;
		if (cpu != update->cpu)
			continue;

		unmap_cpu_from_node(cpu);
		map_cpu_to_node(cpu, new_nid);
		set_cpu_numa_node(cpu, new_nid);
		set_cpu_numa_mem(cpu, local_memory_node(new_nid));
		vdso_getcpu_init();
	}

	return 0;
}
static int update_lookup_table(void *data)
{
	struct topology_update_data *update;

	if (!data)
		return -EINVAL;

	/*
	 * Upon topology update, the numa-cpu lookup table needs to be updated
	 * for all threads in the core, including offline CPUs, to ensure that
	 * future hotplug operations respect the cpu-to-node associativity
	 * properly.
	 */
	for (update = data; update; update = update->next) {
		int nid, base, j;

		nid = update->new_nid;
		base = cpu_first_thread_sibling(update->cpu);

		for (j = 0; j < threads_per_core; j++) {
			update_numa_cpu_lookup_table(base + j, nid);
		}
	}

	return 0;
}
/*
 * Update the node maps and sysfs entries for each cpu whose home node
 * has changed. Returns 1 when the topology has changed, and 0 otherwise.
 *
 * cpus_locked says whether we already hold cpu_hotplug_lock.
 */
int numa_update_cpu_topology(bool cpus_locked)
{
	unsigned int cpu, sibling, changed = 0;
	struct topology_update_data *updates, *ud;
	cpumask_t updated_cpus;
	struct device *dev;
	int weight, new_nid, i = 0;

	if (!prrn_enabled && !vphn_enabled && topology_inited)
		return 0;

	weight = cpumask_weight(&cpu_associativity_changes_mask);
	if (!weight)
		return 0;

	updates = kcalloc(weight, sizeof(*updates), GFP_KERNEL);
	if (!updates)
		return 0;

	cpumask_clear(&updated_cpus);

	for_each_cpu(cpu, &cpu_associativity_changes_mask) {
		/*
		 * If siblings aren't flagged for changes, updates list
		 * will be too short. Skip on this update and set for next
		 * update.
		 */
		if (!cpumask_subset(cpu_sibling_mask(cpu),
					&cpu_associativity_changes_mask)) {
			pr_info("Sibling bits not set for associativity "
					"change, cpu%d\n", cpu);
			cpumask_or(&cpu_associativity_changes_mask,
					&cpu_associativity_changes_mask,
					cpu_sibling_mask(cpu));
			cpu = cpu_last_thread_sibling(cpu);
			continue;
		}

		new_nid = find_and_online_cpu_nid(cpu);

		if (new_nid == numa_cpu_lookup_table[cpu]) {
			cpumask_andnot(&cpu_associativity_changes_mask,
					&cpu_associativity_changes_mask,
					cpu_sibling_mask(cpu));
			dbg("Assoc chg gives same node %d for cpu%d\n",
					new_nid, cpu);
			cpu = cpu_last_thread_sibling(cpu);
			continue;
		}

		for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
			ud = &updates[i++];
			ud->next = &updates[i];
			ud->cpu = sibling;
			ud->new_nid = new_nid;
			ud->old_nid = numa_cpu_lookup_table[sibling];
			cpumask_set_cpu(sibling, &updated_cpus);
		}
		cpu = cpu_last_thread_sibling(cpu);
	}

	/*
	 * Prevent processing of 'updates' from overflowing array
	 * where last entry filled in a 'next' pointer.
	 */
	if (i)
		updates[i-1].next = NULL;

	pr_debug("Topology update for the following CPUs:\n");
	if (cpumask_weight(&updated_cpus)) {
		for (ud = &updates[0]; ud; ud = ud->next) {
			pr_debug("cpu %d moving from node %d "
					  "to %d\n", ud->cpu,
					  ud->old_nid, ud->new_nid);
		}
	}

	/*
	 * In cases where we have nothing to update (because the updates list
	 * is too short or because the new topology is same as the old one),
	 * skip invoking update_cpu_topology() via stop-machine(). This is
	 * necessary (and not just a fast-path optimization) since stop-machine
	 * can end up electing a random CPU to run update_cpu_topology(), and
	 * thus trick us into setting up incorrect cpu-node mappings (since
	 * 'updates' is kzalloc()'ed).
	 *
	 * And for the similar reason, we will skip all the following updating.
	 */
	if (!cpumask_weight(&updated_cpus))
		goto out;

	if (cpus_locked)
		stop_machine_cpuslocked(update_cpu_topology, &updates[0],
					&updated_cpus);
	else
		stop_machine(update_cpu_topology, &updates[0], &updated_cpus);

	/*
	 * Update the numa-cpu lookup table with the new mappings, even for
	 * offline CPUs. It is best to perform this update from the stop-
	 * machine context.
	 */
	if (cpus_locked)
		stop_machine_cpuslocked(update_lookup_table, &updates[0],
					cpumask_of(raw_smp_processor_id()));
	else
		stop_machine(update_lookup_table, &updates[0],
			     cpumask_of(raw_smp_processor_id()));

	for (ud = &updates[0]; ud; ud = ud->next) {
		unregister_cpu_under_node(ud->cpu, ud->old_nid);
		register_cpu_under_node(ud->cpu, ud->new_nid);

		dev = get_cpu_device(ud->cpu);
		if (dev)
			kobject_uevent(&dev->kobj, KOBJ_CHANGE);
		cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
		changed = 1;
	}

out:
	kfree(updates);
	return changed;
}
int arch_update_cpu_topology(void)
{
	return numa_update_cpu_topology(true);
}
static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}
static DECLARE_WORK(topology_work, topology_work_fn);

static void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}
static void topology_timer_fn(struct timer_list *unused)
{
	if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
		topology_schedule_update();
	else if (vphn_enabled) {
		if (update_cpu_associativity_changes_mask() > 0)
			topology_schedule_update();
		reset_topology_timer();
	}
}
static struct timer_list topology_timer;

static void reset_topology_timer(void)
{
	if (vphn_enabled)
		mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
}
static int dt_update_callback(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct of_reconfig_data *update = data;
	int rc = NOTIFY_DONE;

	switch (action) {
	case OF_RECONFIG_UPDATE_PROPERTY:
		if (of_node_is_type(update->dn, "cpu") &&
		    !of_prop_cmp(update->prop->name, "ibm,associativity")) {
			u32 core_id;

			of_property_read_u32(update->dn, "reg", &core_id);
			rc = dlpar_cpu_readd(core_id);
			rc = NOTIFY_OK;
		}
		break;
	}

	return rc;
}

static struct notifier_block dt_update_nb = {
	.notifier_call = dt_update_callback,
};
/*
 * Start polling for associativity changes.
 */
int start_topology_update(void)
{
	int rc = 0;

	if (!topology_updates_enabled)
		return 0;

	if (firmware_has_feature(FW_FEATURE_PRRN)) {
		if (!prrn_enabled) {
			prrn_enabled = 1;
			rc = of_reconfig_notifier_register(&dt_update_nb);
		}
	}
	if (firmware_has_feature(FW_FEATURE_VPHN) &&
		   lppaca_shared_proc(get_lppaca())) {
		if (!vphn_enabled) {
			vphn_enabled = 1;
			setup_cpu_associativity_change_counters();
			timer_setup(&topology_timer, topology_timer_fn,
				    TIMER_DEFERRABLE);
			reset_topology_timer();
		}
	}

	pr_info("Starting topology update%s%s\n",
		(prrn_enabled ? " prrn_enabled" : ""),
		(vphn_enabled ? " vphn_enabled" : ""));

	return rc;
}
/*
 * Disable polling for VPHN associativity changes.
 */
int stop_topology_update(void)
{
	int rc = 0;

	if (!topology_updates_enabled)
		return 0;

	if (prrn_enabled) {
		prrn_enabled = 0;
		rc = of_reconfig_notifier_unregister(&dt_update_nb);
	}
	if (vphn_enabled) {
		vphn_enabled = 0;
		rc = del_timer_sync(&topology_timer);
	}

	pr_info("Stopping topology update\n");

	return rc;
}

int prrn_is_enabled(void)
{
	return prrn_enabled;
}
static int topology_read(struct seq_file *file, void *v)
{
	if (vphn_enabled || prrn_enabled)
		seq_puts(file, "on\n");
	else
		seq_puts(file, "off\n");

	return 0;
}

static int topology_open(struct inode *inode, struct file *file)
{
	return single_open(file, topology_read, NULL);
}
static ssize_t topology_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *off)
{
	char kbuf[4]; /* "on" or "off" plus null. */
	int read_len;

	read_len = count < 3 ? count : 3;
	if (copy_from_user(kbuf, buf, read_len))
		return -EINVAL;

	kbuf[read_len] = '\0';

	if (!strncmp(kbuf, "on", 2)) {
		topology_updates_enabled = true;
		start_topology_update();
	} else if (!strncmp(kbuf, "off", 3)) {
		stop_topology_update();
		topology_updates_enabled = false;
	} else
		return -EINVAL;

	return count;
}
static const struct proc_ops topology_proc_ops = {
	.proc_read	= seq_read,
	.proc_write	= topology_write,
	.proc_open	= topology_open,
	.proc_release	= single_release,
	.proc_lseek	= seq_lseek,
};
static int topology_update_init(void)
{
	start_topology_update();

	if (vphn_enabled)
		topology_schedule_update();

	if (!proc_create("powerpc/topology_updates", 0644, NULL, &topology_proc_ops))
		return -ENOMEM;

	topology_inited = 1;
	return 0;
}
device_initcall(topology_update_init);
#endif /* CONFIG_PPC_SPLPAR */