/* Common code for 32 and 64-bit NUMA */
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/topology.h>

#include <asm/e820/api.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>

#include "numa_internal.h"
int numa_off;
nodemask_t numa_nodes_parsed __initdata;

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);
static struct numa_meminfo numa_meminfo
#ifndef CONFIG_MEMORY_HOTPLUG
__initdata
#endif
;
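/*
 * NUMA distance table: a flat numa_distance_cnt x numa_distance_cnt array
 * of u8 distances, indexed as [from * numa_distance_cnt + to].  It is
 * allocated lazily by numa_alloc_distance() and defaults to LOCAL_DISTANCE
 * on the diagonal and REMOTE_DISTANCE elsewhere.
 */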
static int numa_distance_cnt;
static u8 *numa_distance;
static __init int numa_setup(char *opt)
{
	if (!strncmp(opt, "off", 3))
		numa_off = 1;
#ifdef CONFIG_NUMA_EMU
	if (!strncmp(opt, "fake=", 5))
		numa_emu_cmdline(opt + 5);
#endif
#ifdef CONFIG_ACPI_NUMA
	if (!strncmp(opt, "noacpi", 6))
		acpi_numa = -1;
#endif
	return 0;
}
early_param("numa", numa_setup);
/*
 * apicid, cpu, node mappings
 */
s16 __apicid_to_node[MAX_LOCAL_APIC] = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};
int numa_cpu_node(int cpu)
{
	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

	if (apicid != BAD_APICID)
		return __apicid_to_node[apicid];
	return NUMA_NO_NODE;
}
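/*
 * Per-node mask of the CPUs that belong to each node.  The cpumasks are
 * allocated by setup_node_to_cpumask_map() and looked up through
 * cpumask_of_node().
 */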
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);
/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
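/*
 * Record the cpu -> node mapping.  Before the per-cpu areas exist the
 * mapping lives in the early x86_cpu_to_node_map array; afterwards it is
 * stored in the per-cpu variable and mirrored via set_cpu_numa_node().
 */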
void numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;
	set_cpu_numa_node(cpu, node);
}
void numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}
/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
}
static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
				     struct numa_meminfo *mi)
{
	/* ignore zero length blks */
	if (start == end)
		return 0;

	/* whine about and ignore invalid blks */
	if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
		pr_warn("Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
			nid, start, end - 1);
		return 0;
	}

	if (mi->nr_blks >= NR_NODE_MEMBLKS) {
		pr_err("too many memblk ranges\n");
		return -EINVAL;
	}

	mi->blk[mi->nr_blks].start = start;
	mi->blk[mi->nr_blks].end = end;
	mi->blk[mi->nr_blks].nid = nid;
	mi->nr_blks++;
	return 0;
}
/**
 * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
 * @idx: Index of memblk to remove
 * @mi: numa_meminfo to remove memblk from
 *
 * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
 * decrementing @mi->nr_blks.
 */
void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
{
	mi->nr_blks--;
	memmove(&mi->blk[idx], &mi->blk[idx + 1],
		(mi->nr_blks - idx) * sizeof(mi->blk[0]));
}
/**
 * numa_add_memblk - Add one numa_memblk to numa_meminfo
 * @nid: NUMA node ID of the new memblk
 * @start: Start address of the new memblk
 * @end: End address of the new memblk
 *
 * Add a new memblk to the default numa_meminfo.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_add_memblk(int nid, u64 start, u64 end)
{
	return numa_add_memblk_to(nid, start, end, &numa_meminfo);
}
/* Allocate NODE_DATA for a node on the local memory */
static void __init alloc_node_data(int nid)
{
	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
	u64 nd_pa;
	void *nd;
	int tnid;
	/*
	 * Allocate node data.  Try node-local memory and then any node.
	 * Never allocate in DMA zone.
	 */
	nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
	if (!nd_pa) {
		pr_err("Cannot find %zu bytes in any node (initial node: %d)\n",
		       nd_size, nid);
		return;
	}
	nd = __va(nd_pa);

	/* report and initialize */
	printk(KERN_INFO "NODE_DATA(%d) allocated [mem %#010Lx-%#010Lx]\n", nid,
	       nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);
	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
	node_set_online(nid);
}
/**
 * numa_cleanup_meminfo - Cleanup a numa_meminfo
 * @mi: numa_meminfo to clean up
 *
 * Sanitize @mi by merging and removing unnecessary memblks.  Also check for
 * conflicts and clear unused memblks.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
{
	const u64 low = 0;
	const u64 high = PFN_PHYS(max_pfn);
	int i, j, k;

	/* first, trim all entries */
	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		/* make sure all blocks are inside the limits */
		bi->start = max(bi->start, low);
		bi->end = min(bi->end, high);

		/* and there's no empty or non-existent block */
		if (bi->start >= bi->end ||
		    !memblock_overlaps_region(&memblock.memory,
			bi->start, bi->end - bi->start))
			numa_remove_memblk_from(i--, mi);
	}

	/* merge neighboring / overlapping entries */
	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		for (j = i + 1; j < mi->nr_blks; j++) {
			struct numa_memblk *bj = &mi->blk[j];
			u64 start, end;

			/*
			 * See whether there are overlapping blocks.  Whine
			 * about but allow overlaps of the same nid.  They
			 * will be merged below.
			 */
			if (bi->end > bj->start && bi->start < bj->end) {
				if (bi->nid != bj->nid) {
					pr_err("node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
					       bi->nid, bi->start, bi->end - 1,
					       bj->nid, bj->start, bj->end - 1);
					return -EINVAL;
				}
				pr_warn("Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
					bi->nid, bi->start, bi->end - 1,
					bj->start, bj->end - 1);
			}

			/*
			 * Join together blocks on the same node, holes
			 * between which don't overlap with memory on other
			 * nodes.
			 */
			if (bi->nid != bj->nid)
				continue;
			start = min(bi->start, bj->start);
			end = max(bi->end, bj->end);
			for (k = 0; k < mi->nr_blks; k++) {
				struct numa_memblk *bk = &mi->blk[k];

				if (bi->nid == bk->nid)
					continue;
				if (start < bk->end && end > bk->start)
					break;
			}
			if (k < mi->nr_blks)
				continue;
			printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
			       bi->nid, bi->start, bi->end - 1, bj->start,
			       bj->end - 1, start, end - 1);
			bi->start = start;
			bi->end = end;
			numa_remove_memblk_from(j--, mi);
		}
	}

	/* clear unused ones */
	for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
		mi->blk[i].start = mi->blk[i].end = 0;
		mi->blk[i].nid = NUMA_NO_NODE;
	}

	return 0;
}
/*
 * Set nodes, which have memory in @mi, in *@nodemask.
 */
static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
					      const struct numa_meminfo *mi)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
		if (mi->blk[i].start != mi->blk[i].end &&
		    mi->blk[i].nid != NUMA_NO_NODE)
			node_set(mi->blk[i].nid, *nodemask);
}
/**
 * numa_reset_distance - Reset NUMA distance table
 *
 * The current table is freed.  The next numa_set_distance() call will
 * create a new one.
 */
void __init numa_reset_distance(void)
{
	size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);

	/* numa_distance could be 1LU marking allocation failure, test cnt */
	if (numa_distance_cnt)
		memblock_free(__pa(numa_distance), size);
	numa_distance_cnt = 0;
	numa_distance = NULL;	/* enable table creation */
}
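/*
 * Lazily allocate the distance table, sized to cover every node seen in
 * numa_nodes_parsed and numa_meminfo, and fill it with the default
 * LOCAL_DISTANCE/REMOTE_DISTANCE values.  On allocation failure,
 * numa_distance is set to a sentinel so that further numa_set_distance()
 * calls are ignored until numa_reset_distance() is called.
 */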
static int __init numa_alloc_distance(void)
{
	nodemask_t nodes_parsed;
	size_t size;
	int i, j, cnt = 0;
	u64 phys;

	/* size the new table and allocate it */
	nodes_parsed = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);

	for_each_node_mask(i, nodes_parsed)
		cnt = i;
	cnt++;
	size = cnt * cnt * sizeof(numa_distance[0]);

	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
				      size, PAGE_SIZE);
	if (!phys) {
		pr_warn("Warning: can't allocate distance table!\n");
		/* don't retry until explicitly reset */
		numa_distance = (void *)1LU;
		return -ENOMEM;
	}
	memblock_reserve(phys, size);

	numa_distance = __va(phys);
	numa_distance_cnt = cnt;

	/* fill with the default distances */
	for (i = 0; i < cnt; i++)
		for (j = 0; j < cnt; j++)
			numa_distance[i * cnt + j] = i == j ?
				LOCAL_DISTANCE : REMOTE_DISTANCE;
	printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);

	return 0;
}
/**
 * numa_set_distance - Set NUMA distance from one NUMA to another
 * @from: the 'from' node to set distance
 * @to: the 'to' node to set distance
 * @distance: NUMA distance
 *
 * Set the distance from node @from to @to to @distance.  If distance table
 * doesn't exist, one which is large enough to accommodate all the currently
 * known nodes will be created.
 *
 * If such table cannot be allocated, a warning is printed and further
 * calls are ignored until the distance table is reset with
 * numa_reset_distance().
 *
 * If @from or @to is higher than the highest known node or lower than zero
 * at the time of table creation or @distance doesn't make sense, the call
 * is ignored.
 * This is to allow simplification of specific NUMA config implementations.
 */
void __init numa_set_distance(int from, int to, int distance)
{
	if (!numa_distance && numa_alloc_distance() < 0)
		return;

	if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
			from < 0 || to < 0) {
		pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}
	if ((u8)distance != distance ||
	    (from == to && distance != LOCAL_DISTANCE)) {
		pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}
	numa_distance[from * numa_distance_cnt + to] = distance;
}
int __node_distance(int from, int to)
{
	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
	return numa_distance[from * numa_distance_cnt + to];
}
EXPORT_SYMBOL(__node_distance);
/*
 * Sanity check to catch more bad NUMA configurations (they are amazingly
 * common).  Make sure the nodes cover all memory.
 */
static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
{
	u64 numaram, e820ram;
	int i;

	numaram = 0;
	for (i = 0; i < mi->nr_blks; i++) {
		u64 s = mi->blk[i].start >> PAGE_SHIFT;
		u64 e = mi->blk[i].end >> PAGE_SHIFT;
		numaram += e - s;
		numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
		if ((s64)numaram < 0)
			numaram = 0;
	}

	e820ram = max_pfn - absent_pages_in_range(0, max_pfn);

	/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
	if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
		printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
		       (numaram << PAGE_SHIFT) >> 20,
		       (e820ram << PAGE_SHIFT) >> 20);
		return false;
	}
	return true;
}
/*
 * Mark all currently memblock-reserved physical memory (which covers the
 * kernel's own memory ranges) as hot-unswappable.
 */
static void __init numa_clear_kernel_node_hotplug(void)
{
	nodemask_t reserved_nodemask = NODE_MASK_NONE;
	struct memblock_region *mb_region;
	int i;

	/*
	 * We have to do some preprocessing of memblock regions, to
	 * make them suitable for reservation.
	 *
	 * At this time, all memory regions reserved by memblock are
	 * used by the kernel, but those regions are not split up
	 * along node boundaries yet, and don't necessarily have their
	 * node ID set yet either.
	 *
	 * So iterate over all memory known to the x86 architecture,
	 * and use those ranges to set the nid in memblock.reserved.
	 * This will split up the memblock regions along node
	 * boundaries and will set the node IDs as well.
	 */
	for (i = 0; i < numa_meminfo.nr_blks; i++) {
		struct numa_memblk *mb = numa_meminfo.blk + i;
		int ret;

		ret = memblock_set_node(mb->start, mb->end - mb->start, &memblock.reserved, mb->nid);
		WARN_ON_ONCE(ret);
	}

	/*
	 * Now go over all reserved memblock regions, to construct a
	 * node mask of all kernel reserved memory areas.
	 *
	 * [ Note, when booting with mem=nn[kMG] or in a kdump kernel,
	 *   numa_meminfo might not include all memblock.reserved
	 *   memory ranges, because quirks such as trim_snb_memory()
	 *   reserve specific pages for Sandy Bridge graphics. ]
	 */
	for_each_memblock(reserved, mb_region) {
		if (mb_region->nid != MAX_NUMNODES)
			node_set(mb_region->nid, reserved_nodemask);
	}

	/*
	 * Finally, clear the MEMBLOCK_HOTPLUG flag for all memory
	 * belonging to the reserved node mask.
	 *
	 * Note that this will include memory regions that reside
	 * on nodes that contain kernel memory - entire nodes
	 * become hot-unpluggable:
	 */
	for (i = 0; i < numa_meminfo.nr_blks; i++) {
		struct numa_memblk *mb = numa_meminfo.blk + i;

		if (!node_isset(mb->nid, reserved_nodemask))
			continue;

		memblock_clear_hotplug(mb->start, mb->end - mb->start);
	}
}
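/*
 * Propagate the node IDs from @mi into memblock, make sure the parsed nodes
 * actually cover the e820 memory map, and allocate NODE_DATA for every node
 * that ends up with a sensible amount of memory.
 */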
static int __init numa_register_memblks(struct numa_meminfo *mi)
{
	unsigned long uninitialized_var(pfn_align);
	int i, nid;

	/* Account for nodes with cpus and no memory */
	node_possible_map = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&node_possible_map, mi);
	if (WARN_ON(nodes_empty(node_possible_map)))
		return -EINVAL;

	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *mb = &mi->blk[i];
		memblock_set_node(mb->start, mb->end - mb->start,
				  &memblock.memory, mb->nid);
	}

	/*
	 * At very early time, the kernel has to use some memory such as
	 * loading the kernel image.  We cannot prevent this anyway.  So any
	 * node the kernel resides in should be un-hotpluggable.
	 *
	 * And when we come here, alloc node data won't fail.
	 */
	numa_clear_kernel_node_hotplug();

	/*
	 * If the sections array is going to be used for pfn -> nid mapping,
	 * check whether its granularity is fine enough.
	 */
#ifdef NODE_NOT_IN_PAGE_FLAGS
	pfn_align = node_map_pfn_alignment();
	if (pfn_align && pfn_align < PAGES_PER_SECTION) {
		printk(KERN_WARNING "Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
		       PFN_PHYS(pfn_align) >> 20,
		       PFN_PHYS(PAGES_PER_SECTION) >> 20);
		return -EINVAL;
	}
#endif
	if (!numa_meminfo_cover_memory(mi))
		return -EINVAL;

	/* Finally register nodes. */
	for_each_node_mask(nid, node_possible_map) {
		u64 start = PFN_PHYS(max_pfn);
		u64 end = 0;

		for (i = 0; i < mi->nr_blks; i++) {
			if (nid != mi->blk[i].nid)
				continue;
			start = min(mi->blk[i].start, start);
			end = max(mi->blk[i].end, end);
		}

		if (start >= end)
			continue;

		/*
		 * Don't confuse VM with a node that doesn't have the
		 * minimum amount of memory:
		 */
		if (end && (end - start) < NODE_MIN_SIZE)
			continue;

		alloc_node_data(nid);
	}

	/* Dump memblock with node info and return. */
	memblock_dump_all();
	return 0;
}
/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU.  This breaks the 1:1 cpu->node
 * mapping.  To avoid this fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet.  We round robin the existing
 * nodes.
 */
static void __init numa_init_array(void)
{
	int rr, i;

	rr = first_node(node_online_map);
	for (i = 0; i < nr_cpu_ids; i++) {
		if (early_cpu_to_node(i) != NUMA_NO_NODE)
			continue;
		numa_set_node(i, rr);
		rr = next_node_in(rr, node_online_map);
	}
}
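/*
 * Reset all NUMA state, run the given detection method, then sanitize the
 * resulting numa_meminfo, register the nodes and drop any stale cpu -> node
 * mappings that point at nodes which did not come online.
 */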
static int __init numa_init(int (*init_func)(void))
{
	int i;
	int ret;

	for (i = 0; i < MAX_LOCAL_APIC; i++)
		set_apicid_to_node(i, NUMA_NO_NODE);
	nodes_clear(numa_nodes_parsed);
	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);
	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
				  MAX_NUMNODES));
	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
				  MAX_NUMNODES));
	/* In case that parsing SRAT failed. */
	WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
	numa_reset_distance();

	ret = init_func();
	if (ret < 0)
		return ret;

	/*
	 * We reset memblock back to the top-down direction
	 * here because if we configured ACPI_NUMA, we have
	 * parsed SRAT in init_func().  It is ok to have the
	 * reset here even if we didn't configure ACPI_NUMA
	 * or the ACPI NUMA init fails and falls back to dummy
	 * NUMA init.
	 */
	memblock_set_bottom_up(false);
	ret = numa_cleanup_meminfo(&numa_meminfo);
	if (ret < 0)
		return ret;
	numa_emulation(&numa_meminfo, numa_distance_cnt);
	ret = numa_register_memblks(&numa_meminfo);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_cpu_ids; i++) {
		int nid = early_cpu_to_node(i);
		if (nid == NUMA_NO_NODE)
			continue;
		if (!node_online(nid))
			numa_clear_node(i);
	}
	numa_init_array();
	return 0;
}
/**
 * dummy_numa_init - Fallback dummy NUMA init
 *
 * Used if there's no underlying NUMA architecture, NUMA initialization
 * fails, or NUMA is disabled on the command line.
 *
 * Must online at least one node and add memory blocks that cover all
 * allowed memory.  This function must not fail.
 */
static int __init dummy_numa_init(void)
{
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
	printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
	       0LLU, PFN_PHYS(max_pfn) - 1);
	node_set(0, numa_nodes_parsed);
	numa_add_memblk(0, 0, PFN_PHYS(max_pfn));
	return 0;
}

/**
 * x86_numa_init - Initialize NUMA
 *
 * Try each configured NUMA initialization method until one succeeds.  The
 * last fallback is dummy single node config encompassing whole memory and
 * never fails.
 */
void __init x86_numa_init(void)
{
	if (!numa_off) {
#ifdef CONFIG_ACPI_NUMA
		if (!numa_init(x86_acpi_numa_init))
			return;
#endif
#ifdef CONFIG_AMD_NUMA
		if (!numa_init(amd_numa_init))
			return;
#endif
	}
	numa_init(dummy_numa_init);
}
static void __init init_memory_less_node(int nid)
{
	unsigned long zones_size[MAX_NR_ZONES] = {0};
	unsigned long zholes_size[MAX_NR_ZONES] = {0};

	/* Allocate and initialize node data. Memory-less node is now online. */
	alloc_node_data(nid);
	free_area_init_node(nid, zones_size, 0, zholes_size);

	/*
	 * All zonelists will be built later in start_kernel() after per cpu
	 * areas are initialized.
	 */
}
/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if x86_cpu_to_apicid[],
 * and apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for NUMA
 * emulation and faking node case (when running a kernel compiled
 * for NUMA on a non NUMA box), which is OK as cpu_to_node[]
 * is already initialized in a round robin manner at numa_init_array,
 * prior to this call, and this initialization is good enough
 * for the fake NUMA cases.
 *
 * Called before the per_cpu areas are setup.
 */
void __init init_cpu_to_node(void)
{
	int cpu;
	u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

	BUG_ON(cpu_to_apicid == NULL);

	for_each_possible_cpu(cpu) {
		int node = numa_cpu_node(cpu);

		if (node == NUMA_NO_NODE)
			continue;
		if (!node_online(node))
			init_memory_less_node(node);
		numa_set_node(cpu, node);
	}
}
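/*
 * When CONFIG_DEBUG_PER_CPU_MAPS is off, the cpu <-> node helpers below are
 * the simple variants that update node_to_cpumask_map directly; the #else
 * branch further down provides the checking/debug versions.
 */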
#ifndef CONFIG_DEBUG_PER_CPU_MAPS

# ifndef CONFIG_NUMA_EMU
void numa_add_cpu(int cpu)
{
	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void numa_remove_cpu(int cpu)
{
	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
# endif	/* !CONFIG_NUMA_EMU */
#else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
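/*
 * Debug variant of cpu_to_node(): warn and dump a stack trace when it is
 * used before the per-cpu areas are set up, falling back to the early map.
 */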
int __cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);
/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	if (!cpu_possible(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
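/*
 * Set or clear @cpu in @node's cpumask and log the resulting mask.  Used by
 * the debug versions of numa_add_cpu()/numa_remove_cpu() below.
 */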
void debug_cpumask_set_cpu(int cpu, int node, bool enable)
{
	struct cpumask *mask;

	if (node == NUMA_NO_NODE) {
		/* early_cpu_to_node() already emits a warning and trace */
		return;
	}
	mask = node_to_cpumask_map[node];
	if (!mask) {
		pr_err("node_to_cpumask_map[%i] NULL\n", node);
		dump_stack();
		return;
	}
	if (enable)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_clear_cpu(cpu, mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %*pbl\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu",
		cpu, node, cpumask_pr_args(mask));
}
# ifndef CONFIG_NUMA_EMU
static void numa_set_cpumask(int cpu, bool enable)
{
	debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}

void numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, true);
}

void numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, false);
}
# endif	/* !CONFIG_NUMA_EMU */
/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const struct cpumask *cpumask_of_node(int node)
{
	if (node >= nr_node_ids) {
		printk(KERN_WARNING "cpumask_of_node(%d): node > nr_node_ids(%u)\n",
			node, nr_node_ids);
		dump_stack();
		return cpu_none_mask;
	}
	if (node_to_cpumask_map[node] == NULL) {
		printk(KERN_WARNING "cpumask_of_node(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return cpu_online_mask;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

#endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */
#ifdef CONFIG_MEMORY_HOTPLUG
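/*
 * Resolve the node ID for a hot-added physical address by scanning
 * numa_meminfo; defaults to the nid of the first memblk when no range
 * matches.
 */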
int memory_add_physaddr_to_nid(u64 start)
{
	struct numa_meminfo *mi = &numa_meminfo;
	int nid = mi->blk[0].nid;
	int i;

	for (i = 0; i < mi->nr_blks; i++)
		if (mi->blk[i].start <= start && mi->blk[i].end > start)
			nid = mi->blk[i].nid;
	return nid;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif