1 // SPDX-License-Identifier: GPL-2.0-only
3 * linux/mm/memory_hotplug.c
8 #include <linux/stddef.h>
10 #include <linux/sched/signal.h>
11 #include <linux/swap.h>
12 #include <linux/interrupt.h>
13 #include <linux/pagemap.h>
14 #include <linux/compiler.h>
15 #include <linux/export.h>
16 #include <linux/pagevec.h>
17 #include <linux/writeback.h>
18 #include <linux/slab.h>
19 #include <linux/sysctl.h>
20 #include <linux/cpu.h>
21 #include <linux/memory.h>
22 #include <linux/memremap.h>
23 #include <linux/memory_hotplug.h>
24 #include <linux/highmem.h>
25 #include <linux/vmalloc.h>
26 #include <linux/ioport.h>
27 #include <linux/delay.h>
28 #include <linux/migrate.h>
29 #include <linux/page-isolation.h>
30 #include <linux/pfn.h>
31 #include <linux/suspend.h>
32 #include <linux/mm_inline.h>
33 #include <linux/firmware-map.h>
34 #include <linux/stop_machine.h>
35 #include <linux/hugetlb.h>
36 #include <linux/memblock.h>
37 #include <linux/compaction.h>
38 #include <linux/rmap.h>
40 #include <asm/tlbflush.h>
46 * online_page_callback contains pointer to current page onlining function.
47 * Initially it is generic_online_page(). If required, it can be
48 * changed by calling set_online_page_callback() for callback registration
49 * and restore_online_page_callback() for generic callback restore.
52 static online_page_callback_t online_page_callback = generic_online_page;
53 static DEFINE_MUTEX(online_page_callback_lock);
55 DEFINE_STATIC_PERCPU_RWSEM(mem_hotplug_lock);
57 void get_online_mems(void)
59 percpu_down_read(&mem_hotplug_lock);
62 void put_online_mems(void)
64 percpu_up_read(&mem_hotplug_lock);
67 bool movable_node_enabled = false;
69 #ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE
70 int mhp_default_online_type = MMOP_OFFLINE;
72 int mhp_default_online_type = MMOP_ONLINE;
75 static int __init setup_memhp_default_state(char *str)
77 const int online_type = mhp_online_type_from_str(str);
80 mhp_default_online_type = online_type;
84 __setup("memhp_default_state=", setup_memhp_default_state);
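/*
 * Example (illustration only; see Documentation/admin-guide/kernel-parameters.txt):
 * booting with "memhp_default_state=online_movable" makes newly added memory
 * blocks default to MMOP_ONLINE_MOVABLE, so hot-added blocks end up in
 * ZONE_MOVABLE without an explicit "echo online_movable > .../memoryXXX/state".
 */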
86 void mem_hotplug_begin(void)
89 percpu_down_write(&mem_hotplug_lock);
92 void mem_hotplug_done(void)
94 percpu_up_write(&mem_hotplug_lock);
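/*
 * Locking sketch (illustration only): paths that add or remove memory take
 * the writer side of mem_hotplug_lock around the actual work, e.g.:
 *
 *	mem_hotplug_begin();
 *	ret = arch_add_memory(nid, start, size, &params);
 *	...
 *	mem_hotplug_done();
 *
 * whereas pfn walkers that only need the online state to stay stable take the
 * reader side via get_online_mems()/put_online_mems() above.
 */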
98 u64 max_mem_size = U64_MAX;
100 /* add this memory to iomem resource */
101 static struct resource *register_memory_resource(u64 start, u64 size,
102 const char *resource_name)
104 struct resource *res;
105 unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
107 if (strcmp(resource_name, "System RAM"))
108 flags |= IORESOURCE_SYSRAM_DRIVER_MANAGED;
111 * Make sure the value parsed from 'mem=' only restricts adding memory
112 * while booting, so that memory hotplug won't be impacted. Please
113 * refer to the documentation of 'mem=' in kernel-parameters.txt for more
116 if (start + size > max_mem_size && system_state < SYSTEM_RUNNING)
117 return ERR_PTR(-E2BIG);
120 * Request ownership of the new memory range. This might be
121 * a child of an existing resource that was present but
122 * not marked as busy.
124 res = __request_region(&iomem_resource, start, size,
125 resource_name, flags);
128 pr_debug("Unable to reserve System RAM region: %016llx->%016llx\n",
129 start, start + size);
130 return ERR_PTR(-EEXIST);
135 static void release_memory_resource(struct resource *res)
139 release_resource(res);
143 #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
144 void get_page_bootmem(unsigned long info, struct page *page,
147 page->freelist = (void *)type;
148 SetPagePrivate(page);
149 set_page_private(page, info);
153 void put_page_bootmem(struct page *page)
157 type = (unsigned long) page->freelist;
158 BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
159 type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);
161 if (page_ref_dec_return(page) == 1) {
162 page->freelist = NULL;
163 ClearPagePrivate(page);
164 set_page_private(page, 0);
165 INIT_LIST_HEAD(&page->lru);
166 free_reserved_page(page);
170 #ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
171 #ifndef CONFIG_SPARSEMEM_VMEMMAP
172 static void register_page_bootmem_info_section(unsigned long start_pfn)
174 unsigned long mapsize, section_nr, i;
175 struct mem_section *ms;
176 struct page *page, *memmap;
177 struct mem_section_usage *usage;
179 section_nr = pfn_to_section_nr(start_pfn);
180 ms = __nr_to_section(section_nr);
182 /* Get section's memmap address */
183 memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
186 * Get page for the memmap's phys address
187 * XXX: need more consideration for sparse_vmemmap...
189 page = virt_to_page(memmap);
190 mapsize = sizeof(struct page) * PAGES_PER_SECTION;
191 mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;
193 /* remember memmap's page */
194 for (i = 0; i < mapsize; i++, page++)
195 get_page_bootmem(section_nr, page, SECTION_INFO);
198 page = virt_to_page(usage);
200 mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;
202 for (i = 0; i < mapsize; i++, page++)
203 get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
206 #else /* CONFIG_SPARSEMEM_VMEMMAP */
207 static void register_page_bootmem_info_section(unsigned long start_pfn)
209 unsigned long mapsize, section_nr, i;
210 struct mem_section *ms;
211 struct page *page, *memmap;
212 struct mem_section_usage *usage;
214 section_nr = pfn_to_section_nr(start_pfn);
215 ms = __nr_to_section(section_nr);
217 memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
219 register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);
222 page = virt_to_page(usage);
224 mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;
226 for (i = 0; i < mapsize; i++, page++)
227 get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
229 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
231 void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
233 unsigned long i, pfn, end_pfn, nr_pages;
234 int node = pgdat->node_id;
237 nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
238 page = virt_to_page(pgdat);
240 for (i = 0; i < nr_pages; i++, page++)
241 get_page_bootmem(node, page, NODE_INFO);
243 pfn = pgdat->node_start_pfn;
244 end_pfn = pgdat_end_pfn(pgdat);
246 /* register section info */
247 for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
249 * Some platforms can assign the same pfn to multiple nodes - on
250 * node0 as well as nodeN. To avoid registering a pfn against
251 * multiple nodes we check that this pfn does not already
252 * reside in some other nodes.
254 if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
255 register_page_bootmem_info_section(pfn);
258 #endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */
260 static int check_pfn_span(unsigned long pfn, unsigned long nr_pages,
264 * Disallow all operations smaller than a sub-section and only
265 * allow operations smaller than a section for
266 * SPARSEMEM_VMEMMAP. Note that check_hotplug_memory_range()
267 * enforces a larger memory_block_size_bytes() granularity for
268 * memory that will be marked online, so this check should only
269 * fire for direct arch_{add,remove}_memory() users outside of
270 * add_memory_resource().
272 unsigned long min_align;
274 if (IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
275 min_align = PAGES_PER_SUBSECTION;
277 min_align = PAGES_PER_SECTION;
278 if (!IS_ALIGNED(pfn, min_align)
279 || !IS_ALIGNED(nr_pages, min_align)) {
280 WARN(1, "Misaligned __%s_pages start: %#lx end: %#lx\n",
281 reason, pfn, pfn + nr_pages - 1);
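/*
 * For scale (assuming x86-64 with 4 KiB pages): a sub-section is 2 MiB
 * (PAGES_PER_SUBSECTION == 512) and a section is 128 MiB
 * (PAGES_PER_SECTION == 32768). So a 2 MiB aligned request such as
 * pfn 0x200 / nr_pages 0x200 passes here with SPARSEMEM_VMEMMAP but is
 * rejected without it.
 */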
287 static int check_hotplug_memory_addressable(unsigned long pfn,
288 unsigned long nr_pages)
290 const u64 max_addr = PFN_PHYS(pfn + nr_pages) - 1;
292 if (max_addr >> MAX_PHYSMEM_BITS) {
293 const u64 max_allowed = (1ull << (MAX_PHYSMEM_BITS + 1)) - 1;
295 "Hotplugged memory exceeds maximum addressable address, range=%#llx-%#llx, maximum=%#llx\n",
296 (u64)PFN_PHYS(pfn), max_addr, max_allowed);
304 * Return the page for a valid pfn only if the page is online. All pfn
305 * walkers which rely on fully initialized page->flags (and friends)
306 * should use this rather than pfn_valid && pfn_to_page.
308 struct page *pfn_to_online_page(unsigned long pfn)
310 unsigned long nr = pfn_to_section_nr(pfn);
311 struct dev_pagemap *pgmap;
312 struct mem_section *ms;
314 if (nr >= NR_MEM_SECTIONS)
317 ms = __nr_to_section(nr);
318 if (!online_section(ms))
322 * Save some code text when online_section() +
323 * pfn_section_valid() are sufficient.
325 if (IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) && !pfn_valid(pfn))
328 if (!pfn_section_valid(ms, pfn))
331 if (!online_device_section(ms))
332 return pfn_to_page(pfn);
335 * Slowpath: when ZONE_DEVICE collides with
336 * ZONE_{NORMAL,MOVABLE} within the same section some pfns in
337 * the section may be 'offline' but 'valid'. Only
338 * get_dev_pagemap() can determine sub-section online status.
340 pgmap = get_dev_pagemap(pfn, NULL);
341 put_dev_pagemap(pgmap);
343 /* The presence of a pgmap indicates ZONE_DEVICE offline pfn */
347 return pfn_to_page(pfn);
349 EXPORT_SYMBOL_GPL(pfn_to_online_page);
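/*
 * Minimal pfn-walker sketch (illustration only; the loop bounds are made up).
 * Holding the reader side of mem_hotplug_lock keeps sections from going
 * offline underneath the walker:
 *
 *	get_online_mems();
 *	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 *		struct page *page = pfn_to_online_page(pfn);
 *
 *		if (!page)
 *			continue;
 *		... page is online and its struct page is fully initialized ...
 *	}
 *	put_online_mems();
 */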
352 * Reasonably generic function for adding memory. It is
353 * expected that archs that support memory hotplug will
354 * call this function after deciding the zone to which to
357 int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
358 struct mhp_params *params)
360 const unsigned long end_pfn = pfn + nr_pages;
361 unsigned long cur_nr_pages;
363 struct vmem_altmap *altmap = params->altmap;
365 if (WARN_ON_ONCE(!params->pgprot.pgprot))
368 err = check_hotplug_memory_addressable(pfn, nr_pages);
374 * Validate altmap is within bounds of the total request
376 if (altmap->base_pfn != pfn
377 || vmem_altmap_offset(altmap) > nr_pages) {
378 pr_warn_once("memory add fail, invalid altmap\n");
384 err = check_pfn_span(pfn, nr_pages, "add");
388 for (; pfn < end_pfn; pfn += cur_nr_pages) {
389 /* Select all remaining pages up to the next section boundary */
390 cur_nr_pages = min(end_pfn - pfn,
391 SECTION_ALIGN_UP(pfn + 1) - pfn);
392 err = sparse_add_section(nid, pfn, cur_nr_pages, altmap);
397 vmemmap_populate_print_last();
401 /* find the smallest valid pfn in the range [start_pfn, end_pfn) */
402 static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
403 unsigned long start_pfn,
404 unsigned long end_pfn)
406 for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {
407 if (unlikely(!pfn_to_online_page(start_pfn)))
410 if (unlikely(pfn_to_nid(start_pfn) != nid))
413 if (zone != page_zone(pfn_to_page(start_pfn)))
422 /* find the biggest valid pfn in the range [start_pfn, end_pfn). */
423 static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
424 unsigned long start_pfn,
425 unsigned long end_pfn)
429 /* pfn is the end pfn of a memory section. */
431 for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) {
432 if (unlikely(!pfn_to_online_page(pfn)))
435 if (unlikely(pfn_to_nid(pfn) != nid))
438 if (zone != page_zone(pfn_to_page(pfn)))
447 static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
448 unsigned long end_pfn)
451 int nid = zone_to_nid(zone);
453 zone_span_writelock(zone);
454 if (zone->zone_start_pfn == start_pfn) {
456 * If the removed section is the smallest section in the zone, we need
457 * to shrink zone->zone_start_pfn and zone->spanned_pages.
458 * In this case, find the next smallest valid mem_section
459 * for shrinking the zone.
461 pfn = find_smallest_section_pfn(nid, zone, end_pfn,
464 zone->spanned_pages = zone_end_pfn(zone) - pfn;
465 zone->zone_start_pfn = pfn;
467 zone->zone_start_pfn = 0;
468 zone->spanned_pages = 0;
470 } else if (zone_end_pfn(zone) == end_pfn) {
472 * If the removed section is the biggest section in the zone, we need
473 * to shrink zone->spanned_pages.
474 * In this case, find the next biggest valid mem_section for
477 pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn,
480 zone->spanned_pages = pfn - zone->zone_start_pfn + 1;
482 zone->zone_start_pfn = 0;
483 zone->spanned_pages = 0;
486 zone_span_writeunlock(zone);
489 static void update_pgdat_span(struct pglist_data *pgdat)
491 unsigned long node_start_pfn = 0, node_end_pfn = 0;
494 for (zone = pgdat->node_zones;
495 zone < pgdat->node_zones + MAX_NR_ZONES; zone++) {
496 unsigned long end_pfn = zone_end_pfn(zone);
498 /* No need to lock the zones, they can't change. */
499 if (!zone->spanned_pages)
502 node_start_pfn = zone->zone_start_pfn;
503 node_end_pfn = end_pfn;
507 if (end_pfn > node_end_pfn)
508 node_end_pfn = end_pfn;
509 if (zone->zone_start_pfn < node_start_pfn)
510 node_start_pfn = zone->zone_start_pfn;
513 pgdat->node_start_pfn = node_start_pfn;
514 pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
517 void __ref remove_pfn_range_from_zone(struct zone *zone,
518 unsigned long start_pfn,
519 unsigned long nr_pages)
521 const unsigned long end_pfn = start_pfn + nr_pages;
522 struct pglist_data *pgdat = zone->zone_pgdat;
523 unsigned long pfn, cur_nr_pages, flags;
525 /* Poison struct pages because they are now uninitialized again. */
526 for (pfn = start_pfn; pfn < end_pfn; pfn += cur_nr_pages) {
529 /* Select all remaining pages up to the next section boundary */
531 min(end_pfn - pfn, SECTION_ALIGN_UP(pfn + 1) - pfn);
532 page_init_poison(pfn_to_page(pfn),
533 sizeof(struct page) * cur_nr_pages);
536 #ifdef CONFIG_ZONE_DEVICE
538 * Zone shrinking code cannot properly deal with ZONE_DEVICE. So
539 * we will not try to shrink the zones - which is okay as
540 * set_zone_contiguous() cannot deal with ZONE_DEVICE either way.
542 if (zone_idx(zone) == ZONE_DEVICE)
546 clear_zone_contiguous(zone);
548 pgdat_resize_lock(zone->zone_pgdat, &flags);
549 shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
550 update_pgdat_span(pgdat);
551 pgdat_resize_unlock(zone->zone_pgdat, &flags);
553 set_zone_contiguous(zone);
556 static void __remove_section(unsigned long pfn, unsigned long nr_pages,
557 unsigned long map_offset,
558 struct vmem_altmap *altmap)
560 struct mem_section *ms = __pfn_to_section(pfn);
562 if (WARN_ON_ONCE(!valid_section(ms)))
565 sparse_remove_section(ms, pfn, nr_pages, map_offset, altmap);
569 * __remove_pages() - remove sections of pages
570 * @pfn: starting pageframe (must be aligned to start of a section)
571 * @nr_pages: number of pages to remove (must be multiple of section size)
572 * @altmap: alternative device page map or %NULL if default memmap is used
574 * Generic helper function to remove section mappings and sysfs entries
575 * for the section of the memory we are removing. Caller needs to make
576 * sure that pages are marked reserved and zones are adjusted properly by
577 * calling offline_pages().
579 void __remove_pages(unsigned long pfn, unsigned long nr_pages,
580 struct vmem_altmap *altmap)
582 const unsigned long end_pfn = pfn + nr_pages;
583 unsigned long cur_nr_pages;
584 unsigned long map_offset = 0;
586 map_offset = vmem_altmap_offset(altmap);
588 if (check_pfn_span(pfn, nr_pages, "remove"))
591 for (; pfn < end_pfn; pfn += cur_nr_pages) {
593 /* Select all remaining pages up to the next section boundary */
594 cur_nr_pages = min(end_pfn - pfn,
595 SECTION_ALIGN_UP(pfn + 1) - pfn);
596 __remove_section(pfn, cur_nr_pages, map_offset, altmap);
601 int set_online_page_callback(online_page_callback_t callback)
606 mutex_lock(&online_page_callback_lock);
608 if (online_page_callback == generic_online_page) {
609 online_page_callback = callback;
613 mutex_unlock(&online_page_callback_lock);
618 EXPORT_SYMBOL_GPL(set_online_page_callback);
620 int restore_online_page_callback(online_page_callback_t callback)
625 mutex_lock(&online_page_callback_lock);
627 if (online_page_callback == callback) {
628 online_page_callback = generic_online_page;
632 mutex_unlock(&online_page_callback_lock);
637 EXPORT_SYMBOL_GPL(restore_online_page_callback);
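/*
 * Callback protocol sketch (illustration only; my_online_page() and its policy
 * helper are made-up names). A ballooning driver can intercept onlining and
 * decide when pages are actually handed to the buddy:
 *
 *	static void my_online_page(struct page *page, unsigned int order)
 *	{
 *		if (my_driver_keeps_page(page, order))
 *			return;
 *		generic_online_page(page, order);
 *	}
 *
 *	rc = set_online_page_callback(&my_online_page);
 *	...
 *	rc = restore_online_page_callback(&my_online_page);
 *
 * Registration fails if a different callback is already installed, and the
 * restore must be passed the same callback that was registered.
 */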
639 void generic_online_page(struct page *page, unsigned int order)
642 * Freeing the page with debug_pagealloc enabled will try to unmap it,
643 * so we should map it first. This is better than introducing a special
644 * case in page freeing fast path.
646 debug_pagealloc_map_pages(page, 1 << order);
647 __free_pages_core(page, order);
648 totalram_pages_add(1UL << order);
649 #ifdef CONFIG_HIGHMEM
650 if (PageHighMem(page))
651 totalhigh_pages_add(1UL << order);
654 EXPORT_SYMBOL_GPL(generic_online_page);
656 static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages)
658 const unsigned long end_pfn = start_pfn + nr_pages;
662 * Online the pages in MAX_ORDER - 1 aligned chunks. The callback might
663 * decide to not expose all pages to the buddy (e.g., expose them
664 * later). We account all pages as being online and belonging to this
667 for (pfn = start_pfn; pfn < end_pfn; pfn += MAX_ORDER_NR_PAGES)
668 (*online_page_callback)(pfn_to_page(pfn), MAX_ORDER - 1);
670 /* mark all involved sections as online */
671 online_mem_sections(start_pfn, end_pfn);
674 /* check which node_states will be changed when onlining memory */
675 static void node_states_check_changes_online(unsigned long nr_pages,
676 struct zone *zone, struct memory_notify *arg)
678 int nid = zone_to_nid(zone);
680 arg->status_change_nid = NUMA_NO_NODE;
681 arg->status_change_nid_normal = NUMA_NO_NODE;
682 arg->status_change_nid_high = NUMA_NO_NODE;
684 if (!node_state(nid, N_MEMORY))
685 arg->status_change_nid = nid;
686 if (zone_idx(zone) <= ZONE_NORMAL && !node_state(nid, N_NORMAL_MEMORY))
687 arg->status_change_nid_normal = nid;
688 #ifdef CONFIG_HIGHMEM
689 if (zone_idx(zone) <= ZONE_HIGHMEM && !node_state(nid, N_HIGH_MEMORY))
690 arg->status_change_nid_high = nid;
694 static void node_states_set_node(int node, struct memory_notify *arg)
696 if (arg->status_change_nid_normal >= 0)
697 node_set_state(node, N_NORMAL_MEMORY);
699 if (arg->status_change_nid_high >= 0)
700 node_set_state(node, N_HIGH_MEMORY);
702 if (arg->status_change_nid >= 0)
703 node_set_state(node, N_MEMORY);
706 static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn,
707 unsigned long nr_pages)
709 unsigned long old_end_pfn = zone_end_pfn(zone);
711 if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
712 zone->zone_start_pfn = start_pfn;
714 zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn;
717 static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn,
718 unsigned long nr_pages)
720 unsigned long old_end_pfn = pgdat_end_pfn(pgdat);
722 if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
723 pgdat->node_start_pfn = start_pfn;
725 pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;
729 static void section_taint_zone_device(unsigned long pfn)
731 struct mem_section *ms = __pfn_to_section(pfn);
733 ms->section_mem_map |= SECTION_TAINT_ZONE_DEVICE;
737 * Associate the pfn range with the given zone, initializing the memmaps
738 * and resizing the pgdat/zone data to span the added pages. After this
739 * call, all affected pages are PG_reserved.
741 * All aligned pageblocks are initialized to the specified migratetype
742 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
743 * zone stats (e.g., nr_isolate_pageblock) are touched.
745 void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
746 unsigned long nr_pages,
747 struct vmem_altmap *altmap, int migratetype)
749 struct pglist_data *pgdat = zone->zone_pgdat;
750 int nid = pgdat->node_id;
753 clear_zone_contiguous(zone);
755 /* TODO: the pgdat resize lock is taken irqsave while the zone span lock is not; this matches the historical behaviour */
756 pgdat_resize_lock(pgdat, &flags);
757 zone_span_writelock(zone);
758 if (zone_is_empty(zone))
759 init_currently_empty_zone(zone, start_pfn, nr_pages);
760 resize_zone_range(zone, start_pfn, nr_pages);
761 zone_span_writeunlock(zone);
762 resize_pgdat_range(pgdat, start_pfn, nr_pages);
763 pgdat_resize_unlock(pgdat, &flags);
766 * Subsection population requires care in pfn_to_online_page().
767 * Set the taint to enable the slow path detection of
768 * ZONE_DEVICE pages in an otherwise ZONE_{NORMAL,MOVABLE}
771 if (zone_is_zone_device(zone)) {
772 if (!IS_ALIGNED(start_pfn, PAGES_PER_SECTION))
773 section_taint_zone_device(start_pfn);
774 if (!IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION))
775 section_taint_zone_device(start_pfn + nr_pages);
779 * TODO: at this point the range of pages is visible but not yet properly
780 * associated with its zone. Not nice, but set_pfnblock_flags_mask()
781 * expects the zone to span the pfn range. All pages in the range are
782 * reserved, so nobody should be touching them and we should be safe.
784 memmap_init_range(nr_pages, nid, zone_idx(zone), start_pfn, 0,
785 MEMINIT_HOTPLUG, altmap, migratetype);
787 set_zone_contiguous(zone);
791 * Returns a default kernel memory zone for the given pfn range.
792 * If no kernel zone covers this pfn range it will automatically go
793 * to the ZONE_NORMAL.
795 static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn,
796 unsigned long nr_pages)
798 struct pglist_data *pgdat = NODE_DATA(nid);
801 for (zid = 0; zid <= ZONE_NORMAL; zid++) {
802 struct zone *zone = &pgdat->node_zones[zid];
804 if (zone_intersects(zone, start_pfn, nr_pages))
808 return &pgdat->node_zones[ZONE_NORMAL];
811 static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
812 unsigned long nr_pages)
814 struct zone *kernel_zone = default_kernel_zone_for_pfn(nid, start_pfn,
816 struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
817 bool in_kernel = zone_intersects(kernel_zone, start_pfn, nr_pages);
818 bool in_movable = zone_intersects(movable_zone, start_pfn, nr_pages);
821 * We inherit the existing zone in a simple case where zones do not
822 * overlap in the given range
824 if (in_kernel ^ in_movable)
825 return (in_kernel) ? kernel_zone : movable_zone;
828 * If the range doesn't belong to any zone or two zones overlap in the
829 * given range then we use movable zone only if movable_node is
830 * enabled because we always online to a kernel zone by default.
832 return movable_node_enabled ? movable_zone : kernel_zone;
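/*
 * Summary of the mapping done by zone_for_pfn_range() below: MMOP_ONLINE_KERNEL
 * picks the default kernel zone for the range (ZONE_NORMAL unless a lower
 * kernel zone already intersects it), MMOP_ONLINE_MOVABLE picks ZONE_MOVABLE,
 * and plain MMOP_ONLINE uses the heuristic in default_zone_for_pfn() above.
 */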
835 struct zone *zone_for_pfn_range(int online_type, int nid, unsigned long start_pfn,
836 unsigned long nr_pages)
838 if (online_type == MMOP_ONLINE_KERNEL)
839 return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages);
841 if (online_type == MMOP_ONLINE_MOVABLE)
842 return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
844 return default_zone_for_pfn(nid, start_pfn, nr_pages);
847 int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
848 int online_type, int nid)
852 int need_zonelists_rebuild = 0;
854 struct memory_notify arg;
856 /* We can only online full sections (e.g., SECTION_IS_ONLINE) */
857 if (WARN_ON_ONCE(!nr_pages ||
858 !IS_ALIGNED(pfn | nr_pages, PAGES_PER_SECTION)))
863 /* associate pfn range with the zone */
864 zone = zone_for_pfn_range(online_type, nid, pfn, nr_pages);
865 move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE);
868 arg.nr_pages = nr_pages;
869 node_states_check_changes_online(nr_pages, zone, &arg);
871 ret = memory_notify(MEM_GOING_ONLINE, &arg);
872 ret = notifier_to_errno(ret);
874 goto failed_addition;
877 * Fixup the number of isolated pageblocks before marking the sections
878 * as online, such that undo_isolate_page_range() works correctly.
880 spin_lock_irqsave(&zone->lock, flags);
881 zone->nr_isolate_pageblock += nr_pages / pageblock_nr_pages;
882 spin_unlock_irqrestore(&zone->lock, flags);
885 * If this zone is not populated, then it is not in the zonelist.
886 * This means the page allocator ignores this zone.
887 * So, the zonelist must be updated after onlining.
889 if (!populated_zone(zone)) {
890 need_zonelists_rebuild = 1;
891 setup_zone_pageset(zone);
894 online_pages_range(pfn, nr_pages);
895 zone->present_pages += nr_pages;
897 pgdat_resize_lock(zone->zone_pgdat, &flags);
898 zone->zone_pgdat->node_present_pages += nr_pages;
899 pgdat_resize_unlock(zone->zone_pgdat, &flags);
901 node_states_set_node(nid, &arg);
902 if (need_zonelists_rebuild)
903 build_all_zonelists(NULL);
904 zone_pcp_update(zone);
906 /* Basic onlining is complete, allow allocation of onlined pages. */
907 undo_isolate_page_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE);
910 * Freshly onlined pages aren't shuffled (e.g., all pages are placed to
911 * the tail of the freelist when undoing isolation). Shuffle the whole
912 * zone to make sure the just onlined pages are properly distributed
913 * across the whole freelist - to create an initial shuffle.
917 init_per_zone_wmark_min();
922 writeback_set_ratelimit();
924 memory_notify(MEM_ONLINE, &arg);
929 pr_debug("online_pages [mem %#010llx-%#010llx] failed\n",
930 (unsigned long long) pfn << PAGE_SHIFT,
931 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
932 memory_notify(MEM_CANCEL_ONLINE, &arg);
933 remove_pfn_range_from_zone(zone, pfn, nr_pages);
937 #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
939 static void reset_node_present_pages(pg_data_t *pgdat)
943 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
944 z->present_pages = 0;
946 pgdat->node_present_pages = 0;
949 /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
950 static pg_data_t __ref *hotadd_new_pgdat(int nid)
952 struct pglist_data *pgdat;
954 pgdat = NODE_DATA(nid);
956 pgdat = arch_alloc_nodedata(nid);
960 pgdat->per_cpu_nodestats =
961 alloc_percpu(struct per_cpu_nodestat);
962 arch_refresh_nodedata(nid, pgdat);
966 * Reset the nr_zones, order and highest_zoneidx before reuse.
967 * Note that kswapd will init kswapd_highest_zoneidx properly
968 * when it starts in the near future.
971 pgdat->kswapd_order = 0;
972 pgdat->kswapd_highest_zoneidx = 0;
973 for_each_online_cpu(cpu) {
974 struct per_cpu_nodestat *p;
976 p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
977 memset(p, 0, sizeof(*p));
981 /* we can use NODE_DATA(nid) from here */
982 pgdat->node_id = nid;
983 pgdat->node_start_pfn = 0;
985 /* init node's zones as empty zones, we don't have any present pages.*/
986 free_area_init_core_hotplug(nid);
989 * The node we allocated has no zone fallback lists. To avoid
990 * accessing an uninitialized zonelist, build one here.
992 build_all_zonelists(pgdat);
995 * When memory is hot-added, all the memory is in offline state. So
996 * clear all zones' present_pages because they will be updated in
997 * online_pages() and offline_pages().
999 reset_node_managed_pages(pgdat);
1000 reset_node_present_pages(pgdat);
1005 static void rollback_node_hotadd(int nid)
1007 pg_data_t *pgdat = NODE_DATA(nid);
1009 arch_refresh_nodedata(nid, NULL);
1010 free_percpu(pgdat->per_cpu_nodestats);
1011 arch_free_nodedata(pgdat);
1016 * try_online_node - online a node if offlined
1018 * @set_node_online: Whether we want to online the node
1019 * called by cpu_up() to online a node without onlined memory.
1022 * 1 -> a new node has been allocated
1023 * 0 -> the node is already online
1024 * -ENOMEM -> the node could not be allocated
1026 static int __try_online_node(int nid, bool set_node_online)
1031 if (node_online(nid))
1034 pgdat = hotadd_new_pgdat(nid);
1036 pr_err("Cannot online node %d due to NULL pgdat\n", nid);
1041 if (set_node_online) {
1042 node_set_online(nid);
1043 ret = register_one_node(nid);
1051 * Users of this function always want to online/register the node
1053 int try_online_node(int nid)
1057 mem_hotplug_begin();
1058 ret = __try_online_node(nid, true);
1063 static int check_hotplug_memory_range(u64 start, u64 size)
1065 /* memory range must be block size aligned */
1066 if (!size || !IS_ALIGNED(start, memory_block_size_bytes()) ||
1067 !IS_ALIGNED(size, memory_block_size_bytes())) {
1068 pr_err("Block size [%#lx] unaligned hotplug range: start %#llx, size %#llx",
1069 memory_block_size_bytes(), start, size);
1076 static int online_memory_block(struct memory_block *mem, void *arg)
1078 mem->online_type = mhp_default_online_type;
1079 return device_online(&mem->dev);
1083 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
1084 * and online/offline operations (triggered e.g. by sysfs).
1086 * we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG
1088 int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
1090 struct mhp_params params = { .pgprot = PAGE_KERNEL };
1092 bool new_node = false;
1096 size = resource_size(res);
1098 ret = check_hotplug_memory_range(start, size);
1102 if (!node_possible(nid)) {
1103 WARN(1, "node %d was absent from the node_possible_map\n", nid);
1107 mem_hotplug_begin();
1109 if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
1110 memblock_add_node(start, size, nid);
1112 ret = __try_online_node(nid, false);
1117 /* call arch's memory hotadd */
1118 ret = arch_add_memory(nid, start, size, ¶ms);
1122 /* create memory block devices after memory was added */
1123 ret = create_memory_block_devices(start, size);
1125 arch_remove_memory(nid, start, size, NULL);
1130 /* If the sysfs file of the new node can't be created, CPUs on the node
1131 * can't be hot-added. There is no way to roll back now.
1132 * So, use BUG_ON() to catch this reluctantly.
1133 * We online the node here and can't roll back from this point on.
1135 node_set_online(nid);
1136 ret = __register_one_node(nid);
1140 /* link memory sections under this node.*/
1141 link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1),
1144 /* create new memmap entry */
1145 if (!strcmp(res->name, "System RAM"))
1146 firmware_map_add_hotplug(start, start + size, "System RAM");
1148 /* device_online() will take the lock when calling online_pages() */
1152 * In case we're allowed to merge the resource, flag it and trigger
1153 * merging now that adding succeeded.
1155 if (mhp_flags & MHP_MERGE_RESOURCE)
1156 merge_system_ram_resource(res);
1158 /* online pages if requested */
1159 if (mhp_default_online_type != MMOP_OFFLINE)
1160 walk_memory_blocks(start, size, NULL, online_memory_block);
1164 /* rollback pgdat allocation and others */
1166 rollback_node_hotadd(nid);
1167 if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
1168 memblock_remove(start, size);
1173 /* requires device_hotplug_lock, see add_memory_resource() */
1174 int __ref __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags)
1176 struct resource *res;
1179 res = register_memory_resource(start, size, "System RAM");
1181 return PTR_ERR(res);
1183 ret = add_memory_resource(nid, res, mhp_flags);
1185 release_memory_resource(res);
1189 int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags)
1193 lock_device_hotplug();
1194 rc = __add_memory(nid, start, size, mhp_flags);
1195 unlock_device_hotplug();
1199 EXPORT_SYMBOL_GPL(add_memory);
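/*
 * Usage sketch (illustration only; nid and start come from the caller, e.g. an
 * ACPI or paravirtualized hotplug driver). Adding one aligned memory block and
 * allowing the new resource to be merged with adjacent System RAM:
 *
 *	rc = add_memory(nid, start, memory_block_size_bytes(),
 *			MHP_MERGE_RESOURCE);
 *
 * The block is added in the offline state unless mhp_default_online_type says
 * otherwise; onlining then goes through the memory block devices created in
 * add_memory_resource().
 */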
1202 * Add special, driver-managed memory to the system as system RAM. Such
1203 * memory is not exposed via the raw firmware-provided memmap as system
1204 * RAM, instead, it is detected and added by a driver - during cold boot,
1205 * after a reboot, and after kexec.
1207 * Reasons why this memory should not be used for the initial memmap of a
1208 * kexec kernel or for placing kexec images:
1209 * - The booting kernel is in charge of determining how this memory will be
1210 * used (e.g., use persistent memory as system RAM)
1211 * - Coordination with a hypervisor is required before this memory
1212 * can be used (e.g., inaccessible parts).
1214 * For this memory, no entries in /sys/firmware/memmap ("raw firmware-provided
1215 * memory map") are created. Also, the created memory resource is flagged
1216 * with IORESOURCE_SYSRAM_DRIVER_MANAGED, so in-kernel users can special-case
1217 * this memory as well (esp., not place kexec images onto it).
1219 * The resource_name (visible via /proc/iomem) has to have the format
1220 * "System RAM ($DRIVER)".
1222 int add_memory_driver_managed(int nid, u64 start, u64 size,
1223 const char *resource_name, mhp_t mhp_flags)
1225 struct resource *res;
1228 if (!resource_name ||
1229 strstr(resource_name, "System RAM (") != resource_name ||
1230 resource_name[strlen(resource_name) - 1] != ')')
1233 lock_device_hotplug();
1235 res = register_memory_resource(start, size, resource_name);
1241 rc = add_memory_resource(nid, res, mhp_flags);
1243 release_memory_resource(res);
1246 unlock_device_hotplug();
1249 EXPORT_SYMBOL_GPL(add_memory_driver_managed);
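/*
 * Usage sketch (illustration only, mirroring the naming used by the dax/kmem
 * driver): the resource name must follow the "System RAM ($DRIVER)" format
 * described above.
 *
 *	rc = add_memory_driver_managed(numa_node, range_start, range_len,
 *				       "System RAM (kmem)", MHP_NONE);
 *
 * The range then appears in /proc/iomem as "System RAM (kmem)" and no
 * firmware memmap entry is created for it, so kexec will not place images
 * there by default.
 */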
1251 #ifdef CONFIG_MEMORY_HOTREMOVE
1253 * Confirm all pages in a range [start, end) belong to the same zone (skipping
1254 * memory holes). When true, return the zone.
1256 struct zone *test_pages_in_a_zone(unsigned long start_pfn,
1257 unsigned long end_pfn)
1259 unsigned long pfn, sec_end_pfn;
1260 struct zone *zone = NULL;
1263 for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
1265 pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
1266 /* Make sure the memory section is present first */
1267 if (!present_section_nr(pfn_to_section_nr(pfn)))
1269 for (; pfn < sec_end_pfn && pfn < end_pfn;
1270 pfn += MAX_ORDER_NR_PAGES) {
1272 /* This is just a CONFIG_HOLES_IN_ZONE check.*/
1273 while ((i < MAX_ORDER_NR_PAGES) &&
1274 !pfn_valid_within(pfn + i))
1276 if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
1278 /* Check if we got outside of the zone */
1279 if (zone && !zone_spans_pfn(zone, pfn + i))
1281 page = pfn_to_page(pfn + i);
1282 if (zone && page_zone(page) != zone)
1284 zone = page_zone(page);
1292 * Scan pfn range [start,end) to find movable/migratable pages (LRU pages,
1293 * non-lru movable pages and hugepages). Will skip over most unmovable
1294 * pages (esp., pages that can be skipped when offlining), but bail out on
1295 * definitely unmovable pages.
1298 * 0 in case a movable page is found and movable_pfn was updated.
1299 * -ENOENT in case no movable page was found.
1300 * -EBUSY in case a definitely unmovable page was found.
1302 static int scan_movable_pages(unsigned long start, unsigned long end,
1303 unsigned long *movable_pfn)
1307 for (pfn = start; pfn < end; pfn++) {
1308 struct page *page, *head;
1311 if (!pfn_valid(pfn))
1313 page = pfn_to_page(pfn);
1316 if (__PageMovable(page))
1320 * PageOffline() pages that are not marked __PageMovable() and
1321 * have a reference count > 0 (after MEM_GOING_OFFLINE) are
1322 * definitely unmovable. If their reference count would be 0,
1323 * they could at least be skipped when offlining memory.
1325 if (PageOffline(page) && page_count(page))
1328 if (!PageHuge(page))
1330 head = compound_head(page);
1332 * This test is racy as we hold no reference or lock. The
1333 * hugetlb page could have been free'ed and head is no longer
1334 * a hugetlb page before the following check. In such unlikely
1335 * cases false positives and negatives are possible. Calling
1336 * code must deal with these scenarios.
1338 if (HPageMigratable(head))
1340 skip = compound_nr(head) - (page - head);
1350 do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
1353 struct page *page, *head;
1357 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1358 if (!pfn_valid(pfn))
1360 page = pfn_to_page(pfn);
1361 head = compound_head(page);
1363 if (PageHuge(page)) {
1364 pfn = page_to_pfn(head) + compound_nr(head) - 1;
1365 isolate_huge_page(head, &source);
1367 } else if (PageTransHuge(page))
1368 pfn = page_to_pfn(head) + thp_nr_pages(page) - 1;
1371 * HWPoison pages have elevated reference counts so the migration would
1372 * fail on them. It also doesn't make any sense to migrate them in the
1373 * first place. Still try to unmap such a page in case it is still mapped
1374 * (e.g. the current hwpoison implementation doesn't unmap KSM pages, but we
1375 * keep the unmap here as a catch-all safety net).
1377 if (PageHWPoison(page)) {
1378 if (WARN_ON(PageLRU(page)))
1379 isolate_lru_page(page);
1380 if (page_mapped(page))
1381 try_to_unmap(page, TTU_IGNORE_MLOCK);
1385 if (!get_page_unless_zero(page))
1388 * We can skip free pages. And we can deal with pages on
1389 * LRU and non-lru movable pages.
1392 ret = isolate_lru_page(page);
1394 ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
1395 if (!ret) { /* Success */
1396 list_add_tail(&page->lru, &source);
1397 if (!__PageMovable(page))
1398 inc_node_page_state(page, NR_ISOLATED_ANON +
1399 page_is_file_lru(page));
1402 pr_warn("failed to isolate pfn %lx\n", pfn);
1403 dump_page(page, "isolation failed");
1407 if (!list_empty(&source)) {
1408 nodemask_t nmask = node_states[N_MEMORY];
1409 struct migration_target_control mtc = {
1411 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
1415 * We have checked that migration range is on a single zone so
1416 * we can use the nid of the first page for all the others.
1418 mtc.nid = page_to_nid(list_first_entry(&source, struct page, lru));
1421 * try to allocate from a different node but reuse this node
1422 * if there are no other online nodes to be used (e.g. we are
1423 * offlining a part of the only existing node)
1425 node_clear(mtc.nid, nmask);
1426 if (nodes_empty(nmask))
1427 node_set(mtc.nid, nmask);
1428 ret = migrate_pages(&source, alloc_migration_target, NULL,
1429 (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
1431 list_for_each_entry(page, &source, lru) {
1432 pr_warn("migrating pfn %lx failed ret:%d ",
1433 page_to_pfn(page), ret);
1434 dump_page(page, "migration failure");
1436 putback_movable_pages(&source);
1443 static int __init cmdline_parse_movable_node(char *p)
1445 movable_node_enabled = true;
1448 early_param("movable_node", cmdline_parse_movable_node);
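/*
 * Example (illustration only): booting with "movable_node" on the kernel
 * command line keeps memory that the firmware marks as hotpluggable out of the
 * kernel zones, so it can be onlined to ZONE_MOVABLE and the node later
 * unplugged as a whole.
 */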
1450 /* check which node_states will be changed when offlining memory */
1451 static void node_states_check_changes_offline(unsigned long nr_pages,
1452 struct zone *zone, struct memory_notify *arg)
1454 struct pglist_data *pgdat = zone->zone_pgdat;
1455 unsigned long present_pages = 0;
1458 arg->status_change_nid = NUMA_NO_NODE;
1459 arg->status_change_nid_normal = NUMA_NO_NODE;
1460 arg->status_change_nid_high = NUMA_NO_NODE;
1463 * Check whether node_states[N_NORMAL_MEMORY] will be changed.
1464 * If the memory to be offlined is within the range
1465 * [0..ZONE_NORMAL], and it is the last present memory there,
1466 * the zones in that range will become empty after the offlining,
1467 * thus we can determine that we need to clear the node from
1468 * node_states[N_NORMAL_MEMORY].
1470 for (zt = 0; zt <= ZONE_NORMAL; zt++)
1471 present_pages += pgdat->node_zones[zt].present_pages;
1472 if (zone_idx(zone) <= ZONE_NORMAL && nr_pages >= present_pages)
1473 arg->status_change_nid_normal = zone_to_nid(zone);
1475 #ifdef CONFIG_HIGHMEM
1477 * node_states[N_HIGH_MEMORY] contains nodes which
1478 * have normal memory or high memory.
1479 * Here we add the present_pages belonging to ZONE_HIGHMEM.
1480 * If the zone is within the range of [0..ZONE_HIGHMEM), and
1481 * we determine that the zones in that range become empty,
1482 * we need to clear the node for N_HIGH_MEMORY.
1484 present_pages += pgdat->node_zones[ZONE_HIGHMEM].present_pages;
1485 if (zone_idx(zone) <= ZONE_HIGHMEM && nr_pages >= present_pages)
1486 arg->status_change_nid_high = zone_to_nid(zone);
1490 * We have accounted the pages from [0..ZONE_NORMAL), and
1491 * in case of CONFIG_HIGHMEM the pages from ZONE_HIGHMEM
1493 * Here we count the possible pages from ZONE_MOVABLE.
1494 * If after having accounted all the pages, we see that the nr_pages
1495 * to be offlined is over or equal to the accounted pages,
1496 * we know that the node will become empty, and so, we can clear
1497 * it for N_MEMORY as well.
1499 present_pages += pgdat->node_zones[ZONE_MOVABLE].present_pages;
1501 if (nr_pages >= present_pages)
1502 arg->status_change_nid = zone_to_nid(zone);
1505 static void node_states_clear_node(int node, struct memory_notify *arg)
1507 if (arg->status_change_nid_normal >= 0)
1508 node_clear_state(node, N_NORMAL_MEMORY);
1510 if (arg->status_change_nid_high >= 0)
1511 node_clear_state(node, N_HIGH_MEMORY);
1513 if (arg->status_change_nid >= 0)
1514 node_clear_state(node, N_MEMORY);
1517 static int count_system_ram_pages_cb(unsigned long start_pfn,
1518 unsigned long nr_pages, void *data)
1520 unsigned long *nr_system_ram_pages = data;
1522 *nr_system_ram_pages += nr_pages;
1526 int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
1528 const unsigned long end_pfn = start_pfn + nr_pages;
1529 unsigned long pfn, system_ram_pages = 0;
1530 unsigned long flags;
1532 struct memory_notify arg;
1536 /* We can only offline full sections (e.g., SECTION_IS_ONLINE) */
1537 if (WARN_ON_ONCE(!nr_pages ||
1538 !IS_ALIGNED(start_pfn | nr_pages, PAGES_PER_SECTION)))
1541 mem_hotplug_begin();
1544 * Don't allow offlining memory blocks that contain holes.
1545 * Consequently, memory blocks with holes can never get onlined
1546 * via the hotplug path - online_pages() - as hotplugged memory has
1547 * no holes. This way, we e.g., don't have to worry about marking
1548 * memory holes PG_reserved, don't need pfn_valid() checks, and can
1549 * avoid using walk_system_ram_range() later.
1551 walk_system_ram_range(start_pfn, nr_pages, &system_ram_pages,
1552 count_system_ram_pages_cb);
1553 if (system_ram_pages != nr_pages) {
1555 reason = "memory holes";
1556 goto failed_removal;
1559 /* Offlining operates on a single zone only; this makes hotplug much
1560 easier and the code more readable. We assume this for now. */
1561 zone = test_pages_in_a_zone(start_pfn, end_pfn);
1564 reason = "multizone range";
1565 goto failed_removal;
1567 node = zone_to_nid(zone);
1570 * Disable pcplists so that page isolation cannot race with freeing
1571 * in a way that pages from isolated pageblock are left on pcplists.
1573 zone_pcp_disable(zone);
1575 /* set above range as isolated */
1576 ret = start_isolate_page_range(start_pfn, end_pfn,
1578 MEMORY_OFFLINE | REPORT_FAILURE);
1580 reason = "failure to isolate range";
1581 goto failed_removal_pcplists_disabled;
1584 arg.start_pfn = start_pfn;
1585 arg.nr_pages = nr_pages;
1586 node_states_check_changes_offline(nr_pages, zone, &arg);
1588 ret = memory_notify(MEM_GOING_OFFLINE, &arg);
1589 ret = notifier_to_errno(ret);
1591 reason = "notifier failure";
1592 goto failed_removal_isolated;
1598 if (signal_pending(current)) {
1600 reason = "signal backoff";
1601 goto failed_removal_isolated;
1605 lru_add_drain_all();
1607 ret = scan_movable_pages(pfn, end_pfn, &pfn);
1610 * TODO: fatal migration failures should bail
1613 do_migrate_range(pfn, end_pfn);
1617 if (ret != -ENOENT) {
1618 reason = "unmovable page";
1619 goto failed_removal_isolated;
1623 * Dissolve free hugepages in the memory block before actually
1624 * offlining it, in order to keep hugetlbfs's object
1625 * counting consistent.
1627 ret = dissolve_free_huge_pages(start_pfn, end_pfn);
1629 reason = "failure to dissolve huge pages";
1630 goto failed_removal_isolated;
1633 ret = test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE);
1637 /* Mark all sections offline and remove free pages from the buddy. */
1638 __offline_isolated_pages(start_pfn, end_pfn);
1639 pr_debug("Offlined Pages %ld\n", nr_pages);
1642 * The memory sections are marked offline, and the pageblock flags
1643 * effectively stale; nobody should be touching them. Fixup the number
1644 * of isolated pageblocks, memory onlining will properly revert this.
1646 spin_lock_irqsave(&zone->lock, flags);
1647 zone->nr_isolate_pageblock -= nr_pages / pageblock_nr_pages;
1648 spin_unlock_irqrestore(&zone->lock, flags);
1650 zone_pcp_enable(zone);
1652 /* removal success */
1653 adjust_managed_page_count(pfn_to_page(start_pfn), -nr_pages);
1654 zone->present_pages -= nr_pages;
1656 pgdat_resize_lock(zone->zone_pgdat, &flags);
1657 zone->zone_pgdat->node_present_pages -= nr_pages;
1658 pgdat_resize_unlock(zone->zone_pgdat, &flags);
1660 init_per_zone_wmark_min();
1662 if (!populated_zone(zone)) {
1663 zone_pcp_reset(zone);
1664 build_all_zonelists(NULL);
1666 zone_pcp_update(zone);
1668 node_states_clear_node(node, &arg);
1669 if (arg.status_change_nid >= 0) {
1671 kcompactd_stop(node);
1674 writeback_set_ratelimit();
1676 memory_notify(MEM_OFFLINE, &arg);
1677 remove_pfn_range_from_zone(zone, start_pfn, nr_pages);
1681 failed_removal_isolated:
1682 undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
1683 memory_notify(MEM_CANCEL_OFFLINE, &arg);
1684 failed_removal_pcplists_disabled:
1685 zone_pcp_enable(zone);
1687 pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n",
1688 (unsigned long long) start_pfn << PAGE_SHIFT,
1689 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1,
1691 /* pushback to free area */
1696 static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
1698 int ret = !is_memblock_offlined(mem);
1700 if (unlikely(ret)) {
1701 phys_addr_t beginpa, endpa;
1703 beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr));
1704 endpa = beginpa + memory_block_size_bytes() - 1;
1705 pr_warn("removing memory failed, because memory [%pa-%pa] is still online\n",
1713 static int check_cpu_on_node(pg_data_t *pgdat)
1717 for_each_present_cpu(cpu) {
1718 if (cpu_to_node(cpu) == pgdat->node_id)
1720 * the cpu on this node isn't removed, and we can't
1721 * offline this node.
1729 static int check_no_memblock_for_node_cb(struct memory_block *mem, void *arg)
1731 int nid = *(int *)arg;
1734 * If a memory block belongs to multiple nodes, the stored nid is not
1735 * reliable. However, such blocks are always online (e.g., cannot get
1736 * offlined) and, therefore, are still spanned by the node.
1738 return mem->nid == nid ? -EEXIST : 0;
1745 * Offline a node if all memory sections and cpus of the node are removed.
1747 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
1748 * and online/offline operations before this call.
1750 void try_offline_node(int nid)
1752 pg_data_t *pgdat = NODE_DATA(nid);
1756 * If the node still spans pages (especially ZONE_DEVICE), don't
1757 * offline it. A node spans memory after move_pfn_range_to_zone(),
1758 * e.g., after the memory block was onlined.
1760 if (pgdat->node_spanned_pages)
1764 * Especially offline memory blocks might not be spanned by the
1765 * node. They will get spanned by the node once they get onlined.
1766 * However, they link to the node in sysfs and can get onlined later.
1768 rc = for_each_memory_block(&nid, check_no_memblock_for_node_cb);
1772 if (check_cpu_on_node(pgdat))
1776 * all memory/cpu of this node are removed, we can offline this
1779 node_set_offline(nid);
1780 unregister_one_node(nid);
1782 EXPORT_SYMBOL(try_offline_node);
1784 static int __ref try_remove_memory(int nid, u64 start, u64 size)
1788 BUG_ON(check_hotplug_memory_range(start, size));
1791 * All memory blocks must be offlined before removing memory. Check
1792 * whether all memory blocks in question are offline and return error
1793 * if this is not the case.
1795 rc = walk_memory_blocks(start, size, NULL, check_memblock_offlined_cb);
1799 /* remove memmap entry */
1800 firmware_map_remove(start, start + size, "System RAM");
1803 * Memory block device removal under the device_hotplug_lock is
1804 * a barrier against racing online attempts.
1806 remove_memory_block_devices(start, size);
1808 mem_hotplug_begin();
1810 arch_remove_memory(nid, start, size, NULL);
1812 if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) {
1813 memblock_free(start, size);
1814 memblock_remove(start, size);
1817 release_mem_region_adjustable(start, size);
1819 try_offline_node(nid);
1828 * @start: physical address of the region to remove
1829 * @size: size of the region to remove
1831 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
1832 * and online/offline operations before this call, as required by
1833 * try_offline_node().
1835 void __remove_memory(int nid, u64 start, u64 size)
1839 * trigger BUG() if some memory is not offlined prior to calling this
1842 if (try_remove_memory(nid, start, size))
1847 * Remove memory if every memory block is offline; otherwise return -EBUSY if
1848 * some memory is not offline.
1850 int remove_memory(int nid, u64 start, u64 size)
1854 lock_device_hotplug();
1855 rc = try_remove_memory(nid, start, size);
1856 unlock_device_hotplug();
1860 EXPORT_SYMBOL_GPL(remove_memory);
1862 static int try_offline_memory_block(struct memory_block *mem, void *arg)
1864 uint8_t online_type = MMOP_ONLINE_KERNEL;
1865 uint8_t **online_types = arg;
1870 * Sense the online_type via the zone of the memory block. Offlining
1871 * with multiple zones within one memory block will be rejected
1872 * by offlining code ... so we don't care about that.
1874 page = pfn_to_online_page(section_nr_to_pfn(mem->start_section_nr));
1875 if (page && zone_idx(page_zone(page)) == ZONE_MOVABLE)
1876 online_type = MMOP_ONLINE_MOVABLE;
1878 rc = device_offline(&mem->dev);
1880 * Default is MMOP_OFFLINE - change it only if offlining succeeded,
1881 * so try_reonline_memory_block() can do the right thing.
1884 **online_types = online_type;
1887 /* Ignore if already offline. */
1888 return rc < 0 ? rc : 0;
1891 static int try_reonline_memory_block(struct memory_block *mem, void *arg)
1893 uint8_t **online_types = arg;
1896 if (**online_types != MMOP_OFFLINE) {
1897 mem->online_type = **online_types;
1898 rc = device_online(&mem->dev);
1900 pr_warn("%s: Failed to re-online memory: %d",
1904 /* Continue processing all remaining memory blocks. */
1910 * Try to offline and remove memory. Might take a long time to finish in case
1911 * memory is still in use. Primarily useful for memory devices that logically
1912 * unplugged all memory (so it's no longer in use) and want to offline + remove
1915 int offline_and_remove_memory(int nid, u64 start, u64 size)
1917 const unsigned long mb_count = size / memory_block_size_bytes();
1918 uint8_t *online_types, *tmp;
1921 if (!IS_ALIGNED(start, memory_block_size_bytes()) ||
1922 !IS_ALIGNED(size, memory_block_size_bytes()) || !size)
1926 * We'll remember the old online type of each memory block, so we can
1927 * try to revert whatever we did when offlining one memory block fails
1928 * after offlining some others succeeded.
1930 online_types = kmalloc_array(mb_count, sizeof(*online_types),
1935 * Initialize all states to MMOP_OFFLINE, so when we abort processing in
1936 * try_offline_memory_block(), we'll skip all unprocessed blocks in
1937 * try_reonline_memory_block().
1939 memset(online_types, MMOP_OFFLINE, mb_count);
1941 lock_device_hotplug();
1944 rc = walk_memory_blocks(start, size, &tmp, try_offline_memory_block);
1947 * In case we succeeded in offlining all memory, remove it.
1948 * This cannot fail as it cannot get onlined in the meantime.
1951 rc = try_remove_memory(nid, start, size);
1953 pr_err("%s: Failed to remove memory: %d", __func__, rc);
1957 * Rollback what we did. While memory onlining might theoretically fail
1958 * (nacked by a notifier), it barely ever happens.
1962 walk_memory_blocks(start, size, &tmp,
1963 try_reonline_memory_block);
1965 unlock_device_hotplug();
1967 kfree(online_types);
1970 EXPORT_SYMBOL_GPL(offline_and_remove_memory);
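/*
 * Usage sketch (illustration only; addr comes from the driver and must be
 * aligned to memory_block_size_bytes()). Logically unplugging one block that
 * the driver added earlier, the way virtio-mem reacts to unplug requests:
 *
 *	rc = offline_and_remove_memory(nid, addr, memory_block_size_bytes());
 *
 * On failure, any blocks that were already offlined have been re-onlined with
 * their previous online type, so the memory stays usable.
 */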
1971 #endif /* CONFIG_MEMORY_HOTREMOVE */