Merge tag 's390-5.15-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 974a565..86c3af7 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -154,122 +154,6 @@ static void release_memory_resource(struct resource *res)
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
-void get_page_bootmem(unsigned long info,  struct page *page,
-                     unsigned long type)
-{
-       page->freelist = (void *)type;
-       SetPagePrivate(page);
-       set_page_private(page, info);
-       page_ref_inc(page);
-}
-
-void put_page_bootmem(struct page *page)
-{
-       unsigned long type;
-
-       type = (unsigned long) page->freelist;
-       BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
-              type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);
-
-       if (page_ref_dec_return(page) == 1) {
-               page->freelist = NULL;
-               ClearPagePrivate(page);
-               set_page_private(page, 0);
-               INIT_LIST_HEAD(&page->lru);
-               free_reserved_page(page);
-       }
-}
-
-#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
-#ifndef CONFIG_SPARSEMEM_VMEMMAP
-static void register_page_bootmem_info_section(unsigned long start_pfn)
-{
-       unsigned long mapsize, section_nr, i;
-       struct mem_section *ms;
-       struct page *page, *memmap;
-       struct mem_section_usage *usage;
-
-       section_nr = pfn_to_section_nr(start_pfn);
-       ms = __nr_to_section(section_nr);
-
-       /* Get section's memmap address */
-       memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
-
-       /*
-        * Get page for the memmap's phys address
-        * XXX: need more consideration for sparse_vmemmap...
-        */
-       page = virt_to_page(memmap);
-       mapsize = sizeof(struct page) * PAGES_PER_SECTION;
-       mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;
-
-       /* remember memmap's page */
-       for (i = 0; i < mapsize; i++, page++)
-               get_page_bootmem(section_nr, page, SECTION_INFO);
-
-       usage = ms->usage;
-       page = virt_to_page(usage);
-
-       mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;
-
-       for (i = 0; i < mapsize; i++, page++)
-               get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
-
-}
-#else /* CONFIG_SPARSEMEM_VMEMMAP */
-static void register_page_bootmem_info_section(unsigned long start_pfn)
-{
-       unsigned long mapsize, section_nr, i;
-       struct mem_section *ms;
-       struct page *page, *memmap;
-       struct mem_section_usage *usage;
-
-       section_nr = pfn_to_section_nr(start_pfn);
-       ms = __nr_to_section(section_nr);
-
-       memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
-
-       register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);
-
-       usage = ms->usage;
-       page = virt_to_page(usage);
-
-       mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;
-
-       for (i = 0; i < mapsize; i++, page++)
-               get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
-}
-#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
-
-void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
-{
-       unsigned long i, pfn, end_pfn, nr_pages;
-       int node = pgdat->node_id;
-       struct page *page;
-
-       nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
-       page = virt_to_page(pgdat);
-
-       for (i = 0; i < nr_pages; i++, page++)
-               get_page_bootmem(node, page, NODE_INFO);
-
-       pfn = pgdat->node_start_pfn;
-       end_pfn = pgdat_end_pfn(pgdat);
-
-       /* register section info */
-       for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
-               /*
-                * Some platforms can assign the same pfn to multiple nodes - on
-                * node0 as well as nodeN.  To avoid registering a pfn against
-                * multiple nodes, we check that this pfn does not already
-                * reside on some other node.
-                */
-               if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
-                       register_page_bootmem_info_section(pfn);
-       }
-}
-#endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */
-
 static int check_pfn_span(unsigned long pfn, unsigned long nr_pages,
                const char *reason)
 {
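The removed helpers implement a small refcounting protocol for pages that back boot-time hotplug metadata: get_page_bootmem() stashes a type in page->freelist and caller info in page_private(), taking one reference per registration, and put_page_bootmem() hands the page back to the allocator via free_reserved_page() only when the final reference drops. A minimal sketch of the release side of that pairing; release_section_memmap() is a hypothetical name for illustration, not a kernel symbol:

/*
 * Sketch only: drops the per-page references that
 * register_page_bootmem_info_section() took at registration time.
 */
static void release_section_memmap(struct page *memmap_page,
				   unsigned long nr_pages)
{
	unsigned long i;

	/* The last put clears freelist/private and frees the page. */
	for (i = 0; i < nr_pages; i++, memmap_page++)
		put_page_bootmem(memmap_page);
}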
@@ -445,7 +329,6 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
        unsigned long pfn;
        int nid = zone_to_nid(zone);
 
-       zone_span_writelock(zone);
        if (zone->zone_start_pfn == start_pfn) {
                /*
                 * If the section is the smallest section in the zone, it needs to
@@ -478,7 +361,6 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
                        zone->spanned_pages = 0;
                }
        }
-       zone_span_writeunlock(zone);
 }
 
 static void update_pgdat_span(struct pglist_data *pgdat)
@@ -515,7 +397,7 @@ void __ref remove_pfn_range_from_zone(struct zone *zone,
 {
        const unsigned long end_pfn = start_pfn + nr_pages;
        struct pglist_data *pgdat = zone->zone_pgdat;
-       unsigned long pfn, cur_nr_pages, flags;
+       unsigned long pfn, cur_nr_pages;
 
        /* Poison struct pages because they are now uninitialized again. */
        for (pfn = start_pfn; pfn < end_pfn; pfn += cur_nr_pages) {
@@ -540,10 +422,8 @@ void __ref remove_pfn_range_from_zone(struct zone *zone,
 
        clear_zone_contiguous(zone);
 
-       pgdat_resize_lock(zone->zone_pgdat, &flags);
        shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
        update_pgdat_span(pgdat);
-       pgdat_resize_unlock(zone->zone_pgdat, &flags);
 
        set_zone_contiguous(zone);
 }
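With zone_span_writelock() and pgdat_resize_lock() dropped from this path, zone and node span updates rely on the global memory hotplug lock for serialization. A hedged sketch of the expected calling convention; remove_range_locked() is a hypothetical wrapper, and the real callers are the memory add/remove paths that already hold the lock:

/*
 * Sketch only: span-changing operations run under the global hotplug
 * lock, which now provides the exclusion the removed locks used to.
 */
static void remove_range_locked(struct zone *zone, unsigned long start_pfn,
				unsigned long nr_pages)
{
	mem_hotplug_begin();
	remove_pfn_range_from_zone(zone, start_pfn, nr_pages);
	mem_hotplug_done();
}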
@@ -750,19 +630,13 @@ void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
 {
        struct pglist_data *pgdat = zone->zone_pgdat;
        int nid = pgdat->node_id;
-       unsigned long flags;
 
        clear_zone_contiguous(zone);
 
-       /* TODO Huh pgdat is irqsave while zone is not. It used to be like that before */
-       pgdat_resize_lock(pgdat, &flags);
-       zone_span_writelock(zone);
        if (zone_is_empty(zone))
                init_currently_empty_zone(zone, start_pfn, nr_pages);
        resize_zone_range(zone, start_pfn, nr_pages);
-       zone_span_writeunlock(zone);
        resize_pgdat_range(pgdat, start_pfn, nr_pages);
-       pgdat_resize_unlock(pgdat, &flags);
 
        /*
         * Subsection population requires care in pfn_to_online_page().
@@ -852,12 +726,8 @@ struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
  */
 void adjust_present_page_count(struct zone *zone, long nr_pages)
 {
-       unsigned long flags;
-
        zone->present_pages += nr_pages;
-       pgdat_resize_lock(zone->zone_pgdat, &flags);
        zone->zone_pgdat->node_present_pages += nr_pages;
-       pgdat_resize_unlock(zone->zone_pgdat, &flags);
 }
 
 int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
@@ -913,7 +783,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, struct zone *z
 
        /*
         * {on,off}lining is constrained to full memory sections (or more
-        * precisly to memory blocks from the user space POV).
+        * precisely to memory blocks from the user space POV).
         * memmap_on_memory is an exception because it reserves initial part
         * of the physical memory space for vmemmaps. That space is pageblock
         * aligned.
@@ -1072,8 +942,8 @@ static void rollback_node_hotadd(int nid)
 }
 
 
-/**
- * try_online_node - online a node if offlined
+/*
+ * __try_online_node - online a node if offlined
  * @nid: the node ID
  * @set_node_online: Whether we want to online the node
  * Called by cpu_up() to online a node without onlined memory.
@@ -1172,6 +1042,7 @@ bool mhp_supports_memmap_on_memory(unsigned long size)
         *       populate a single PMD.
         */
        return memmap_on_memory &&
+              !hugetlb_free_vmemmap_enabled &&
               IS_ENABLED(CONFIG_MHP_MEMMAP_ON_MEMORY) &&
               size == memory_block_size_bytes() &&
               IS_ALIGNED(vmemmap_size, PMD_SIZE) &&
@@ -1521,6 +1392,8 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
        struct page *page, *head;
        int ret = 0;
        LIST_HEAD(source);
+       static DEFINE_RATELIMIT_STATE(migrate_rs, DEFAULT_RATELIMIT_INTERVAL,
+                                     DEFAULT_RATELIMIT_BURST);
 
        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
                if (!pfn_valid(pfn))
@@ -1567,8 +1440,10 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                                                    page_is_file_lru(page));
 
                } else {
-                       pr_warn("failed to isolate pfn %lx\n", pfn);
-                       dump_page(page, "isolation failed");
+                       if (__ratelimit(&migrate_rs)) {
+                               pr_warn("failed to isolate pfn %lx\n", pfn);
+                               dump_page(page, "isolation failed");
+                       }
                }
                put_page(page);
        }
@@ -1597,9 +1472,11 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                        (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
                if (ret) {
                        list_for_each_entry(page, &source, lru) {
-                               pr_warn("migrating pfn %lx failed ret:%d ",
-                                      page_to_pfn(page), ret);
-                               dump_page(page, "migration failure");
+                               if (__ratelimit(&migrate_rs)) {
+                                       pr_warn("migrating pfn %lx failed ret:%d\n",
+                                               page_to_pfn(page), ret);
+                                       dump_page(page, "migration failure");
+                               }
                        }
                        putback_movable_pages(&source);
                }
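DEFINE_RATELIMIT_STATE() plus __ratelimit() is the stock pattern for capping log noise: with the defaults, at most DEFAULT_RATELIMIT_BURST (10) messages are emitted per DEFAULT_RATELIMIT_INTERVAL (5 * HZ) window, and further callers are silently suppressed. A self-contained sketch; report_isolation_failure() is a hypothetical name:

#include <linux/printk.h>
#include <linux/ratelimit.h>

/* Allow a burst of 10 warnings per 5-second window. */
static DEFINE_RATELIMIT_STATE(isolate_rs, DEFAULT_RATELIMIT_INTERVAL,
			      DEFAULT_RATELIMIT_BURST);

static void report_isolation_failure(unsigned long pfn)
{
	/* __ratelimit() returns nonzero while the burst budget lasts. */
	if (__ratelimit(&isolate_rs))
		pr_warn("failed to isolate pfn %lx\n", pfn);
}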
@@ -1703,7 +1580,7 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
 
        /*
         * {on,off}lining is constrained to full memory sections (or more
-        * precisly to memory blocks from the user space POV).
+        * precisely to memory blocks from the user space POV).
         * memmap_on_memory is an exception because it reserves initial part
         * of the physical memory space for vmemmaps. That space is pageblock
         * aligned.
@@ -1854,6 +1731,7 @@ failed_removal_isolated:
        undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
        memory_notify(MEM_CANCEL_OFFLINE, &arg);
 failed_removal_pcplists_disabled:
+       lru_cache_enable();
        zone_pcp_enable(zone);
 failed_removal:
        pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n",
@@ -2031,7 +1909,7 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size)
 }
 
 /**
- * remove_memory
+ * __remove_memory - Remove memory if every memory block is offline
  * @nid: the node ID
  * @start: physical address of the region to remove
  * @size: size of the region to remove
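__remove_memory() insists that every memory block in the range is already offline. Callers that cannot guarantee this can use offline_and_remove_memory(), which offlines the range and then removes it in one call; early versions of that helper required exactly one aligned memory block. A hedged usage sketch, where hot_remove_one_block() is a hypothetical wrapper:

/*
 * Sketch only: offline one aligned memory block and tear down its
 * mapping; returns an error if the block cannot be offlined.
 */
static int hot_remove_one_block(int nid, u64 start)
{
	return offline_and_remove_memory(nid, start,
					 memory_block_size_bytes());
}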