diff --git a/mm/sparse.c b/mm/sparse.c
index 26b48ee..72f010d 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -83,8 +83,15 @@ static int __meminit sparse_index_init(unsigned long section_nr, int nid)
        unsigned long root = SECTION_NR_TO_ROOT(section_nr);
        struct mem_section *section;
 
+       /*
+        * An existing section is possible in the sub-section hotplug
+        * case. First hot-add instantiates, follow-on hot-add reuses
+        * the existing section.
+        *
+        * The mem_hotplug_lock resolves the apparent race below.
+        */
        if (mem_section[root])
-               return -EEXIST;
+               return 0;
 
        section = sparse_index_alloc(nid);
        if (!section)
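
The hunk above makes sparse_index_init() idempotent: a follow-on hot-add into an already-instantiated root now succeeds instead of failing with -EEXIST. A minimal userspace model of that behavior (index_init() and NR_SECTION_ROOTS are illustrative stand-ins, not kernel API):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_SECTION_ROOTS 16

static void *mem_section[NR_SECTION_ROOTS];

/* First call instantiates the root; any follow-on call for the same
 * root reuses it and returns 0 (previously -EEXIST). */
static int index_init(unsigned long root)
{
	if (mem_section[root])
		return 0;
	mem_section[root] = calloc(1, 4096);
	return mem_section[root] ? 0 : -ENOMEM;
}

int main(void)
{
	printf("first hot-add:  %d\n", index_init(3));	/* 0, allocates */
	printf("second hot-add: %d\n", index_init(3));	/* 0, reuses */
	return 0;
}
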
@@ -222,21 +229,21 @@ void subsection_mask_set(unsigned long *map, unsigned long pfn,
 void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
 {
        int end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
-       int i, start_sec = pfn_to_section_nr(pfn);
+       unsigned long nr, start_sec = pfn_to_section_nr(pfn);
 
        if (!nr_pages)
                return;
 
-       for (i = start_sec; i <= end_sec; i++) {
+       for (nr = start_sec; nr <= end_sec; nr++) {
                struct mem_section *ms;
                unsigned long pfns;
 
                pfns = min(nr_pages, PAGES_PER_SECTION
                                - (pfn & ~PAGE_SECTION_MASK));
-               ms = __nr_to_section(i);
+               ms = __nr_to_section(nr);
                subsection_mask_set(ms->usage->subsection_map, pfn, pfns);
 
-               pr_debug("%s: sec: %d pfns: %ld set(%d, %d)\n", __func__, i,
+               pr_debug("%s: sec: %lu pfns: %lu set(%d, %d)\n", __func__, nr,
                                pfns, subsection_map_index(pfn),
                                subsection_map_index(pfn + pfns - 1));
 
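
To see what the loop above computes, here is a self-contained sketch of subsection_map_index() with assumed x86_64 geometry (4K pages, 128M sections, 2M subsections); the constant values are illustrative, not part of the patch:

#include <stdio.h>

#define PAGES_PER_SECTION	(1UL << 15)	/* 128M section / 4K page */
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION - 1))
#define PAGES_PER_SUBSECTION	(1UL << 9)	/* 2M subsection / 4K page */

/* Subsection ordinal of a pfn within its section, as in the kernel's
 * subsection_map_index(). */
static int subsection_map_index(unsigned long pfn)
{
	return (pfn & ~PAGE_SECTION_MASK) / PAGES_PER_SUBSECTION;
}

int main(void)
{
	/* A 2M hot-add starting 6M into section 7 touches exactly one
	 * subsection bit; this prints "set(3, 3)". */
	unsigned long pfn = 7 * PAGES_PER_SECTION + 3 * PAGES_PER_SUBSECTION;
	unsigned long pfns = PAGES_PER_SUBSECTION;

	printf("set(%d, %d)\n", subsection_map_index(pfn),
			subsection_map_index(pfn + pfns - 1));
	return 0;
}
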
@@ -439,8 +446,8 @@ static unsigned long __init section_map_size(void)
        return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
 }
 
-struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
-               struct vmem_altmap *altmap)
+struct page __init *__populate_section_memmap(unsigned long pfn,
+               unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
 {
        unsigned long size = section_map_size();
        struct page *map = sparse_buffer_alloc(size);
@@ -521,10 +528,13 @@ static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
        }
        sparse_buffer_init(map_count * section_map_size(), nid);
        for_each_present_section_nr(pnum_begin, pnum) {
+               unsigned long pfn = section_nr_to_pfn(pnum);
+
                if (pnum >= pnum_end)
                        break;
 
-               map = sparse_mem_map_populate(pnum, nid, NULL);
+               map = __populate_section_memmap(pfn, PAGES_PER_SECTION,
+                               nid, NULL);
                if (!map) {
                        pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
                               __func__, nid);
@@ -625,17 +635,17 @@ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
 #endif
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
-               struct vmem_altmap *altmap)
+static struct page *populate_section_memmap(unsigned long pfn,
+               unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
 {
-       /* This will make the necessary allocations eventually. */
-       return sparse_mem_map_populate(pnum, nid, altmap);
+       return __populate_section_memmap(pfn, nr_pages, nid, altmap);
 }
-static void __kfree_section_memmap(struct page *memmap,
+
+static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
                struct vmem_altmap *altmap)
 {
-       unsigned long start = (unsigned long)memmap;
-       unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
+       unsigned long start = (unsigned long) pfn_to_page(pfn);
+       unsigned long end = start + nr_pages * sizeof(struct page);
 
        vmemmap_free(start, end, altmap);
 }
@@ -647,7 +657,8 @@ static void free_map_bootmem(struct page *memmap)
        vmemmap_free(start, end, NULL);
 }
 #else
-static struct page *__kmalloc_section_memmap(void)
+struct page *populate_section_memmap(unsigned long pfn,
+               unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
 {
        struct page *page, *ret;
        unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;
@@ -668,15 +679,11 @@ got_map_ptr:
        return ret;
 }
 
-static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
+static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
                struct vmem_altmap *altmap)
 {
-       return __kmalloc_section_memmap();
-}
+       struct page *memmap = pfn_to_page(pfn);
 
-static void __kfree_section_memmap(struct page *memmap,
-               struct vmem_altmap *altmap)
-{
        if (is_vmalloc_addr(memmap))
                vfree(memmap);
        else
@@ -715,10 +722,120 @@ static void free_map_bootmem(struct page *memmap)
 }
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
+static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
+               struct vmem_altmap *altmap)
+{
+       DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
+       DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
+       struct mem_section *ms = __pfn_to_section(pfn);
+       bool section_is_early = early_section(ms);
+       struct page *memmap = NULL;
+       unsigned long *subsection_map = ms->usage
+               ? &ms->usage->subsection_map[0] : NULL;
+
+       subsection_mask_set(map, pfn, nr_pages);
+       if (subsection_map)
+               bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);
+
+       if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
+                               "section already deactivated (%#lx + %ld)\n",
+                               pfn, nr_pages))
+               return;
+
+       /*
+        * There are 3 cases to handle across two configurations
+        * (SPARSEMEM_VMEMMAP={y,n}):
+        *
+        * 1/ deactivation of a partial hot-added section (only possible
+        * in the SPARSEMEM_VMEMMAP=y case).
+        *    a/ section was present at memory init
+        *    b/ section was hot-added post memory init
+        * 2/ deactivation of a complete hot-added section
+        * 3/ deactivation of a complete section from memory init
+        *
+        * For 1/, when the subsection_map does not become empty we will
+        * not be freeing the usage map, but still need to free the
+        * vmemmap range.
+        *
+        * For 2/ and 3/ the SPARSEMEM_VMEMMAP={y,n} cases are unified
+        */
+       bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
+       if (bitmap_empty(subsection_map, SUBSECTIONS_PER_SECTION)) {
+               unsigned long section_nr = pfn_to_section_nr(pfn);
+
+               if (!section_is_early) {
+                       kfree(ms->usage);
+                       ms->usage = NULL;
+               }
+               memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
+               ms->section_mem_map = sparse_encode_mem_map(NULL, section_nr);
+       }
+
+       if (section_is_early && memmap)
+               free_map_bootmem(memmap);
+       else
+               depopulate_section_memmap(pfn, nr_pages, altmap);
+}
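
The bitmap logic in section_deactivate() reduces to three operations: AND to validate that every requested subsection is in fact active, XOR to clear the requested bits, and an emptiness test to decide whether the whole section is torn down. A sketch with a plain unsigned long standing in for the SUBSECTIONS_PER_SECTION-bit kernel bitmaps:

#include <stdio.h>

int main(void)
{
	unsigned long subsection_map = 0xff;	/* subsections 0-7 active */
	unsigned long map = 0x0f;		/* request: drop 0-3 */

	/* (map & subsection_map) must equal map, otherwise part of the
	 * request was never activated -- the WARN path above. */
	if ((map & subsection_map) != map) {
		puts("section already deactivated");
		return 0;
	}

	/* XOR clears exactly the requested bits. */
	subsection_map ^= map;
	printf("remaining %#lx, tear down whole section: %s\n",
			subsection_map, subsection_map ? "no" : "yes");
	return 0;
}
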
+
+static struct page * __meminit section_activate(int nid, unsigned long pfn,
+               unsigned long nr_pages, struct vmem_altmap *altmap)
+{
+       DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
+       struct mem_section *ms = __pfn_to_section(pfn);
+       struct mem_section_usage *usage = NULL;
+       unsigned long *subsection_map;
+       struct page *memmap;
+       int rc = 0;
+
+       subsection_mask_set(map, pfn, nr_pages);
+
+       if (!ms->usage) {
+               usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
+               if (!usage)
+                       return ERR_PTR(-ENOMEM);
+               ms->usage = usage;
+       }
+       subsection_map = &ms->usage->subsection_map[0];
+
+       if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
+               rc = -EINVAL;
+       else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
+               rc = -EEXIST;
+       else
+               bitmap_or(subsection_map, map, subsection_map,
+                               SUBSECTIONS_PER_SECTION);
+
+       if (rc) {
+               if (usage)
+                       ms->usage = NULL;
+               kfree(usage);
+               return ERR_PTR(rc);
+       }
+
+       /*
+        * The early init code does not consider partially populated
+        * initial sections; it simply assumes that memory will never be
+        * referenced.  If we hot-add memory into such a section then we
+        * do not need to populate the memmap and can simply reuse what
+        * is already there.
+        */
+       if (nr_pages < PAGES_PER_SECTION && early_section(ms))
+               return pfn_to_page(pfn);
+
+       memmap = populate_section_memmap(pfn, nr_pages, nid, altmap);
+       if (!memmap) {
+               section_deactivate(pfn, nr_pages, altmap);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       return memmap;
+}
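
section_activate() applies the mirror-image checks. The same one-word model shows the three outcomes (-EINVAL for an empty request, -EEXIST on overlap with live subsections, 0 on a disjoint merge); activate() here is a hypothetical stand-in for the kernel's bitmap_* sequence:

#include <errno.h>
#include <stdio.h>

static int activate(unsigned long *subsection_map, unsigned long map)
{
	if (!map)
		return -EINVAL;
	if (map & *subsection_map)
		return -EEXIST;
	*subsection_map |= map;
	return 0;
}

int main(void)
{
	unsigned long sub = 0;

	printf("%d\n", activate(&sub, 0x0f));	/* 0: claims 0-3 */
	printf("%d\n", activate(&sub, 0x18));	/* -EEXIST: bit 3 live */
	printf("%d\n", activate(&sub, 0x30));	/* 0: 4-5 were free */
	return 0;
}
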
+
 /**
- * sparse_add_one_section - add a memory section
+ * sparse_add_section - add a memory section, or populate an existing one
  * @nid: The node to add section on
  * @start_pfn: start pfn of the memory range
+ * @nr_pages: number of pfns to add in the section
  * @altmap: device page map
  *
  * This is only intended for hotplug.
@@ -728,54 +845,38 @@ static void free_map_bootmem(struct page *memmap)
  * * -EEXIST   - Section is already present.
  * * -ENOMEM   - Out of memory.
  */
-int __meminit sparse_add_one_section(int nid, unsigned long start_pfn,
-                                    struct vmem_altmap *altmap)
+int __meminit sparse_add_section(int nid, unsigned long start_pfn,
+               unsigned long nr_pages, struct vmem_altmap *altmap)
 {
        unsigned long section_nr = pfn_to_section_nr(start_pfn);
-       struct mem_section_usage *usage;
        struct mem_section *ms;
        struct page *memmap;
        int ret;
 
-       /*
-        * no locking for this, because it does its own
-        * plus, it does a kmalloc
-        */
        ret = sparse_index_init(section_nr, nid);
-       if (ret < 0 && ret != -EEXIST)
+       if (ret < 0)
                return ret;
-       ret = 0;
-       memmap = kmalloc_section_memmap(section_nr, nid, altmap);
-       if (!memmap)
-               return -ENOMEM;
-       usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
-       if (!usage) {
-               __kfree_section_memmap(memmap, altmap);
-               return -ENOMEM;
-       }
 
-       ms = __pfn_to_section(start_pfn);
-       if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
-               ret = -EEXIST;
-               goto out;
-       }
+       memmap = section_activate(nid, start_pfn, nr_pages, altmap);
+       if (IS_ERR(memmap))
+               return PTR_ERR(memmap);
 
        /*
         * Poison uninitialized struct pages in order to catch invalid flags
         * combinations.
         */
-       page_init_poison(memmap, sizeof(struct page) * PAGES_PER_SECTION);
+       page_init_poison(pfn_to_page(start_pfn), sizeof(struct page) * nr_pages);
 
+       ms = __pfn_to_section(start_pfn);
        set_section_nid(section_nr, nid);
        section_mark_present(ms);
-       sparse_init_one_section(ms, section_nr, memmap, usage, 0);
 
-out:
-       if (ret < 0) {
-               kfree(usage);
-               __kfree_section_memmap(memmap, altmap);
-       }
-       return ret;
+       /* Align memmap to section boundary in the subsection case */
+       if (section_nr_to_pfn(section_nr) != start_pfn)
+               memmap = pfn_to_kaddr(section_nr_to_pfn(section_nr));
+       sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);
+
+       return 0;
 }
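
The final rebase of memmap matters because sparse_init_one_section() stores the map biased by the section's first pfn (via sparse_encode_mem_map()), so the encoding only round-trips for a section-aligned pointer. A sketch of that bias, assuming 2^15 pages per section (encode()/decode() are illustrative names):

#include <stdio.h>

#define PFN_SECTION_SHIFT	15	/* assumed x86_64 geometry */

struct page { unsigned long flags; };

static struct page section_memmap[1UL << PFN_SECTION_SHIFT];

/* Store mem_map minus the section's first pfn so that pfn_to_page()
 * reduces to a plain add, as sparse_encode_mem_map() does. */
static unsigned long encode(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (pnum << PFN_SECTION_SHIFT));
}

static struct page *decode(unsigned long coded, unsigned long pnum)
{
	return (struct page *)coded + (pnum << PFN_SECTION_SHIFT);
}

int main(void)
{
	unsigned long pnum = 42;
	unsigned long coded = encode(section_memmap, pnum);

	printf("round-trip ok: %d\n", decode(coded, pnum) == section_memmap);
	return 0;
}
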
 
 #ifdef CONFIG_MEMORY_FAILURE
@@ -808,47 +909,12 @@ static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
 }
 #endif
 
-static void free_section_usage(struct mem_section *ms, struct page *memmap,
-               struct mem_section_usage *usage, struct vmem_altmap *altmap)
-{
-       if (!usage)
-               return;
-
-       /*
-        * Check to see if allocation came from hot-plug-add
-        */
-       if (!early_section(ms)) {
-               kfree(usage);
-               if (memmap)
-                       __kfree_section_memmap(memmap, altmap);
-               return;
-       }
-
-       /*
-        * The usemap came from bootmem. This is packed with other usemaps
-        * on the section which has pgdat at boot time. Just keep it as is now.
-        */
-
-       if (memmap)
-               free_map_bootmem(memmap);
-}
-
-void sparse_remove_one_section(struct mem_section *ms, unsigned long map_offset,
-                              struct vmem_altmap *altmap)
+void sparse_remove_section(struct mem_section *ms, unsigned long pfn,
+               unsigned long nr_pages, unsigned long map_offset,
+               struct vmem_altmap *altmap)
 {
-       struct page *memmap = NULL;
-       struct mem_section_usage *usage = NULL;
-
-       if (ms->section_mem_map) {
-               usage = ms->usage;
-               memmap = sparse_decode_mem_map(ms->section_mem_map,
-                                               __section_nr(ms));
-               ms->section_mem_map = 0;
-               ms->usage = NULL;
-       }
-
-       clear_hwpoisoned_pages(memmap + map_offset,
-                       PAGES_PER_SECTION - map_offset);
-       free_section_usage(ms, memmap, usage, altmap);
+       clear_hwpoisoned_pages(pfn_to_page(pfn) + map_offset,
+                       nr_pages - map_offset);
+       section_deactivate(pfn, nr_pages, altmap);
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */