mm/sparse.c: move subsection_map related functions together
diff --git a/mm/sparse.c b/mm/sparse.c
index 65599e8..1aee5a4 100644
@@ -209,6 +209,7 @@ static inline unsigned long first_present_section_nr(void)
        return next_present_section_nr(-1);
 }
 
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
 static void subsection_mask_set(unsigned long *map, unsigned long pfn,
                unsigned long nr_pages)
 {
@@ -243,6 +244,11 @@ void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
                nr_pages -= pfns;
        }
 }
+#else
+void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
+{
+}
+#endif
 
 /* Record a memory area against a node. */
 void __init memory_present(int nid, unsigned long start, unsigned long end)
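
The subsection_mask_set() helper being grouped under CONFIG_SPARSEMEM_VMEMMAP above reduces to simple index math over [pfn, pfn + nr_pages). A minimal userspace sketch of that math, assuming x86_64-style constants (4 KiB pages, 128 MiB sections, 2 MiB subsections; all of these are configuration-dependent):

#include <stdio.h>

/* Assumed x86_64-style layout: 4 KiB pages, 128 MiB sections,
 * 2 MiB subsections, hence 64 subsections per section. */
#define PAGE_SHIFT		12
#define SECTION_SIZE_BITS	27
#define PAGES_PER_SECTION	(1UL << (SECTION_SIZE_BITS - PAGE_SHIFT))
#define PAGES_PER_SUBSECTION	512		/* 2 MiB / 4 KiB */

/* Mirrors the kernel's subsection_map_index(): the pfn's offset
 * within its section, divided by the subsection size. */
static unsigned long subsection_map_index(unsigned long pfn)
{
	return (pfn & (PAGES_PER_SECTION - 1)) / PAGES_PER_SUBSECTION;
}

int main(void)
{
	unsigned long pfn = 0x8200;	/* 2 MiB into its section */
	unsigned long nr_pages = 1024;	/* two subsections worth */
	unsigned long idx = subsection_map_index(pfn);
	unsigned long end = subsection_map_index(pfn + nr_pages - 1);

	/* subsection_mask_set() then does bitmap_set(map, idx, end - idx + 1) */
	printf("set subsection bits %lu..%lu\n", idx, end);	/* 1..2 */
	return 0;
}
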
@@ -660,39 +666,67 @@ static void free_map_bootmem(struct page *memmap)
 
        vmemmap_free(start, end, NULL);
 }
-#else
-struct page * __meminit populate_section_memmap(unsigned long pfn,
-               unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
+
+static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
 {
-       struct page *page, *ret;
-       unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;
+       DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
+       DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
+       struct mem_section *ms = __pfn_to_section(pfn);
+       unsigned long *subsection_map = ms->usage
+               ? &ms->usage->subsection_map[0] : NULL;
 
-       page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
-       if (page)
-               goto got_map_page;
+       subsection_mask_set(map, pfn, nr_pages);
+       if (subsection_map)
+               bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);
 
-       ret = vmalloc(memmap_size);
-       if (ret)
-               goto got_map_ptr;
+       if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
+                               "section already deactivated (%#lx + %ld)\n",
+                               pfn, nr_pages))
+               return -EINVAL;
 
-       return NULL;
-got_map_page:
-       ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
-got_map_ptr:
+       bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
+       return 0;
+}
 
-       return ret;
+static bool is_subsection_map_empty(struct mem_section *ms)
+{
+       return bitmap_empty(&ms->usage->subsection_map[0],
+                           SUBSECTIONS_PER_SECTION);
 }
 
-static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
-               struct vmem_altmap *altmap)
+static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
 {
-       struct page *memmap = pfn_to_page(pfn);
+       struct mem_section *ms = __pfn_to_section(pfn);
+       DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
+       unsigned long *subsection_map;
+       int rc = 0;
 
-       if (is_vmalloc_addr(memmap))
-               vfree(memmap);
+       subsection_mask_set(map, pfn, nr_pages);
+
+       subsection_map = &ms->usage->subsection_map[0];
+
+       if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
+               rc = -EINVAL;
+       else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
+               rc = -EEXIST;
        else
-               free_pages((unsigned long)memmap,
-                          get_order(sizeof(struct page) * PAGES_PER_SECTION));
+               bitmap_or(subsection_map, map, subsection_map,
+                               SUBSECTIONS_PER_SECTION);
+
+       return rc;
+}
+#else
+struct page * __meminit populate_section_memmap(unsigned long pfn,
+               unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
+{
+       return kvmalloc_node(array_size(sizeof(struct page),
+                                       PAGES_PER_SECTION), GFP_KERNEL, nid);
+}
+
+static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
+               struct vmem_altmap *altmap)
+{
+       kvfree(pfn_to_page(pfn));
 }
 
 static void free_map_bootmem(struct page *memmap)
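
The fill/clear pair added above is guarded bitmap arithmetic. The same rules can be sketched on a single 64-bit word, assuming SUBSECTIONS_PER_SECTION == 64 as in the x86_64 layout above (fill_map/clear_map are illustrative names, not kernel API):

#include <errno.h>
#include <stdint.h>

/* One subsection_map word; assumes SUBSECTIONS_PER_SECTION == 64. */
typedef uint64_t subsec_map_t;

/* fill_subsection_map() analogue: reject an empty request or any
 * overlap with already-active subsections, otherwise OR the bits in. */
static int fill_map(subsec_map_t *sm, subsec_map_t request)
{
	if (!request)
		return -EINVAL;
	if (*sm & request)
		return -EEXIST;
	*sm |= request;
	return 0;
}

/* clear_subsection_map() analogue: every requested bit must currently
 * be set, otherwise this is a double deactivation. */
static int clear_map(subsec_map_t *sm, subsec_map_t request)
{
	if ((*sm & request) != request)
		return -EINVAL;	/* "section already deactivated" */
	*sm ^= request;		/* XOR drops exactly the requested bits */
	return 0;
}

int main(void)
{
	subsec_map_t sm = 0;

	fill_map(&sm, 0x0f);			/* activate subsections 0-3 */
	if (fill_map(&sm, 0x08) != -EEXIST)	/* overlap is rejected */
		return 1;
	clear_map(&sm, 0x0f);			/* deactivate them again */
	return clear_map(&sm, 0x0f) == -EINVAL ? 0 : 1;	/* double free */
}
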
@@ -724,48 +758,51 @@ static void free_map_bootmem(struct page *memmap)
                        put_page_bootmem(page);
        }
 }
+
+static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
+{
+       return 0;
+}
+
+static bool is_subsection_map_empty(struct mem_section *ms)
+{
+       return true;
+}
+
+static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
+{
+       return 0;
+}
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
+/*
+ * To deactivate a memory region, there are 3 cases to handle across
+ * two configurations (SPARSEMEM_VMEMMAP={y,n}):
+ *
+ * 1. deactivation of a partial hot-added section (only possible in
+ *    the SPARSEMEM_VMEMMAP=y case).
+ *      a) section was present at memory init.
+ *      b) section was hot-added post memory init.
+ * 2. deactivation of a complete hot-added section.
+ * 3. deactivation of a complete section from memory init.
+ *
+ * For 1, when the subsection_map is not empty we will not be freeing the
+ * usage map, but still need to free the vmemmap range.
+ *
+ * For 2 and 3, the SPARSEMEM_VMEMMAP={y,n} cases are unified.
+ */
 static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
                struct vmem_altmap *altmap)
 {
-       DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
-       DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
        struct mem_section *ms = __pfn_to_section(pfn);
        bool section_is_early = early_section(ms);
        struct page *memmap = NULL;
        bool empty;
-       unsigned long *subsection_map = ms->usage
-               ? &ms->usage->subsection_map[0] : NULL;
-
-       subsection_mask_set(map, pfn, nr_pages);
-       if (subsection_map)
-               bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);
 
-       if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
-                               "section already deactivated (%#lx + %ld)\n",
-                               pfn, nr_pages))
+       if (clear_subsection_map(pfn, nr_pages))
                return;
 
-       /*
-        * There are 3 cases to handle across two configurations
-        * (SPARSEMEM_VMEMMAP={y,n}):
-        *
-        * 1/ deactivation of a partial hot-added section (only possible
-        * in the SPARSEMEM_VMEMMAP=y case).
-        *    a/ section was present at memory init
-        *    b/ section was hot-added post memory init
-        * 2/ deactivation of a complete hot-added section
-        * 3/ deactivation of a complete section from memory init
-        *
-        * For 1/, when subsection_map does not empty we will not be
-        * freeing the usage map, but still need to free the vmemmap
-        * range.
-        *
-        * For 2/ and 3/ the SPARSEMEM_VMEMMAP={y,n} cases are unified
-        */
-       bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
-       empty = bitmap_empty(subsection_map, SUBSECTIONS_PER_SECTION);
+       empty = is_subsection_map_empty(ms);
        if (empty) {
                unsigned long section_nr = pfn_to_section_nr(pfn);
 
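
With the bitmap manipulation factored out, section_deactivate() above only branches on whether the map emptied. A toy walk-through of the three cases from the new comment block, under the same single-word assumption as the earlier sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool map_empty(uint64_t sm) { return sm == 0; }

int main(void)
{
	uint64_t sm = 0xff;	/* subsections 0-7 of a section are active */

	/* Case 1: partial deactivation. The map stays non-empty, so
	 * ms->usage survives; only the vmemmap range is freed. */
	sm ^= 0x0f;
	printf("partial:  empty=%d -> keep usage, free vmemmap range\n",
	       map_empty(sm));

	/* Cases 2 and 3: the rest goes too. Once the map is empty,
	 * both ms->usage and the section memmap can be freed. */
	sm ^= 0xf0;
	printf("complete: empty=%d -> free usage and memmap\n",
	       map_empty(sm));
	return 0;
}
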
@@ -801,31 +838,19 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
 static struct page * __meminit section_activate(int nid, unsigned long pfn,
                unsigned long nr_pages, struct vmem_altmap *altmap)
 {
-       DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
        struct mem_section *ms = __pfn_to_section(pfn);
        struct mem_section_usage *usage = NULL;
-       unsigned long *subsection_map;
        struct page *memmap;
        int rc = 0;
 
-       subsection_mask_set(map, pfn, nr_pages);
-
        if (!ms->usage) {
                usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
                if (!usage)
                        return ERR_PTR(-ENOMEM);
                ms->usage = usage;
        }
-       subsection_map = &ms->usage->subsection_map[0];
-
-       if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
-               rc = -EINVAL;
-       else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
-               rc = -EEXIST;
-       else
-               bitmap_or(subsection_map, map, subsection_map,
-                               SUBSECTIONS_PER_SECTION);
 
+       rc = fill_subsection_map(pfn, nr_pages);
        if (rc) {
                if (usage)
                        ms->usage = NULL;
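
The error path above unwinds only what this call allocated: the local usage pointer is non-NULL only when section_activate() itself created ms->usage, so a failed fill_subsection_map() on a pre-existing section leaves the original usage intact. A minimal sketch of that ownership pattern (illustrative types, not the kernel structures):

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

struct usage { uint64_t subsection_map; };
struct section { struct usage *usage; };

static int activate(struct section *ms, uint64_t request)
{
	struct usage *usage = NULL;	/* non-NULL only if we allocated it */

	if (!ms->usage) {
		usage = calloc(1, sizeof(*usage));
		if (!usage)
			return -ENOMEM;
		ms->usage = usage;
	}

	if (ms->usage->subsection_map & request) {
		/* Roll back our own allocation, never a pre-existing one. */
		if (usage) {
			ms->usage = NULL;
			free(usage);
		}
		return -EEXIST;
	}
	ms->usage->subsection_map |= request;
	return 0;
}

int main(void)
{
	struct section ms = { 0 };

	activate(&ms, 0x0f);		/* allocates usage, fills bits 0-3 */
	activate(&ms, 0x08);		/* fails with -EEXIST, usage survives */
	return ms.usage != NULL ? 0 : 1;
}
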
@@ -861,6 +886,9 @@ static struct page * __meminit section_activate(int nid, unsigned long pfn,
  *
  * This is only intended for hotplug.
  *
+ * Note that only VMEMMAP supports sub-section aligned hotplug;
+ * the proper alignment and size are gated by check_pfn_span().
+ *
  * Return:
  * * 0         - On success.
  * * -EEXIST   - Section is already present.
@@ -894,7 +923,7 @@ int __meminit sparse_add_section(int nid, unsigned long start_pfn,
 
        /* Align memmap to section boundary in the subsection case */
        if (section_nr_to_pfn(section_nr) != start_pfn)
-               memmap = pfn_to_kaddr(section_nr_to_pfn(section_nr));
+               memmap = pfn_to_page(section_nr_to_pfn(section_nr));
        sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);
 
        return 0;
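
The final hunk also fixes a type confusion: memmap is a struct page *, while pfn_to_kaddr() returns a linear-map void *. With CONFIG_SPARSEMEM_VMEMMAP the memmap is a virtually contiguous struct page array indexed by pfn, so realigning a subsection's memmap to its section start is plain pointer arithmetic. A toy model of that relationship (toy_vmemmap stands in for the arch's vmemmap base; the pfn values are illustrative):

#include <stdio.h>

struct page { unsigned long flags; };

/* Toy backing store standing in for the arch's vmemmap region. */
static struct page toy_vmemmap[0x9000];

/* With CONFIG_SPARSEMEM_VMEMMAP, pfn_to_page(pfn) is essentially
 * vmemmap + pfn: an index into one virtually contiguous array. */
static struct page *toy_pfn_to_page(unsigned long pfn)
{
	return toy_vmemmap + pfn;
}

int main(void)
{
	unsigned long section_start = 0x8000;	/* section-aligned pfn */
	unsigned long hotadd_start = 0x8200;	/* subsection-aligned pfn */

	struct page *sub_memmap = toy_pfn_to_page(hotadd_start);
	struct page *sec_memmap = toy_pfn_to_page(section_start);

	/* sparse_add_section() needs the section-start memmap; moving
	 * back by the pfn delta yields exactly pfn_to_page(section pfn),
	 * which pfn_to_kaddr() could never produce. */
	printf("section memmap = subsection memmap - %ld pages\n",
	       (long)(sub_memmap - sec_memmap));
	return 0;
}
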