kasan: introduce CONFIG_KASAN_HW_TAGS
diff --git a/mm/memblock.c b/mm/memblock.c
index b68ee86..d24bcfa 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -871,7 +871,7 @@ int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size)
  * @base: base address of the region
  * @size: size of the region
  * @set: set or clear the flag
- * @flag: the flag to udpate
+ * @flag: the flag to update
  *
  * This function isolates region [@base, @base + @size), and sets/clears flag
  *
@@ -1419,6 +1419,9 @@ phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
                                             phys_addr_t start,
                                             phys_addr_t end)
 {
+       memblock_dbg("%s: %llu bytes align=0x%llx from=%pa max_addr=%pa %pS\n",
+                    __func__, (u64)size, (u64)align, &start, &end,
+                    (void *)_RET_IP_);
        return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
                                        false);
 }
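
With "memblock=debug" on the kernel command line, the added memblock_dbg() line
makes physical-range allocations visible in the boot log: size, alignment, the
requested [start, end) window, and the caller via _RET_IP_. A minimal sketch of
a caller, purely for illustration (this allocation and its parameters are
invented, not part of the patch):

    /* Hypothetical early-boot user of memblock_phys_alloc_range(). */
    phys_addr_t base;

    /* 16 MiB, 2 MiB aligned, anywhere in the low 4 GiB. */
    base = memblock_phys_alloc_range(SZ_16M, SZ_2M, 0, SZ_4G);
    if (!base)
            pr_warn("example: low allocation failed\n");
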
@@ -1926,6 +1929,85 @@ static int __init early_memblock(char *p)
 }
 early_param("memblock", early_memblock);
 
+static void __init free_memmap(unsigned long start_pfn, unsigned long end_pfn)
+{
+       struct page *start_pg, *end_pg;
+       phys_addr_t pg, pgend;
+
+       /*
+        * Convert start_pfn/end_pfn to a struct page pointer. Translate
+        * pfn - 1 and step one struct page past it rather than translating
+        * the pfn directly: a one-past-the-end pfn can sit exactly on a
+        * section boundary, where the neighbouring memmap section may not
+        * exist.
+        */
+       start_pg = pfn_to_page(start_pfn - 1) + 1;
+       end_pg = pfn_to_page(end_pfn - 1) + 1;
+
+       /*
+        * Convert to physical addresses, and round start upwards and end
+        * downwards.
+        */
+       pg = PAGE_ALIGN(__pa(start_pg));
+       pgend = __pa(end_pg) & PAGE_MASK;
+
+       /*
+        * If there are free pages between these, free the section of the
+        * memmap array.
+        */
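+       /*
+        * Worked example (assuming 4 KiB pages and a 64-byte struct page,
+        * both configuration-dependent): one page of memmap holds
+        * 4096 / 64 = 64 entries, so the hole between banks must cover
+        * at least one full page of memmap before pg < pgend and anything
+        * is actually freed.
+        */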
+       if (pg < pgend)
+               memblock_free(pg, pgend - pg);
+}
+
+/*
+ * The mem_map array can get very big.  Free the unused area of the memory map.
+ */
+static void __init free_unused_memmap(void)
+{
+       unsigned long start, end, prev_end = 0;
+       int i;
+
+       /*
+        * Trimming the memmap is only safe when the architecture's
+        * pfn_valid() can report the resulting holes; with
+        * SPARSEMEM_VMEMMAP the memmap lives in a virtual mapping
+        * and is left untouched.
+        */
+       if (!IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) ||
+           IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
+               return;
+
+       /*
+        * This relies on each bank being in address order.
+        * The banks are sorted previously in bootmem_init().
+        */
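+       /*
+        * Hypothetical layout, for illustration only (flat memmap,
+        * MAX_ORDER_NR_PAGES == 1024, pfns invented):
+        *
+        *   bank 0: [0x00000, 0x40000) -> prev_end becomes 0x40000
+        *   bank 1: [0x80000, 0xc0000) -> start stays 0x80000 after rounding
+        *
+        * On the second iteration prev_end < start, so the memmap for
+        * pfns [0x40000, 0x80000) is handed back via free_memmap().
+        */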
+       for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
+#ifdef CONFIG_SPARSEMEM
+               /*
+                * Take care not to free memmap entries that don't exist
+                * due to SPARSEMEM sections which aren't present.
+                */
+               start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
+#else
+               /*
+                * Align down here since the VM subsystem insists that the
+                * memmap entries are valid from the bank start aligned to
+                * MAX_ORDER_NR_PAGES.
+                */
+               start = round_down(start, MAX_ORDER_NR_PAGES);
+#endif
+
+               /*
+                * If we had a previous bank, and there is a space
+                * between the current bank and the previous, free it.
+                */
+               if (prev_end && prev_end < start)
+                       free_memmap(prev_end, start);
+
+               /*
+                * Align up here since the VM subsystem insists that the
+                * memmap entries are valid from the bank end aligned to
+                * MAX_ORDER_NR_PAGES.
+                */
+               prev_end = ALIGN(end, MAX_ORDER_NR_PAGES);
+       }
+
+#ifdef CONFIG_SPARSEMEM
+       /*
+        * The memmap of the last present section extends to the section
+        * boundary even when the memory bank does not; free the unused
+        * tail beyond the last bank.
+        */
+       if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
+               free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
+#endif
+}
+
 static void __init __free_pages_memory(unsigned long start, unsigned long end)
 {
        int order;
@@ -2012,6 +2094,7 @@ unsigned long __init memblock_free_all(void)
 {
        unsigned long pages;
 
+       free_unused_memmap();
        reset_all_zones_managed_pages();
 
        pages = free_low_memory_core_early();
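
For context, a minimal sketch of where memblock_free_all() runs; this is a
simplified, hypothetical mem_init(), not any particular architecture's.
Because free_unused_memmap() returns the trimmed memmap pages to memblock
before free_low_memory_core_early() walks the free ranges, those pages are
released to the buddy allocator in the same pass:

    #include <linux/memblock.h>

    /* Simplified, hypothetical arch mem_init(), for illustration. */
    void __init mem_init(void)
    {
            /* ... arch-specific setup (highmem, zero page, ...) ... */

            /*
             * Hands all free memblock memory to the buddy allocator;
             * with this patch it first trims the unused memmap.
             */
            memblock_free_all();
    }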