mm: hugetlb: optionally allocate gigantic hugepages using cma
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f9ea1e5..cd45915 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -28,6 +28,7 @@
 #include <linux/jhash.h>
 #include <linux/numa.h>
 #include <linux/llist.h>
+#include <linux/cma.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -44,6 +45,9 @@
 int hugetlb_max_hstate __read_mostly;
 unsigned int default_hstate_idx;
 struct hstate hstates[HUGE_MAX_HSTATE];
+
+static struct cma *hugetlb_cma[MAX_NUMNODES];
+
 /*
  * Minimum page order among possible hugepage sizes, set to a proper value
  * at boot time.
@@ -1228,6 +1232,14 @@ static void destroy_compound_gigantic_page(struct page *page,
 
 static void free_gigantic_page(struct page *page, unsigned int order)
 {
+       /*
+        * If the page isn't allocated using the cma allocator,
+        * cma_release() returns false.
+        */
+       if (IS_ENABLED(CONFIG_CMA) &&
+           cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
+               return;
+
        free_contig_range(page_to_pfn(page), 1 << order);
 }
 
@@ -1237,6 +1249,21 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 {
        unsigned long nr_pages = 1UL << huge_page_order(h);
 
+       if (IS_ENABLED(CONFIG_CMA)) {
+               struct page *page;
+               int node;
+
+               for_each_node_mask(node, *nodemask) {
+                       if (!hugetlb_cma[node])
+                               continue;
+
+                       page = cma_alloc(hugetlb_cma[node], nr_pages,
+                                        huge_page_order(h), true);
+                       if (page)
+                               return page;
+               }
+       }
+
        return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
 }
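
For reference, the CMA entry points used above have roughly these prototypes in this kernel generation (paraphrased from <linux/cma.h>, so verify against the tree you are applying this to). The final no_warn argument is true because an allocation failure here is not fatal: the caller simply falls back to alloc_contig_pages(). cma_release() returns false when the pages do not belong to the given area, which is what lets free_gigantic_page() fall through to free_contig_range() for non-CMA pages. Both calls can sleep, which is why the update_and_free_page() hunk below drops hugetlb_lock around the free path.

struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
		       bool no_warn);
bool cma_release(struct cma *cma, const struct page *pages,
		 unsigned int count);
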
 
@@ -1281,8 +1308,14 @@ static void update_and_free_page(struct hstate *h, struct page *page)
        set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
        set_page_refcounted(page);
        if (hstate_is_gigantic(h)) {
+               /*
+                * Temporarily drop the hugetlb_lock, because
+                * we might block in free_gigantic_page().
+                */
+               spin_unlock(&hugetlb_lock);
                destroy_compound_gigantic_page(page, huge_page_order(h));
                free_gigantic_page(page, huge_page_order(h));
+               spin_lock(&hugetlb_lock);
        } else {
                __free_pages(page, huge_page_order(h));
        }
@@ -2010,6 +2043,7 @@ struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
  * of size 'delta'.
  */
 static int gather_surplus_pages(struct hstate *h, int delta)
+       __must_hold(&hugetlb_lock)
 {
        struct list_head surplus_list;
        struct page *page, *tmp;
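
The __must_hold(&hugetlb_lock) annotation is a sparse-only hint, not a runtime check: it documents that gather_surplus_pages() must be entered and left with hugetlb_lock held, which is worth stating explicitly because the function drops and retakes the lock internally. Outside of a sparse build the attribute compiles away to nothing; its definition is roughly the following (paraphrased from include/linux/compiler_types.h).

#ifdef __CHECKER__
/* context(lock, entry, exit): held on entry (1) and still held on exit (1) */
# define __must_hold(x)	__attribute__((context(x,1,1)))
#else
# define __must_hold(x)
#endif
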
@@ -2538,6 +2572,10 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
 
        for (i = 0; i < h->max_huge_pages; ++i) {
                if (hstate_is_gigantic(h)) {
+                       if (IS_ENABLED(CONFIG_CMA) && hugetlb_cma[0]) {
+                               pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
+                               break;
+                       }
                        if (!alloc_bootmem_huge_page(h))
                                break;
                } else if (!alloc_pool_huge_page(h,
@@ -3193,6 +3231,7 @@ static int __init hugetlb_init(void)
                        default_hstate.max_huge_pages = default_hstate_max_huge_pages;
        }
 
+       hugetlb_cma_check();
        hugetlb_init_hstates();
        gather_bootmem_prealloc();
        report_hugepages();
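
hugetlb_init() now calls hugetlb_cma_check() unconditionally, while the function itself is only defined under CONFIG_CMA at the bottom of this file, so the change also needs a declaration plus !CONFIG_CMA stubs in include/linux/hugetlb.h, along these lines (a sketch of the header side, not shown in this hunk):

#ifdef CONFIG_CMA
extern void __init hugetlb_cma_reserve(int order);
extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
static inline __init void hugetlb_cma_check(void)
{
}
#endif
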
@@ -5505,3 +5544,74 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
                spin_unlock(&hugetlb_lock);
        }
 }
+
+#ifdef CONFIG_CMA
+static unsigned long hugetlb_cma_size __initdata;
+static bool cma_reserve_called __initdata;
+
+static int __init cmdline_parse_hugetlb_cma(char *p)
+{
+       hugetlb_cma_size = memparse(p, &p);
+       return 0;
+}
+
+early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);
+
+void __init hugetlb_cma_reserve(int order)
+{
+       unsigned long size, reserved, per_node;
+       int nid;
+
+       cma_reserve_called = true;
+
+       if (!hugetlb_cma_size)
+               return;
+
+       if (hugetlb_cma_size < (PAGE_SIZE << order)) {
+               pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
+                       (PAGE_SIZE << order) / SZ_1M);
+               return;
+       }
+
+       /*
+        * If a 3 GB area is requested on a machine with 4 numa nodes,
+        * let's allocate 1 GB on the first three nodes and ignore the last one.
+        */
+       per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes);
+       pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
+               hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
+
+       reserved = 0;
+       for_each_node_state(nid, N_ONLINE) {
+               int res;
+
+               size = min(per_node, hugetlb_cma_size - reserved);
+               size = round_up(size, PAGE_SIZE << order);
+
+               res = cma_declare_contiguous_nid(0, size, 0, PAGE_SIZE << order,
+                                                0, false, "hugetlb",
+                                                &hugetlb_cma[nid], nid);
+               if (res) {
+                       pr_warn("hugetlb_cma: reservation failed: err %d, node %d\n",
+                               res, nid);
+                       continue;
+               }
+
+               reserved += size;
+               pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
+                       size / SZ_1M, nid);
+
+               if (reserved >= hugetlb_cma_size)
+                       break;
+       }
+}
+
+void __init hugetlb_cma_check(void)
+{
+       if (!hugetlb_cma_size || cma_reserve_called)
+               return;
+
+       pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
+}
+
+#endif /* CONFIG_CMA */
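
hugetlb_cma_reserve() only does something if an architecture calls it during early boot, after memblock knows the memory layout but before the buddy allocator takes over, passing the order of its gigantic page size. An x86-64 style hook could look roughly like this (a sketch; the exact call site in setup_arch() and the GB-page feature check are illustrative assumptions, adjust for your architecture):

/*
 * Reserve the per-node CMA areas for 1 GiB pages
 * (order = PUD_SHIFT - PAGE_SHIFT) when the CPU supports GB pages.
 * The total size comes from the hugetlb_cma= boot parameter.
 */
if (boot_cpu_has(X86_FEATURE_GBPAGES))
	hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);

With such a hook in place, booting with hugetlb_cma=4G reserves up to 4 GiB spread over the online nodes, and gigantic pages are then allocated from (and released back to) that area at runtime through nr_hugepages rather than at boot time, which is why hugetlb_hstate_alloc_pages() above skips the boot-time allocation when hugetlb_cma is set.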