kasan, x86, arm64, s390: rename functions for modules shadow
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 4165304..feecd61 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -118,7 +118,6 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                if (size != PAGE_SIZE) {
                        pte_t entry = pfn_pte(pfn, prot);
 
-                       entry = pte_mkhuge(entry);
                        entry = arch_make_huge_pte(entry, ilog2(size), 0);
                        set_huge_pte_at(&init_mm, addr, pte, entry);
                        pfn += PFN_DOWN(size);
@@ -776,23 +775,13 @@ get_subtree_max_size(struct rb_node *node)
        return va ? va->subtree_max_size : 0;
 }
 
-/*
- * Gets called when remove the node and rotate.
- */
-static __always_inline unsigned long
-compute_subtree_max_size(struct vmap_area *va)
-{
-       return max3(va_size(va),
-               get_subtree_max_size(va->rb_node.rb_left),
-               get_subtree_max_size(va->rb_node.rb_right));
-}
-
 RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
        struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)
 
 static void purge_vmap_area_lazy(void);
 static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
-static unsigned long lazy_max_pages(void);
+static void drain_vmap_area_work(struct work_struct *work);
+static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work);
 
 static atomic_long_t nr_vmalloc_pages;
 
@@ -973,6 +962,17 @@ unlink_va(struct vmap_area *va, struct rb_root *root)
 }
 
 #if DEBUG_AUGMENT_PROPAGATE_CHECK
+/*
+ * Gets called when a node is removed or the tree is rotated.
+ */
+static __always_inline unsigned long
+compute_subtree_max_size(struct vmap_area *va)
+{
+       return max3(va_size(va),
+               get_subtree_max_size(va->rb_node.rb_left),
+               get_subtree_max_size(va->rb_node.rb_right));
+}
+
 static void
 augment_tree_propagate_check(void)
 {
@@ -1189,22 +1189,28 @@ is_within_this_va(struct vmap_area *va, unsigned long size,
 /*
  * Find the first free block(lowest start address) in the tree,
  * that will accomplish the request corresponding to passing
- * parameters.
+ * parameters. Please note, with an alignment bigger than PAGE_SIZE,
+ * a search length is adjusted to account for worst case alignment
+ * overhead.
  */
 static __always_inline struct vmap_area *
-find_vmap_lowest_match(unsigned long size,
-       unsigned long align, unsigned long vstart)
+find_vmap_lowest_match(unsigned long size, unsigned long align,
+       unsigned long vstart, bool adjust_search_size)
 {
        struct vmap_area *va;
        struct rb_node *node;
+       unsigned long length;
 
        /* Start from the root. */
        node = free_vmap_area_root.rb_node;
 
+       /* Adjust the search size for alignment overhead. */
+       length = adjust_search_size ? size + align - 1 : size;
+
        while (node) {
                va = rb_entry(node, struct vmap_area, rb_node);
 
-               if (get_subtree_max_size(node->rb_left) >= size &&
+               if (get_subtree_max_size(node->rb_left) >= length &&
                                vstart < va->va_start) {
                        node = node->rb_left;
                } else {
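
The hunk above widens the effective search size: with an alignment bigger than PAGE_SIZE, the tree walk compares subtree_max_size against size + align - 1, because any block of at least that many bytes still holds "size" bytes after its start is rounded up to "align" (aligning up costs at most align - 1 bytes). A minimal userspace sketch of that worst-case arithmetic, with made-up values purely for illustration (this is not the kernel code itself):

#include <stdio.h>

int main(void)
{
	unsigned long page   = 4096;
	unsigned long size   = 3 * page;           /* requested size         */
	unsigned long align  = 4 * page;           /* requested alignment    */
	unsigned long length = size + align - 1;   /* adjusted search length */

	/* A hypothetical free block that is only page aligned. */
	unsigned long va_start = 5 * page;
	unsigned long aligned  = (va_start + align - 1) & ~(align - 1);

	printf("alignment overhead %lu, size still fits after aligning: %d\n",
	       aligned - va_start,
	       aligned + size <= va_start + length);
	return 0;
}
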
@@ -1214,9 +1220,9 @@ find_vmap_lowest_match(unsigned long size,
                        /*
                         * Does not make sense to go deeper towards the right
                         * sub-tree if it does not have a free block that is
-                        * equal or bigger to the requested search size.
+                        * at least as large as the requested search length.
                         */
-                       if (get_subtree_max_size(node->rb_right) >= size) {
+                       if (get_subtree_max_size(node->rb_right) >= length) {
                                node = node->rb_right;
                                continue;
                        }
@@ -1232,7 +1238,7 @@ find_vmap_lowest_match(unsigned long size,
                                if (is_within_this_va(va, size, align, vstart))
                                        return va;
 
-                               if (get_subtree_max_size(node->rb_right) >= size &&
+                               if (get_subtree_max_size(node->rb_right) >= length &&
                                                vstart <= va->va_start) {
                                        /*
                                         * Shift the vstart forward. Please note, we update it with
@@ -1280,7 +1286,7 @@ find_vmap_lowest_match_check(unsigned long size, unsigned long align)
        get_random_bytes(&rnd, sizeof(rnd));
        vstart = VMALLOC_START + rnd;
 
-       va_1 = find_vmap_lowest_match(size, align, vstart);
+       va_1 = find_vmap_lowest_match(size, align, vstart, false);
        va_2 = find_vmap_lowest_linear_match(size, align, vstart);
 
        if (va_1 != va_2)
@@ -1431,12 +1437,25 @@ static __always_inline unsigned long
 __alloc_vmap_area(unsigned long size, unsigned long align,
        unsigned long vstart, unsigned long vend)
 {
+       bool adjust_search_size = true;
        unsigned long nva_start_addr;
        struct vmap_area *va;
        enum fit_type type;
        int ret;
 
-       va = find_vmap_lowest_match(size, align, vstart);
+       /*
+        * Do not adjust the search size when:
+        *   a) align <= PAGE_SIZE, because it does not make any sense.
+        *      All blocks (their start addresses) are at least PAGE_SIZE
+        *      aligned anyway;
+        *   b) a short range where the requested size exactly matches the
+        *      specified [vstart:vend] interval and align > PAGE_SIZE.
+        *      With an adjusted search length the allocation would not succeed.
+        */
+       if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size))
+               adjust_search_size = false;
+
+       va = find_vmap_lowest_match(size, align, vstart, adjust_search_size);
        if (unlikely(!va))
                return vend;
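
Case (b) in the comment above is the reason the adjustment has an opt-out: when the [vstart:vend] window is exactly the requested size, the inflated search length can never fit in the window even though the plain size would. A tiny standalone C sketch of that corner case, with all numbers invented for illustration:

#include <stdio.h>

int main(void)
{
	unsigned long page = 4096;
	unsigned long size = 8 * page, align = 2 * page;

	/* Exact-fit window: [vstart:vend] is exactly "size" bytes long. */
	unsigned long vstart = 64 * page;
	unsigned long vend   = vstart + size;

	unsigned long adjusted = size + align - 1;

	printf("plain size fits: %d, adjusted length fits: %d\n",
	       size <= vend - vstart,        /* 1: the allocation is possible */
	       adjusted <= vend - vstart);   /* 0: the search would give up   */
	return 0;
}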
 
@@ -1719,18 +1738,6 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
        return true;
 }
 
-/*
- * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
- * is already purging.
- */
-static void try_purge_vmap_area_lazy(void)
-{
-       if (mutex_trylock(&vmap_purge_lock)) {
-               __purge_vmap_area_lazy(ULONG_MAX, 0);
-               mutex_unlock(&vmap_purge_lock);
-       }
-}
-
 /*
  * Kick off a purge of the outstanding lazy areas.
  */
@@ -1742,6 +1749,20 @@ static void purge_vmap_area_lazy(void)
        mutex_unlock(&vmap_purge_lock);
 }
 
+static void drain_vmap_area_work(struct work_struct *work)
+{
+       unsigned long nr_lazy;
+
+       do {
+               mutex_lock(&vmap_purge_lock);
+               __purge_vmap_area_lazy(ULONG_MAX, 0);
+               mutex_unlock(&vmap_purge_lock);
+
+               /* Recheck if further work is required. */
+               nr_lazy = atomic_long_read(&vmap_lazy_nr);
+       } while (nr_lazy > lazy_max_pages());
+}
+
 /*
  * Free a vmap area, caller ensuring that the area has been unmapped
  * and flush_cache_vunmap had been called for the correct range
@@ -1768,7 +1789,7 @@ static void free_vmap_area_noflush(struct vmap_area *va)
 
        /* After this point, we may free va at any time */
        if (unlikely(nr_lazy > lazy_max_pages()))
-               try_purge_vmap_area_lazy();
+               schedule_work(&drain_vmap_work);
 }
 
 /*
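
Together with the removal of try_purge_vmap_area_lazy() earlier, the hunks above move the lazy purge out of the freeing path: free_vmap_area_noflush() now only schedules drain_vmap_work, and the drain runs later in workqueue (process) context, rechecking vmap_lazy_nr against lazy_max_pages() until the backlog is low enough. Below is a stripped-down, illustrative kernel-module sketch of the same producer/deferred-drain pattern; all names and the threshold are invented, and this is not the vmalloc code:

/* Producers only bump a counter and kick a work item; the actual drain
 * happens asynchronously in workqueue context and loops until the
 * backlog falls below a threshold.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>

static atomic_long_t demo_lazy_nr;	/* backlog counter */
#define DEMO_LAZY_MAX	32UL		/* made-up threshold */

static void demo_drain_work(struct work_struct *work)
{
	unsigned long nr;

	do {
		/* Drain everything queued so far. */
		atomic_long_set(&demo_lazy_nr, 0);

		/* Recheck: producers may have queued more in the meantime. */
		nr = atomic_long_read(&demo_lazy_nr);
	} while (nr > DEMO_LAZY_MAX);
}
static DECLARE_WORK(demo_drain, demo_drain_work);

/* Producer side: cheap, never purges in its own context. */
static void demo_free_lazy(unsigned long nr_pages)
{
	unsigned long nr_lazy = atomic_long_add_return(nr_pages, &demo_lazy_nr);

	if (unlikely(nr_lazy > DEMO_LAZY_MAX))
		schedule_work(&demo_drain);
}

static int __init demo_init(void)
{
	demo_free_lazy(2 * DEMO_LAZY_MAX);	/* exercise the slow path once */
	return 0;
}

static void __exit demo_exit(void)
{
	flush_work(&demo_drain);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
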
@@ -2526,7 +2547,7 @@ struct vm_struct *remove_vm_area(const void *addr)
                va->vm = NULL;
                spin_unlock(&vmap_area_lock);
 
-               kasan_free_shadow(vm);
+               kasan_free_module_shadow(vm);
                free_unmap_vmap_area(va);
 
                return vm;
@@ -2925,7 +2946,6 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                                 int node)
 {
        const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
-       const gfp_t orig_gfp_mask = gfp_mask;
        bool nofail = gfp_mask & __GFP_NOFAIL;
        unsigned long addr = (unsigned long)area->addr;
        unsigned long size = get_vm_area_size(area);
@@ -2949,7 +2969,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
        }
 
        if (!area->pages) {
-               warn_alloc(orig_gfp_mask, NULL,
+               warn_alloc(gfp_mask, NULL,
                        "vmalloc error: size %lu, failed to allocated page array size %lu",
                        nr_small_pages * PAGE_SIZE, array_size);
                free_vm_area(area);
@@ -2959,8 +2979,8 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
        set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
        page_order = vm_area_page_order(area);
 
-       area->nr_pages = vm_area_alloc_pages(gfp_mask, node,
-               page_order, nr_small_pages, area->pages);
+       area->nr_pages = vm_area_alloc_pages(gfp_mask | __GFP_NOWARN,
+               node, page_order, nr_small_pages, area->pages);
 
        atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
        if (gfp_mask & __GFP_ACCOUNT) {
@@ -2976,7 +2996,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
         * allocation request, free them via __vfree() if any.
         */
        if (area->nr_pages != nr_small_pages) {
-               warn_alloc(orig_gfp_mask, NULL,
+               warn_alloc(gfp_mask, NULL,
                        "vmalloc error: size %lu, page order %u, failed to allocate pages",
                        area->nr_pages * PAGE_SIZE, page_order);
                goto fail;
@@ -3004,7 +3024,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                memalloc_noio_restore(flags);
 
        if (ret < 0) {
-               warn_alloc(orig_gfp_mask, NULL,
+               warn_alloc(gfp_mask, NULL,
                        "vmalloc error: size %lu, failed to map pages",
                        area->nr_pages * PAGE_SIZE);
                goto fail;