Merge tag 'armsoc-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc
[linux-2.6-microblaze.git] / mm / vmalloc.c
index c1246d7..a3c70e2 100644 (file)
@@ -329,8 +329,6 @@ EXPORT_SYMBOL(vmalloc_to_pfn);
 #define DEBUG_AUGMENT_PROPAGATE_CHECK 0
 #define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0
 
-#define VM_LAZY_FREE   0x02
-#define VM_VM_AREA     0x04
 
 static DEFINE_SPINLOCK(vmap_area_lock);
 /* Export for kexec only */
@@ -398,9 +396,8 @@ compute_subtree_max_size(struct vmap_area *va)
                get_subtree_max_size(va->rb_node.rb_right));
 }
 
-RB_DECLARE_CALLBACKS(static, free_vmap_area_rb_augment_cb,
-       struct vmap_area, rb_node, unsigned long, subtree_max_size,
-       compute_subtree_max_size)
+RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
+       struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)
 
 static void purge_vmap_area_lazy(void);
 static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
@@ -1116,7 +1113,7 @@ retry:
 
        va->va_start = addr;
        va->va_end = addr + size;
-       va->flags = 0;
+       va->vm = NULL;
        insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
 
        spin_unlock(&vmap_area_lock);
@@ -1282,7 +1279,14 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
        llist_for_each_entry_safe(va, n_va, valist, purge_list) {
                unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
 
-               __free_vmap_area(va);
+               /*
+                * Finally insert or merge lazily-freed area. It is
+                * detached and there is no need to "unlink" it from
+                * anything.
+                */
+               merge_or_add_vmap_area(va,
+                       &free_vmap_area_root, &free_vmap_area_list);
+
                atomic_long_sub(nr, &vmap_lazy_nr);
 
                if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
@@ -1324,6 +1328,10 @@ static void free_vmap_area_noflush(struct vmap_area *va)
 {
        unsigned long nr_lazy;
 
+       spin_lock(&vmap_area_lock);
+       unlink_va(va, &vmap_area_root);
+       spin_unlock(&vmap_area_lock);
+
        nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
                                PAGE_SHIFT, &vmap_lazy_nr);
 
@@ -1918,7 +1926,6 @@ void __init vmalloc_init(void)
                if (WARN_ON_ONCE(!va))
                        continue;
 
-               va->flags = VM_VM_AREA;
                va->va_start = (unsigned long)tmp->addr;
                va->va_end = va->va_start + tmp->size;
                va->vm = tmp;
@@ -2016,7 +2023,6 @@ static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
        vm->size = va->va_end - va->va_start;
        vm->caller = caller;
        va->vm = vm;
-       va->flags |= VM_VM_AREA;
        spin_unlock(&vmap_area_lock);
 }
 
@@ -2121,10 +2127,10 @@ struct vm_struct *find_vm_area(const void *addr)
        struct vmap_area *va;
 
        va = find_vmap_area((unsigned long)addr);
-       if (va && va->flags & VM_VM_AREA)
-               return va->vm;
+       if (!va)
+               return NULL;
 
-       return NULL;
+       return va->vm;
 }
 
 /**
@@ -2143,14 +2149,12 @@ struct vm_struct *remove_vm_area(const void *addr)
 
        might_sleep();
 
-       va = find_vmap_area((unsigned long)addr);
-       if (va && va->flags & VM_VM_AREA) {
+       spin_lock(&vmap_area_lock);
+       va = __find_vmap_area((unsigned long)addr);
+       if (va && va->vm) {
                struct vm_struct *vm = va->vm;
 
-               spin_lock(&vmap_area_lock);
                va->vm = NULL;
-               va->flags &= ~VM_VM_AREA;
-               va->flags |= VM_LAZY_FREE;
                spin_unlock(&vmap_area_lock);
 
                kasan_free_shadow(vm);
@@ -2158,6 +2162,8 @@ struct vm_struct *remove_vm_area(const void *addr)
 
                return vm;
        }
+
+       spin_unlock(&vmap_area_lock);
        return NULL;
 }
 
@@ -2402,7 +2408,6 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
        nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
        array_size = (nr_pages * sizeof(struct page *));
 
-       area->nr_pages = nr_pages;
        /* Please note that the recursion is strictly bounded. */
        if (array_size > PAGE_SIZE) {
                pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask,
@@ -2410,13 +2415,16 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
        } else {
                pages = kmalloc_node(array_size, nested_gfp, node);
        }
-       area->pages = pages;
-       if (!area->pages) {
+
+       if (!pages) {
                remove_vm_area(area->addr);
                kfree(area);
                return NULL;
        }
 
+       area->pages = pages;
+       area->nr_pages = nr_pages;
+
        for (i = 0; i < area->nr_pages; i++) {
                struct page *page;
 
@@ -2851,7 +2859,7 @@ long vread(char *buf, char *addr, unsigned long count)
                if (!count)
                        break;
 
-               if (!(va->flags & VM_VM_AREA))
+               if (!va->vm)
                        continue;
 
                vm = va->vm;
@@ -2931,7 +2939,7 @@ long vwrite(char *buf, char *addr, unsigned long count)
                if (!count)
                        break;
 
-               if (!(va->flags & VM_VM_AREA))
+               if (!va->vm)
                        continue;
 
                vm = va->vm;
@@ -3450,6 +3458,22 @@ static void show_numa_info(struct seq_file *m, struct vm_struct *v)
        }
 }
 
+static void show_purge_info(struct seq_file *m)
+{
+       struct llist_node *head;
+       struct vmap_area *va;
+
+       head = READ_ONCE(vmap_purge_list.first);
+       if (head == NULL)
+               return;
+
+       llist_for_each_entry(va, head, purge_list) {
+               seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
+                       (void *)va->va_start, (void *)va->va_end,
+                       va->va_end - va->va_start);
+       }
+}
+
 static int s_show(struct seq_file *m, void *p)
 {
        struct vmap_area *va;
@@ -3458,14 +3482,13 @@ static int s_show(struct seq_file *m, void *p)
        va = list_entry(p, struct vmap_area, list);
 
        /*
-        * s_show can encounter race with remove_vm_area, !VM_VM_AREA on
-        * behalf of vmap area is being tear down or vm_map_ram allocation.
+        * s_show can race with remove_vm_area(): !vm means the vmap
+        * area is being torn down, or it is a vm_map_ram allocation.
         */
-       if (!(va->flags & VM_VM_AREA)) {
-               seq_printf(m, "0x%pK-0x%pK %7ld %s\n",
+       if (!va->vm) {
+               seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
                        (void *)va->va_start, (void *)va->va_end,
-                       va->va_end - va->va_start,
-                       va->flags & VM_LAZY_FREE ? "unpurged vm_area" : "vm_map_ram");
+                       va->va_end - va->va_start);
 
                return 0;
        }
@@ -3504,6 +3527,16 @@ static int s_show(struct seq_file *m, void *p)
 
        show_numa_info(m, v);
        seq_putc(m, '\n');
+
+       /*
+        * As a final step, dump "unpurged" areas. Note that the
+        * entire "/proc/vmallocinfo" output will not be
+        * address-sorted, because the purge list is not
+        * sorted.
+        */
+       if (list_is_last(&va->list, &vmap_area_list))
+               show_purge_info(m);
+
        return 0;
 }