Merge tag 'mm-stable-2024-03-13-20-04' of git://git.kernel.org/pub/scm/linux/kernel...
mm/vmalloc.c
index 1e36322..22aa63f 100644
@@ -800,17 +800,9 @@ EXPORT_SYMBOL(vmalloc_to_pfn);
 #define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0
 
 
-static DEFINE_SPINLOCK(vmap_area_lock);
 static DEFINE_SPINLOCK(free_vmap_area_lock);
-/* Export for kexec only */
-LIST_HEAD(vmap_area_list);
-static struct rb_root vmap_area_root = RB_ROOT;
 static bool vmap_initialized __read_mostly;
 
-static struct rb_root purge_vmap_area_root = RB_ROOT;
-static LIST_HEAD(purge_vmap_area_list);
-static DEFINE_SPINLOCK(purge_vmap_area_lock);
-
 /*
  * This kmem_cache is used for vmap_area objects. Instead of
  * allocating from slab we reuse an object from this cache to
@@ -844,6 +836,129 @@ static struct rb_root free_vmap_area_root = RB_ROOT;
  */
 static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);
 
+/*
+ * This structure couples a list and an rb-tree into one entity
+ * protected by a single lock. Nodes are kept sorted in ascending
+ * address order; the list gives O(1) access to the left/right
+ * neighbors and is also used for sequential traversal.
+ */
+struct rb_list {
+       struct rb_root root;
+       struct list_head head;
+       spinlock_t lock;
+};
+
+/*
+ * A fast size storage caches ready-to-reuse VAs of up to 1M in size.
+ * Each pool is a list of VAs of one particular size; pool[i] holds
+ * VAs that span exactly i + 1 pages.
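+ *
+ * For example, with 4 KiB pages a three-page VA is cached in
+ * pool[2], while anything larger than MAX_VA_SIZE_PAGES pages is
+ * never cached (size_to_va_pool() returns NULL for it).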
+ */
+#define MAX_VA_SIZE_PAGES 256
+
+struct vmap_pool {
+       struct list_head head;
+       unsigned long len;
+};
+
+/*
+ * Per-node vmap data. Users make use of nodes instead of a single
+ * global heap, which balances accesses and mitigates lock
+ * contention.
+ */
+static struct vmap_node {
+       /* Simple size segregated storage. */
+       struct vmap_pool pool[MAX_VA_SIZE_PAGES];
+       spinlock_t pool_lock;
+       bool skip_populate;
+
+       /* Bookkeeping data of this node. */
+       struct rb_list busy;
+       struct rb_list lazy;
+
+       /*
+        * Ready-to-free areas.
+        */
+       struct list_head purge_list;
+       struct work_struct purge_work;
+       unsigned long nr_purged;
+} single;
+
+/*
+ * The initial setup consists of one single node, i.e. balancing
+ * is effectively disabled. Later on, after vmap is initialized,
+ * these parameters are updated based on the system capacity.
+ */
+static struct vmap_node *vmap_nodes = &single;
+static __read_mostly unsigned int nr_vmap_nodes = 1;
+static __read_mostly unsigned int vmap_zone_size = 1;
+
+static inline unsigned int
+addr_to_node_id(unsigned long addr)
+{
+       return (addr / vmap_zone_size) % nr_vmap_nodes;
+}
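+
+/*
+ * Example: assuming 4 KiB pages, the 16-page zone size set up in
+ * vmap_init_nodes() (vmap_zone_size == 65536) and, say, eight nodes,
+ * the address 0x100000 falls into zone 16 and thus node 16 % 8 == 0.
+ */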
+
+static inline struct vmap_node *
+addr_to_node(unsigned long addr)
+{
+       return &vmap_nodes[addr_to_node_id(addr)];
+}
+
+static inline struct vmap_node *
+id_to_node(unsigned int id)
+{
+       return &vmap_nodes[id % nr_vmap_nodes];
+}
+
+/*
+ * The value 0 represents "no node", which is why an encoded
+ * value is the node id incremented by 1; it is always greater
+ * than 0. A node id that can be encoded lies within
+ * [0:nr_vmap_nodes - 1]. If the passed node_id is not valid,
+ * 0 is returned.
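+ *
+ * For instance, node id 3 is encoded as (3 + 1) << BITS_PER_BYTE,
+ * i.e. 0x400, and is kept in the upper bits of va->flags, so the
+ * low byte stays free for flags such as VMAP_RAM; decode_vn_id()
+ * reverses the shift.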
+ */
+static unsigned int
+encode_vn_id(unsigned int node_id)
+{
+       /* Can store U8_MAX [0:254] nodes. */
+       if (node_id < nr_vmap_nodes)
+               return (node_id + 1) << BITS_PER_BYTE;
+
+       /* Warn and no node encoded. */
+       WARN_ONCE(1, "Encode wrong node id (%u)\n", node_id);
+       return 0;
+}
+
+/*
+ * Returns the decoded node id, which lies within the valid range
+ * [0:nr_vmap_nodes - 1]. If the extracted data is bogus,
+ * nr_vmap_nodes is returned instead.
+ */
+static unsigned int
+decode_vn_id(unsigned int val)
+{
+       unsigned int node_id = (val >> BITS_PER_BYTE) - 1;
+
+       /* Can store U8_MAX [0:254] nodes. */
+       if (node_id < nr_vmap_nodes)
+               return node_id;
+
+       /* If it was _not_ zero, warn. */
+       WARN_ONCE(node_id != UINT_MAX,
+               "Decode wrong node id (%d)\n", node_id);
+
+       return nr_vmap_nodes;
+}
+
+static bool
+is_vn_id_valid(unsigned int node_id)
+{
+       if (node_id < nr_vmap_nodes)
+               return true;
+
+       return false;
+}
+
 static __always_inline unsigned long
 va_size(struct vmap_area *va)
 {
@@ -875,10 +990,11 @@ unsigned long vmalloc_nr_pages(void)
 }
 
 /* Look up the first VA which satisfies addr < va_end, NULL if none. */
-static struct vmap_area *find_vmap_area_exceed_addr(unsigned long addr)
+static struct vmap_area *
+__find_vmap_area_exceed_addr(unsigned long addr, struct rb_root *root)
 {
        struct vmap_area *va = NULL;
-       struct rb_node *n = vmap_area_root.rb_node;
+       struct rb_node *n = root->rb_node;
 
        addr = (unsigned long)kasan_reset_tag((void *)addr);
 
@@ -899,6 +1015,41 @@ static struct vmap_area *find_vmap_area_exceed_addr(unsigned long addr)
        return va;
 }
 
+/*
+ * Returns the node in which the first VA that satisfies addr < va_end
+ * resides. On success the node is returned locked; the caller is
+ * responsible for unlocking it once the VA no longer needs to be accessed.
+ *
+ * Returns NULL if nothing found.
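+ *
+ * The only user so far is vread_iter(), which walks busy VAs across
+ * nodes by re-invoking this helper with the next address.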
+ */
+static struct vmap_node *
+find_vmap_area_exceed_addr_lock(unsigned long addr, struct vmap_area **va)
+{
+       struct vmap_node *vn, *va_node = NULL;
+       struct vmap_area *va_lowest;
+       int i;
+
+       for (i = 0; i < nr_vmap_nodes; i++) {
+               vn = &vmap_nodes[i];
+
+               spin_lock(&vn->busy.lock);
+               va_lowest = __find_vmap_area_exceed_addr(addr, &vn->busy.root);
+               if (va_lowest) {
+                       if (!va_node || va_lowest->va_start < (*va)->va_start) {
+                               if (va_node)
+                                       spin_unlock(&va_node->busy.lock);
+
+                               *va = va_lowest;
+                               va_node = vn;
+                               continue;
+                       }
+               }
+               spin_unlock(&vn->busy.lock);
+       }
+
+       return va_node;
+}
+
 static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
 {
        struct rb_node *n = root->rb_node;
@@ -1454,9 +1605,9 @@ classify_va_fit_type(struct vmap_area *va,
 }
 
 static __always_inline int
-adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
-                     struct vmap_area *va, unsigned long nva_start_addr,
-                     unsigned long size)
+va_clip(struct rb_root *root, struct list_head *head,
+               struct vmap_area *va, unsigned long nva_start_addr,
+               unsigned long size)
 {
        struct vmap_area *lva = NULL;
        enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);
@@ -1553,6 +1704,32 @@ adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
        return 0;
 }
 
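+/*
+ * Carve a chunk of "size" bytes at "nva_start_addr" out of the free
+ * area "va". Returns the start address on success, or "vend" as a
+ * failure sentinel, which is what __alloc_vmap_area() checks for.
+ */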
+static unsigned long
+va_alloc(struct vmap_area *va,
+               struct rb_root *root, struct list_head *head,
+               unsigned long size, unsigned long align,
+               unsigned long vstart, unsigned long vend)
+{
+       unsigned long nva_start_addr;
+       int ret;
+
+       if (va->va_start > vstart)
+               nva_start_addr = ALIGN(va->va_start, align);
+       else
+               nva_start_addr = ALIGN(vstart, align);
+
+       /* Check the "vend" restriction. */
+       if (nva_start_addr + size > vend)
+               return vend;
+
+       /* Update the free vmap_area. */
+       ret = va_clip(root, head, va, nva_start_addr, size);
+       if (WARN_ON_ONCE(ret))
+               return vend;
+
+       return nva_start_addr;
+}
+
 /*
  * Returns a start address of the newly allocated area, if success.
  * Otherwise a vend is returned that indicates failure.
@@ -1565,7 +1742,6 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head,
        bool adjust_search_size = true;
        unsigned long nva_start_addr;
        struct vmap_area *va;
-       int ret;
 
        /*
         * Do not adjust when:
@@ -1583,18 +1759,8 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head,
        if (unlikely(!va))
                return vend;
 
-       if (va->va_start > vstart)
-               nva_start_addr = ALIGN(va->va_start, align);
-       else
-               nva_start_addr = ALIGN(vstart, align);
-
-       /* Check the "vend" restriction. */
-       if (nva_start_addr + size > vend)
-               return vend;
-
-       /* Update the free vmap_area. */
-       ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size);
-       if (WARN_ON_ONCE(ret))
+       nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend);
+       if (nva_start_addr == vend)
                return vend;
 
 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
@@ -1609,12 +1775,14 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head,
  */
 static void free_vmap_area(struct vmap_area *va)
 {
+       struct vmap_node *vn = addr_to_node(va->va_start);
+
        /*
         * Remove from the busy tree/list.
         */
-       spin_lock(&vmap_area_lock);
-       unlink_va(va, &vmap_area_root);
-       spin_unlock(&vmap_area_lock);
+       spin_lock(&vn->busy.lock);
+       unlink_va(va, &vn->busy.root);
+       spin_unlock(&vn->busy.lock);
 
        /*
         * Insert/Merge it back to the free tree/list.
@@ -1647,6 +1815,104 @@ preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
                kmem_cache_free(vmap_area_cachep, va);
 }
 
+static struct vmap_pool *
+size_to_va_pool(struct vmap_node *vn, unsigned long size)
+{
+       unsigned int idx = (size - 1) / PAGE_SIZE;
+
+       if (idx < MAX_VA_SIZE_PAGES)
+               return &vn->pool[idx];
+
+       return NULL;
+}
+
+static bool
+node_pool_add_va(struct vmap_node *n, struct vmap_area *va)
+{
+       struct vmap_pool *vp;
+
+       vp = size_to_va_pool(n, va_size(va));
+       if (!vp)
+               return false;
+
+       spin_lock(&n->pool_lock);
+       list_add(&va->list, &vp->head);
+       WRITE_ONCE(vp->len, vp->len + 1);
+       spin_unlock(&n->pool_lock);
+
+       return true;
+}
+
+static struct vmap_area *
+node_pool_del_va(struct vmap_node *vn, unsigned long size,
+               unsigned long align, unsigned long vstart,
+               unsigned long vend)
+{
+       struct vmap_area *va = NULL;
+       struct vmap_pool *vp;
+       int err = 0;
+
+       vp = size_to_va_pool(vn, size);
+       if (!vp || list_empty(&vp->head))
+               return NULL;
+
+       spin_lock(&vn->pool_lock);
+       if (!list_empty(&vp->head)) {
+               va = list_first_entry(&vp->head, struct vmap_area, list);
+
+               if (IS_ALIGNED(va->va_start, align)) {
+                       /*
+                        * Sanity-check the VA and emit a warning if
+                        * any of the checks below detects an error.
+                        */
+                       err |= (va_size(va) != size);
+                       err |= (va->va_start < vstart);
+                       err |= (va->va_end > vend);
+
+                       if (!WARN_ON_ONCE(err)) {
+                               list_del_init(&va->list);
+                               WRITE_ONCE(vp->len, vp->len - 1);
+                       } else {
+                               va = NULL;
+                       }
+               } else {
+                       list_move_tail(&va->list, &vp->head);
+                       va = NULL;
+               }
+       }
+       spin_unlock(&vn->pool_lock);
+
+       return va;
+}
+
+static struct vmap_area *
+node_alloc(unsigned long size, unsigned long align,
+               unsigned long vstart, unsigned long vend,
+               unsigned long *addr, unsigned int *vn_id)
+{
+       struct vmap_area *va;
+
+       *vn_id = 0;
+       *addr = vend;
+
+       /*
+        * Fall back to the global heap if this is not a vmalloc
+        * request or there is only one node.
+        */
+       if (vstart != VMALLOC_START || vend != VMALLOC_END ||
+                       nr_vmap_nodes == 1)
+               return NULL;
+
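+       /*
+        * Pick the pool of the node this CPU maps to; for example,
+        * CPU 11 with eight nodes would use node 11 % 8 == 3.
+        */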
+       *vn_id = raw_smp_processor_id() % nr_vmap_nodes;
+       va = node_pool_del_va(id_to_node(*vn_id), size, align, vstart, vend);
+       *vn_id = encode_vn_id(*vn_id);
+
+       if (va)
+               *addr = va->va_start;
+
+       return va;
+}
+
 /*
  * Allocate a region of KVA of the specified size and alignment, within the
  * vstart and vend.
@@ -1657,9 +1923,11 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
                                int node, gfp_t gfp_mask,
                                unsigned long va_flags)
 {
+       struct vmap_node *vn;
        struct vmap_area *va;
        unsigned long freed;
        unsigned long addr;
+       unsigned int vn_id;
        int purged = 0;
        int ret;
 
@@ -1670,23 +1938,37 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
                return ERR_PTR(-EBUSY);
 
        might_sleep();
-       gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
-
-       va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
-       if (unlikely(!va))
-               return ERR_PTR(-ENOMEM);
 
        /*
-        * Only scan the relevant parts containing pointers to other objects
-        * to avoid false negatives.
+        * If no VA is found here and one is obtained from the global
+        * heap instead, it is still tagged with this "vn_id" so that
+        * it is returned to this node's pool later on. This way the
+        * pools are populated based on user demand.
+        *
+        * On success a ready-to-use VA is returned.
         */
-       kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
+       va = node_alloc(size, align, vstart, vend, &addr, &vn_id);
+       if (!va) {
+               gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
+
+               va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
+               if (unlikely(!va))
+                       return ERR_PTR(-ENOMEM);
+
+               /*
+                * Only scan the relevant parts containing pointers to other objects
+                * to avoid false negatives.
+                */
+               kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
+       }
 
 retry:
-       preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
-       addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
-               size, align, vstart, vend);
-       spin_unlock(&free_vmap_area_lock);
+       if (addr == vend) {
+               preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
+               addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
+                       size, align, vstart, vend);
+               spin_unlock(&free_vmap_area_lock);
+       }
 
        trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend);
 
@@ -1700,11 +1982,13 @@ retry:
        va->va_start = addr;
        va->va_end = addr + size;
        va->vm = NULL;
-       va->flags = va_flags;
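+       /*
+        * Keep the encoded node id in the upper bits of va->flags, so
+        * that free_vmap_area_noflush() can route this VA back to the
+        * lazy list and pool of the node it is associated with.
+        */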
+       va->flags = (va_flags | vn_id);
 
-       spin_lock(&vmap_area_lock);
-       insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
-       spin_unlock(&vmap_area_lock);
+       vn = addr_to_node(va->va_start);
+
+       spin_lock(&vn->busy.lock);
+       insert_vmap_area(va, &vn->busy.root, &vn->busy.head);
+       spin_unlock(&vn->busy.lock);
 
        BUG_ON(!IS_ALIGNED(va->va_start, align));
        BUG_ON(va->va_start < vstart);
@@ -1789,70 +2073,199 @@ static DEFINE_MUTEX(vmap_purge_lock);
 
 /* for per-CPU blocks */
 static void purge_fragmented_blocks_allcpus(void);
+static cpumask_t purge_nodes;
 
-/*
- * Purges all lazily-freed vmap areas.
- */
-static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
+static void
+reclaim_list_global(struct list_head *head)
 {
-       unsigned long resched_threshold;
-       unsigned int num_purged_areas = 0;
-       struct list_head local_purge_list;
-       struct vmap_area *va, *n_va;
+       struct vmap_area *va, *n;
 
-       lockdep_assert_held(&vmap_purge_lock);
+       if (list_empty(head))
+               return;
 
-       spin_lock(&purge_vmap_area_lock);
-       purge_vmap_area_root = RB_ROOT;
-       list_replace_init(&purge_vmap_area_list, &local_purge_list);
-       spin_unlock(&purge_vmap_area_lock);
+       spin_lock(&free_vmap_area_lock);
+       list_for_each_entry_safe(va, n, head, list)
+               merge_or_add_vmap_area_augment(va,
+                       &free_vmap_area_root, &free_vmap_area_list);
+       spin_unlock(&free_vmap_area_lock);
+}
 
-       if (unlikely(list_empty(&local_purge_list)))
-               goto out;
+static void
+decay_va_pool_node(struct vmap_node *vn, bool full_decay)
+{
+       struct vmap_area *va, *nva;
+       struct list_head decay_list;
+       struct rb_root decay_root;
+       unsigned long n_decay;
+       int i;
 
-       start = min(start,
-               list_first_entry(&local_purge_list,
-                       struct vmap_area, list)->va_start);
+       decay_root = RB_ROOT;
+       INIT_LIST_HEAD(&decay_list);
 
-       end = max(end,
-               list_last_entry(&local_purge_list,
-                       struct vmap_area, list)->va_end);
+       for (i = 0; i < MAX_VA_SIZE_PAGES; i++) {
+               struct list_head tmp_list;
 
-       flush_tlb_kernel_range(start, end);
-       resched_threshold = lazy_max_pages() << 1;
+               if (list_empty(&vn->pool[i].head))
+                       continue;
 
-       spin_lock(&free_vmap_area_lock);
-       list_for_each_entry_safe(va, n_va, &local_purge_list, list) {
-               unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
-               unsigned long orig_start = va->va_start;
-               unsigned long orig_end = va->va_end;
+               INIT_LIST_HEAD(&tmp_list);
+
+               /* Detach the pool, so no-one can access it. */
+               spin_lock(&vn->pool_lock);
+               list_replace_init(&vn->pool[i].head, &tmp_list);
+               spin_unlock(&vn->pool_lock);
+
+               if (full_decay)
+                       WRITE_ONCE(vn->pool[i].len, 0);
+
+               /* Decay the pool by ~25% of its remaining objects. */
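+               /* E.g. a pool caching 8 VAs releases 8 >> 2 == 2 per pass. */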
+               n_decay = vn->pool[i].len >> 2;
+
+               list_for_each_entry_safe(va, nva, &tmp_list, list) {
+                       list_del_init(&va->list);
+                       merge_or_add_vmap_area(va, &decay_root, &decay_list);
+
+                       if (!full_decay) {
+                               WRITE_ONCE(vn->pool[i].len, vn->pool[i].len - 1);
+
+                               if (!--n_decay)
+                                       break;
+                       }
+               }
 
                /*
-                * Finally insert or merge lazily-freed area. It is
-                * detached and there is no need to "unlink" it from
-                * anything.
+                * Attach the pool back if it has only been partly decayed.
+                * Note that no other context is supposed to populate the
+                * pool in the meantime, therefore a simple list replace
+                * operation is sufficient here.
                 */
-               va = merge_or_add_vmap_area_augment(va, &free_vmap_area_root,
-                               &free_vmap_area_list);
+               if (!full_decay && !list_empty(&tmp_list)) {
+                       spin_lock(&vn->pool_lock);
+                       list_replace_init(&tmp_list, &vn->pool[i].head);
+                       spin_unlock(&vn->pool_lock);
+               }
+       }
 
-               if (!va)
-                       continue;
+       reclaim_list_global(&decay_list);
+}
+
+static void purge_vmap_node(struct work_struct *work)
+{
+       struct vmap_node *vn = container_of(work,
+               struct vmap_node, purge_work);
+       struct vmap_area *va, *n_va;
+       LIST_HEAD(local_list);
+
+       vn->nr_purged = 0;
+
+       list_for_each_entry_safe(va, n_va, &vn->purge_list, list) {
+               unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
+               unsigned long orig_start = va->va_start;
+               unsigned long orig_end = va->va_end;
+               unsigned int vn_id = decode_vn_id(va->flags);
+
+               list_del_init(&va->list);
 
                if (is_vmalloc_or_module_addr((void *)orig_start))
                        kasan_release_vmalloc(orig_start, orig_end,
                                              va->va_start, va->va_end);
 
                atomic_long_sub(nr, &vmap_lazy_nr);
-               num_purged_areas++;
+               vn->nr_purged++;
 
-               if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
-                       cond_resched_lock(&free_vmap_area_lock);
+               if (is_vn_id_valid(vn_id) && !vn->skip_populate)
+                       if (node_pool_add_va(vn, va))
+                               continue;
+
+               /* Go back to global. */
+               list_add(&va->list, &local_list);
        }
-       spin_unlock(&free_vmap_area_lock);
 
-out:
-       trace_purge_vmap_area_lazy(start, end, num_purged_areas);
-       return num_purged_areas > 0;
+       reclaim_list_global(&local_list);
+}
+
+/*
+ * Purges all lazily-freed vmap areas.
+ */
+static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end,
+               bool full_pool_decay)
+{
+       unsigned long nr_purged_areas = 0;
+       unsigned int nr_purge_helpers;
+       unsigned int nr_purge_nodes;
+       struct vmap_node *vn;
+       int i;
+
+       lockdep_assert_held(&vmap_purge_lock);
+
+       /*
+        * Use cpumask to mark which node has to be processed.
+        * Use a cpumask to mark which nodes have to be processed.
+       purge_nodes = CPU_MASK_NONE;
+
+       for (i = 0; i < nr_vmap_nodes; i++) {
+               vn = &vmap_nodes[i];
+
+               INIT_LIST_HEAD(&vn->purge_list);
+               vn->skip_populate = full_pool_decay;
+               decay_va_pool_node(vn, full_pool_decay);
+
+               if (RB_EMPTY_ROOT(&vn->lazy.root))
+                       continue;
+
+               spin_lock(&vn->lazy.lock);
+               WRITE_ONCE(vn->lazy.root.rb_node, NULL);
+               list_replace_init(&vn->lazy.head, &vn->purge_list);
+               spin_unlock(&vn->lazy.lock);
+
+               start = min(start, list_first_entry(&vn->purge_list,
+                       struct vmap_area, list)->va_start);
+
+               end = max(end, list_last_entry(&vn->purge_list,
+                       struct vmap_area, list)->va_end);
+
+               cpumask_set_cpu(i, &purge_nodes);
+       }
+
+       nr_purge_nodes = cpumask_weight(&purge_nodes);
+       if (nr_purge_nodes > 0) {
+               flush_tlb_kernel_range(start, end);
+
+               /*
+                * Use one extra helper per full lazy_max_pages() set of
+                * pending pages, minus one: this context does its share.
+                */
+               nr_purge_helpers = atomic_long_read(&vmap_lazy_nr) / lazy_max_pages();
+               nr_purge_helpers = clamp(nr_purge_helpers, 1U, nr_purge_nodes) - 1;
+
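+               /*
+                * E.g. three full lazy_max_pages() sets pending across
+                * eight purge nodes yield two extra helpers: two nodes
+                * are purged via workers, the rest by this context.
+                */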
+               for_each_cpu(i, &purge_nodes) {
+                       vn = &vmap_nodes[i];
+
+                       if (nr_purge_helpers > 0) {
+                               INIT_WORK(&vn->purge_work, purge_vmap_node);
+
+                               if (cpumask_test_cpu(i, cpu_online_mask))
+                                       schedule_work_on(i, &vn->purge_work);
+                               else
+                                       schedule_work(&vn->purge_work);
+
+                               nr_purge_helpers--;
+                       } else {
+                               vn->purge_work.func = NULL;
+                               purge_vmap_node(&vn->purge_work);
+                               nr_purged_areas += vn->nr_purged;
+                       }
+               }
+
+               for_each_cpu(i, &purge_nodes) {
+                       vn = &vmap_nodes[i];
+
+                       if (vn->purge_work.func) {
+                               flush_work(&vn->purge_work);
+                               nr_purged_areas += vn->nr_purged;
+                       }
+               }
+       }
+
+       trace_purge_vmap_area_lazy(start, end, nr_purged_areas);
+       return nr_purged_areas > 0;
 }
 
 /*
@@ -1863,22 +2276,15 @@ static void reclaim_and_purge_vmap_areas(void)
 {
        mutex_lock(&vmap_purge_lock);
        purge_fragmented_blocks_allcpus();
-       __purge_vmap_area_lazy(ULONG_MAX, 0);
+       __purge_vmap_area_lazy(ULONG_MAX, 0, true);
        mutex_unlock(&vmap_purge_lock);
 }
 
 static void drain_vmap_area_work(struct work_struct *work)
 {
-       unsigned long nr_lazy;
-
-       do {
-               mutex_lock(&vmap_purge_lock);
-               __purge_vmap_area_lazy(ULONG_MAX, 0);
-               mutex_unlock(&vmap_purge_lock);
-
-               /* Recheck if further work is required. */
-               nr_lazy = atomic_long_read(&vmap_lazy_nr);
-       } while (nr_lazy > lazy_max_pages());
+       mutex_lock(&vmap_purge_lock);
+       __purge_vmap_area_lazy(ULONG_MAX, 0, false);
+       mutex_unlock(&vmap_purge_lock);
 }
 
 /*
@@ -1890,6 +2296,8 @@ static void free_vmap_area_noflush(struct vmap_area *va)
 {
        unsigned long nr_lazy_max = lazy_max_pages();
        unsigned long va_start = va->va_start;
+       unsigned int vn_id = decode_vn_id(va->flags);
+       struct vmap_node *vn;
        unsigned long nr_lazy;
 
        if (WARN_ON_ONCE(!list_empty(&va->list)))
@@ -1899,12 +2307,15 @@ static void free_vmap_area_noflush(struct vmap_area *va)
                                PAGE_SHIFT, &vmap_lazy_nr);
 
        /*
-        * Merge or place it to the purge tree/list.
+        * If the VA was requested from a certain node, return it to
+        * that node, i.e. to its pool, for later reuse.
         */
-       spin_lock(&purge_vmap_area_lock);
-       merge_or_add_vmap_area(va,
-               &purge_vmap_area_root, &purge_vmap_area_list);
-       spin_unlock(&purge_vmap_area_lock);
+       vn = is_vn_id_valid(vn_id) ?
+               id_to_node(vn_id) : addr_to_node(va->va_start);
+
+       spin_lock(&vn->lazy.lock);
+       insert_vmap_area(va, &vn->lazy.root, &vn->lazy.head);
+       spin_unlock(&vn->lazy.lock);
 
        trace_free_vmap_area_noflush(va_start, nr_lazy, nr_lazy_max);
 
@@ -1928,26 +2339,62 @@ static void free_unmap_vmap_area(struct vmap_area *va)
 
 struct vmap_area *find_vmap_area(unsigned long addr)
 {
+       struct vmap_node *vn;
        struct vmap_area *va;
+       int i, j;
 
-       spin_lock(&vmap_area_lock);
-       va = __find_vmap_area(addr, &vmap_area_root);
-       spin_unlock(&vmap_area_lock);
+       /*
+        * addr_to_node_id(addr) converts an address to the index of the
+        * node where the VA is expected to reside. If a VA spans several
+        * zones and the passed addr is not equal to va->va_start, which
+        * is uncommon, extra nodes may need to be scanned. For example:
+        *
+        *      <----va---->
+        * -|-----|-----|-----|-----|-
+        *     1     2     0     1
+        *
+        * The VA resides in node 1 whereas it spans nodes 1, 2 and 0. If
+        * the passed addr falls within node 2 or 0, extra work is needed.
+        */
+       i = j = addr_to_node_id(addr);
+       do {
+               vn = &vmap_nodes[i];
 
-       return va;
+               spin_lock(&vn->busy.lock);
+               va = __find_vmap_area(addr, &vn->busy.root);
+               spin_unlock(&vn->busy.lock);
+
+               if (va)
+                       return va;
+       } while ((i = (i + 1) % nr_vmap_nodes) != j);
+
+       return NULL;
 }
 
 static struct vmap_area *find_unlink_vmap_area(unsigned long addr)
 {
+       struct vmap_node *vn;
        struct vmap_area *va;
+       int i, j;
 
-       spin_lock(&vmap_area_lock);
-       va = __find_vmap_area(addr, &vmap_area_root);
-       if (va)
-               unlink_va(va, &vmap_area_root);
-       spin_unlock(&vmap_area_lock);
+       /*
+        * See the comment in find_vmap_area() about this loop.
+        */
+       i = j = addr_to_node_id(addr);
+       do {
+               vn = &vmap_nodes[i];
 
-       return va;
+               spin_lock(&vn->busy.lock);
+               va = __find_vmap_area(addr, &vn->busy.root);
+               if (va)
+                       unlink_va(va, &vn->busy.root);
+               spin_unlock(&vn->busy.lock);
+
+               if (va)
+                       return va;
+       } while ((i = (i + 1) % nr_vmap_nodes) != j);
+
+       return NULL;
 }
 
 /*** Per cpu kva allocator ***/
@@ -2149,6 +2596,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
 
 static void free_vmap_block(struct vmap_block *vb)
 {
+       struct vmap_node *vn;
        struct vmap_block *tmp;
        struct xarray *xa;
 
@@ -2156,9 +2604,10 @@ static void free_vmap_block(struct vmap_block *vb)
        tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start));
        BUG_ON(tmp != vb);
 
-       spin_lock(&vmap_area_lock);
-       unlink_va(vb->va, &vmap_area_root);
-       spin_unlock(&vmap_area_lock);
+       vn = addr_to_node(vb->va->va_start);
+       spin_lock(&vn->busy.lock);
+       unlink_va(vb->va, &vn->busy.root);
+       spin_unlock(&vn->busy.lock);
 
        free_vmap_area_noflush(vb->va);
        kfree_rcu(vb, rcu_head);
@@ -2375,7 +2824,7 @@ static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
        }
        free_purged_blocks(&purge_list);
 
-       if (!__purge_vmap_area_lazy(start, end) && flush)
+       if (!__purge_vmap_area_lazy(start, end, false) && flush)
                flush_tlb_kernel_range(start, end);
        mutex_unlock(&vmap_purge_lock);
 }
@@ -2569,47 +3018,6 @@ void __init vm_area_register_early(struct vm_struct *vm, size_t align)
        kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
 }
 
-static void vmap_init_free_space(void)
-{
-       unsigned long vmap_start = 1;
-       const unsigned long vmap_end = ULONG_MAX;
-       struct vmap_area *busy, *free;
-
-       /*
-        *     B     F     B     B     B     F
-        * -|-----|.....|-----|-----|-----|.....|-
-        *  |           The KVA space           |
-        *  |<--------------------------------->|
-        */
-       list_for_each_entry(busy, &vmap_area_list, list) {
-               if (busy->va_start - vmap_start > 0) {
-                       free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
-                       if (!WARN_ON_ONCE(!free)) {
-                               free->va_start = vmap_start;
-                               free->va_end = busy->va_start;
-
-                               insert_vmap_area_augment(free, NULL,
-                                       &free_vmap_area_root,
-                                               &free_vmap_area_list);
-                       }
-               }
-
-               vmap_start = busy->va_end;
-       }
-
-       if (vmap_end - vmap_start > 0) {
-               free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
-               if (!WARN_ON_ONCE(!free)) {
-                       free->va_start = vmap_start;
-                       free->va_end = vmap_end;
-
-                       insert_vmap_area_augment(free, NULL,
-                               &free_vmap_area_root,
-                                       &free_vmap_area_list);
-               }
-       }
-}
-
 static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
        struct vmap_area *va, unsigned long flags, const void *caller)
 {
@@ -2623,9 +3031,11 @@ static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
                              unsigned long flags, const void *caller)
 {
-       spin_lock(&vmap_area_lock);
+       struct vmap_node *vn = addr_to_node(va->va_start);
+
+       spin_lock(&vn->busy.lock);
        setup_vmalloc_vm_locked(vm, va, flags, caller);
-       spin_unlock(&vmap_area_lock);
+       spin_unlock(&vn->busy.lock);
 }
 
 static void clear_vm_uninitialized_flag(struct vm_struct *vm)
@@ -3813,10 +4223,12 @@ finished:
  */
 long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
 {
+       struct vmap_node *vn;
        struct vmap_area *va;
        struct vm_struct *vm;
        char *vaddr;
        size_t n, size, flags, remains;
+       unsigned long next;
 
        addr = kasan_reset_tag(addr);
 
@@ -3826,16 +4238,15 @@ long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
 
        remains = count;
 
-       spin_lock(&vmap_area_lock);
-       va = find_vmap_area_exceed_addr((unsigned long)addr);
-       if (!va)
+       vn = find_vmap_area_exceed_addr_lock((unsigned long) addr, &va);
+       if (!vn)
                goto finished_zero;
 
        /* no intersects with alive vmap_area */
        if ((unsigned long)addr + remains <= va->va_start)
                goto finished_zero;
 
-       list_for_each_entry_from(va, &vmap_area_list, list) {
+       do {
                size_t copied;
 
                if (remains == 0)
@@ -3850,10 +4261,10 @@ long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
                WARN_ON(flags == VMAP_BLOCK);
 
                if (!vm && !flags)
-                       continue;
+                       goto next_va;
 
                if (vm && (vm->flags & VM_UNINITIALIZED))
-                       continue;
+                       goto next_va;
 
                /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
                smp_rmb();
@@ -3862,7 +4273,7 @@ long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
                size = vm ? get_vm_area_size(vm) : va_size(va);
 
                if (addr >= vaddr + size)
-                       continue;
+                       goto next_va;
 
                if (addr < vaddr) {
                        size_t to_zero = min_t(size_t, vaddr - addr, remains);
@@ -3891,15 +4302,22 @@ long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
 
                if (copied != n)
                        goto finished;
-       }
+
+       next_va:
+               next = va->va_end;
+               spin_unlock(&vn->busy.lock);
+       } while ((vn = find_vmap_area_exceed_addr_lock(next, &va)));
 
 finished_zero:
-       spin_unlock(&vmap_area_lock);
+       if (vn)
+               spin_unlock(&vn->busy.lock);
+
        /* zero-fill memory holes */
        return count - remains + zero_iter(iter, remains);
 finished:
        /* Nothing remains, or We couldn't copy/zero everything. */
-       spin_unlock(&vmap_area_lock);
+       if (vn)
+               spin_unlock(&vn->busy.lock);
 
        return count - remains;
 }
@@ -4212,9 +4630,8 @@ retry:
                        /* It is a BUG(), but trigger recovery instead. */
                        goto recovery;
 
-               ret = adjust_va_to_fit_type(&free_vmap_area_root,
-                                           &free_vmap_area_list,
-                                           va, start, size);
+               ret = va_clip(&free_vmap_area_root,
+                       &free_vmap_area_list, va, start, size);
                if (WARN_ON_ONCE(unlikely(ret)))
                        /* It is a BUG(), but trigger recovery instead. */
                        goto recovery;
@@ -4234,14 +4651,15 @@ retry:
        }
 
        /* insert all vm's */
-       spin_lock(&vmap_area_lock);
        for (area = 0; area < nr_vms; area++) {
-               insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list);
+               struct vmap_node *vn = addr_to_node(vas[area]->va_start);
 
+               spin_lock(&vn->busy.lock);
+               insert_vmap_area(vas[area], &vn->busy.root, &vn->busy.head);
                setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
                                 pcpu_get_vm_areas);
+               spin_unlock(&vn->busy.lock);
        }
-       spin_unlock(&vmap_area_lock);
 
        /*
         * Mark allocated areas as accessible. Do it now as a best-effort
@@ -4350,60 +4768,39 @@ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
 #ifdef CONFIG_PRINTK
 bool vmalloc_dump_obj(void *object)
 {
-       void *objp = (void *)PAGE_ALIGN((unsigned long)object);
        const void *caller;
        struct vm_struct *vm;
        struct vmap_area *va;
+       struct vmap_node *vn;
        unsigned long addr;
        unsigned int nr_pages;
 
-       if (!spin_trylock(&vmap_area_lock))
+       addr = PAGE_ALIGN((unsigned long) object);
+       vn = addr_to_node(addr);
+
+       if (!spin_trylock(&vn->busy.lock))
                return false;
-       va = __find_vmap_area((unsigned long)objp, &vmap_area_root);
-       if (!va) {
-               spin_unlock(&vmap_area_lock);
+
+       va = __find_vmap_area(addr, &vn->busy.root);
+       if (!va || !va->vm) {
+               spin_unlock(&vn->busy.lock);
                return false;
        }
 
        vm = va->vm;
-       if (!vm) {
-               spin_unlock(&vmap_area_lock);
-               return false;
-       }
-       addr = (unsigned long)vm->addr;
+       addr = (unsigned long) vm->addr;
        caller = vm->caller;
        nr_pages = vm->nr_pages;
-       spin_unlock(&vmap_area_lock);
+       spin_unlock(&vn->busy.lock);
+
        pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
                nr_pages, addr, caller);
+
        return true;
 }
 #endif
 
 #ifdef CONFIG_PROC_FS
-static void *s_start(struct seq_file *m, loff_t *pos)
-       __acquires(&vmap_purge_lock)
-       __acquires(&vmap_area_lock)
-{
-       mutex_lock(&vmap_purge_lock);
-       spin_lock(&vmap_area_lock);
-
-       return seq_list_start(&vmap_area_list, *pos);
-}
-
-static void *s_next(struct seq_file *m, void *p, loff_t *pos)
-{
-       return seq_list_next(p, &vmap_area_list, pos);
-}
-
-static void s_stop(struct seq_file *m, void *p)
-       __releases(&vmap_area_lock)
-       __releases(&vmap_purge_lock)
-{
-       spin_unlock(&vmap_area_lock);
-       mutex_unlock(&vmap_purge_lock);
-}
-
 static void show_numa_info(struct seq_file *m, struct vm_struct *v)
 {
        if (IS_ENABLED(CONFIG_NUMA)) {
@@ -4430,105 +4827,237 @@ static void show_numa_info(struct seq_file *m, struct vm_struct *v)
 
 static void show_purge_info(struct seq_file *m)
 {
+       struct vmap_node *vn;
        struct vmap_area *va;
+       int i;
+
+       for (i = 0; i < nr_vmap_nodes; i++) {
+               vn = &vmap_nodes[i];
 
-       spin_lock(&purge_vmap_area_lock);
-       list_for_each_entry(va, &purge_vmap_area_list, list) {
-               seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
-                       (void *)va->va_start, (void *)va->va_end,
-                       va->va_end - va->va_start);
+               spin_lock(&vn->lazy.lock);
+               list_for_each_entry(va, &vn->lazy.head, list) {
+                       seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
+                               (void *)va->va_start, (void *)va->va_end,
+                               va->va_end - va->va_start);
+               }
+               spin_unlock(&vn->lazy.lock);
        }
-       spin_unlock(&purge_vmap_area_lock);
 }
 
-static int s_show(struct seq_file *m, void *p)
+static int vmalloc_info_show(struct seq_file *m, void *p)
 {
+       struct vmap_node *vn;
        struct vmap_area *va;
        struct vm_struct *v;
+       int i;
 
-       va = list_entry(p, struct vmap_area, list);
+       for (i = 0; i < nr_vmap_nodes; i++) {
+               vn = &vmap_nodes[i];
 
-       if (!va->vm) {
-               if (va->flags & VMAP_RAM)
-                       seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
-                               (void *)va->va_start, (void *)va->va_end,
-                               va->va_end - va->va_start);
+               spin_lock(&vn->busy.lock);
+               list_for_each_entry(va, &vn->busy.head, list) {
+                       if (!va->vm) {
+                               if (va->flags & VMAP_RAM)
+                                       seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
+                                               (void *)va->va_start, (void *)va->va_end,
+                                               va->va_end - va->va_start);
 
-               goto final;
-       }
+                               continue;
+                       }
 
-       v = va->vm;
+                       v = va->vm;
 
-       seq_printf(m, "0x%pK-0x%pK %7ld",
-               v->addr, v->addr + v->size, v->size);
+                       seq_printf(m, "0x%pK-0x%pK %7ld",
+                               v->addr, v->addr + v->size, v->size);
 
-       if (v->caller)
-               seq_printf(m, " %pS", v->caller);
+                       if (v->caller)
+                               seq_printf(m, " %pS", v->caller);
 
-       if (v->nr_pages)
-               seq_printf(m, " pages=%d", v->nr_pages);
+                       if (v->nr_pages)
+                               seq_printf(m, " pages=%d", v->nr_pages);
 
-       if (v->phys_addr)
-               seq_printf(m, " phys=%pa", &v->phys_addr);
+                       if (v->phys_addr)
+                               seq_printf(m, " phys=%pa", &v->phys_addr);
 
-       if (v->flags & VM_IOREMAP)
-               seq_puts(m, " ioremap");
+                       if (v->flags & VM_IOREMAP)
+                               seq_puts(m, " ioremap");
 
-       if (v->flags & VM_SPARSE)
-               seq_puts(m, " sparse");
+                       if (v->flags & VM_SPARSE)
+                               seq_puts(m, " sparse");
 
-       if (v->flags & VM_ALLOC)
-               seq_puts(m, " vmalloc");
+                       if (v->flags & VM_ALLOC)
+                               seq_puts(m, " vmalloc");
 
-       if (v->flags & VM_MAP)
-               seq_puts(m, " vmap");
+                       if (v->flags & VM_MAP)
+                               seq_puts(m, " vmap");
 
-       if (v->flags & VM_USERMAP)
-               seq_puts(m, " user");
+                       if (v->flags & VM_USERMAP)
+                               seq_puts(m, " user");
 
-       if (v->flags & VM_DMA_COHERENT)
-               seq_puts(m, " dma-coherent");
+                       if (v->flags & VM_DMA_COHERENT)
+                               seq_puts(m, " dma-coherent");
 
-       if (is_vmalloc_addr(v->pages))
-               seq_puts(m, " vpages");
+                       if (is_vmalloc_addr(v->pages))
+                               seq_puts(m, " vpages");
 
-       show_numa_info(m, v);
-       seq_putc(m, '\n');
+                       show_numa_info(m, v);
+                       seq_putc(m, '\n');
+               }
+               spin_unlock(&vn->busy.lock);
+       }
 
        /*
         * As a final step, dump "unpurged" areas.
         */
-final:
-       if (list_is_last(&va->list, &vmap_area_list))
-               show_purge_info(m);
-
+       show_purge_info(m);
        return 0;
 }
 
-static const struct seq_operations vmalloc_op = {
-       .start = s_start,
-       .next = s_next,
-       .stop = s_stop,
-       .show = s_show,
-};
-
 static int __init proc_vmalloc_init(void)
 {
+       void *priv_data = NULL;
+
        if (IS_ENABLED(CONFIG_NUMA))
-               proc_create_seq_private("vmallocinfo", 0400, NULL,
-                               &vmalloc_op,
-                               nr_node_ids * sizeof(unsigned int), NULL);
-       else
-               proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
+               priv_data = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
+
+       proc_create_single_data("vmallocinfo",
+               0400, NULL, vmalloc_info_show, priv_data);
+
        return 0;
 }
 module_init(proc_vmalloc_init);
 
 #endif
 
+static void __init vmap_init_free_space(void)
+{
+       unsigned long vmap_start = 1;
+       const unsigned long vmap_end = ULONG_MAX;
+       struct vmap_area *free;
+       struct vm_struct *busy;
+
+       /*
+        *     B     F     B     B     B     F
+        * -|-----|.....|-----|-----|-----|.....|-
+        *  |           The KVA space           |
+        *  |<--------------------------------->|
+        */
+       for (busy = vmlist; busy; busy = busy->next) {
+               if ((unsigned long) busy->addr - vmap_start > 0) {
+                       free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
+                       if (!WARN_ON_ONCE(!free)) {
+                               free->va_start = vmap_start;
+                               free->va_end = (unsigned long) busy->addr;
+
+                               insert_vmap_area_augment(free, NULL,
+                                       &free_vmap_area_root,
+                                               &free_vmap_area_list);
+                       }
+               }
+
+               vmap_start = (unsigned long) busy->addr + busy->size;
+       }
+
+       if (vmap_end - vmap_start > 0) {
+               free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
+               if (!WARN_ON_ONCE(!free)) {
+                       free->va_start = vmap_start;
+                       free->va_end = vmap_end;
+
+                       insert_vmap_area_augment(free, NULL,
+                               &free_vmap_area_root,
+                                       &free_vmap_area_list);
+               }
+       }
+}
+
+static void vmap_init_nodes(void)
+{
+       struct vmap_node *vn;
+       int i, n;
+
+#if BITS_PER_LONG == 64
+       /*
+        * The maximum number of nodes is fixed and bound to 128, so
+        * the scale factor is one node per core for systems with up
+        * to that many cores.
+        *
+        * As for NUMA awareness: on bigger systems, for example
+        * multi-socket NUMA machines that can end up with thousands
+        * of cores in total, a "sub-numa-clustering" layer should be
+        * added.
+        *
+        * In that case a NUMA domain is treated as a single entity
+        * with dedicated sub-nodes in it, each describing one group
+        * or set of cores. Per-domain purging and balancing would
+        * then have to be added as well.
+        */
+       n = clamp_t(unsigned int, num_possible_cpus(), 1, 128);
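+       /* E.g. 256 CPUs still yield 128 nodes; 16 CPUs yield 16. */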
+
+       if (n > 1) {
+               vn = kmalloc_array(n, sizeof(*vn), GFP_NOWAIT | __GFP_NOWARN);
+               if (vn) {
+                       /* Node partition is 16 pages. */
+                       vmap_zone_size = (1 << 4) * PAGE_SIZE;
+                       nr_vmap_nodes = n;
+                       vmap_nodes = vn;
+               } else {
+                       pr_err("Failed to allocate an array. Disabling the node layer\n");
+               }
+       }
+#endif
+
+       for (n = 0; n < nr_vmap_nodes; n++) {
+               vn = &vmap_nodes[n];
+               vn->busy.root = RB_ROOT;
+               INIT_LIST_HEAD(&vn->busy.head);
+               spin_lock_init(&vn->busy.lock);
+
+               vn->lazy.root = RB_ROOT;
+               INIT_LIST_HEAD(&vn->lazy.head);
+               spin_lock_init(&vn->lazy.lock);
+
+               for (i = 0; i < MAX_VA_SIZE_PAGES; i++) {
+                       INIT_LIST_HEAD(&vn->pool[i].head);
+                       WRITE_ONCE(vn->pool[i].len, 0);
+               }
+
+               spin_lock_init(&vn->pool_lock);
+       }
+}
+
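+/*
+ * The shrinker below reports how many VAs are cached in the per-node
+ * pools and, when asked to scan, drains them all back to the global
+ * free space via a full decay_va_pool_node() on each node.
+ */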
+static unsigned long
+vmap_node_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+       unsigned long count;
+       struct vmap_node *vn;
+       int i, j;
+
+       for (count = 0, i = 0; i < nr_vmap_nodes; i++) {
+               vn = &vmap_nodes[i];
+
+               for (j = 0; j < MAX_VA_SIZE_PAGES; j++)
+                       count += READ_ONCE(vn->pool[j].len);
+       }
+
+       return count ? count : SHRINK_EMPTY;
+}
+
+static unsigned long
+vmap_node_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+       int i;
+
+       for (i = 0; i < nr_vmap_nodes; i++)
+               decay_va_pool_node(&vmap_nodes[i], true);
+
+       return SHRINK_STOP;
+}
+
 void __init vmalloc_init(void)
 {
+       struct shrinker *vmap_node_shrinker;
        struct vmap_area *va;
+       struct vmap_node *vn;
        struct vm_struct *tmp;
        int i;
 
@@ -4550,6 +5079,11 @@ void __init vmalloc_init(void)
                xa_init(&vbq->vmap_blocks);
        }
 
+       /*
+        * Setup nodes before importing vmlist.
+        */
+       vmap_init_nodes();
+
        /* Import existing vmlist entries. */
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
@@ -4559,7 +5093,9 @@ void __init vmalloc_init(void)
                va->va_start = (unsigned long)tmp->addr;
                va->va_end = va->va_start + tmp->size;
                va->vm = tmp;
-               insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
+
+               vn = addr_to_node(va->va_start);
+               insert_vmap_area(va, &vn->busy.root, &vn->busy.head);
        }
 
        /*
@@ -4567,4 +5103,14 @@ void __init vmalloc_init(void)
         */
        vmap_init_free_space();
        vmap_initialized = true;
+
+       vmap_node_shrinker = shrinker_alloc(0, "vmap-node");
+       if (!vmap_node_shrinker) {
+               pr_err("Failed to allocate vmap-node shrinker!\n");
+               return;
+       }
+
+       vmap_node_shrinker->count_objects = vmap_node_shrink_count;
+       vmap_node_shrinker->scan_objects = vmap_node_shrink_scan;
+       shrinker_register(vmap_node_shrinker);
 }