Merge tag 'acpi-5.17-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael...
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 6483062..9152fbd 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+/*
+ * lock ordering:
+ *     page_lock
+ *     pool->migrate_lock
+ *     class->lock
+ *     zspage->lock
+ */
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
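For orientation before the hunks below, here is a minimal user-space sketch of the documented ordering, using pthreads rather than kernel primitives; all names (pool_migrate_lock, class_lock, zspage_lock, touch_zspage_exclusively) are invented for illustration and are not zsmalloc API.

```c
/* Build with: cc -pthread -o lock_order lock_order.c */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t pool_migrate_lock = PTHREAD_RWLOCK_INITIALIZER; /* cf. pool->migrate_lock */
static pthread_mutex_t  class_lock        = PTHREAD_MUTEX_INITIALIZER;  /* cf. class->lock */
static pthread_rwlock_t zspage_lock       = PTHREAD_RWLOCK_INITIALIZER; /* cf. zspage->lock */

/* Acquire in the documented order; release in the reverse order. */
static void touch_zspage_exclusively(void)
{
	pthread_rwlock_wrlock(&pool_migrate_lock);
	pthread_mutex_lock(&class_lock);
	pthread_rwlock_wrlock(&zspage_lock);

	puts("holding pool->migrate_lock, class->lock and zspage->lock");

	pthread_rwlock_unlock(&zspage_lock);
	pthread_mutex_unlock(&class_lock);
	pthread_rwlock_unlock(&pool_migrate_lock);
}

int main(void)
{
	touch_zspage_exclusively();
	return 0;
}
```

The page_lock at the top of the hierarchy is the struct page lock taken by the page-migration core before zs_page_migrate() runs, so it has no analogue in this sketch.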
@@ -57,6 +65,7 @@
 #include <linux/wait.h>
 #include <linux/pagemap.h>
 #include <linux/fs.h>
+#include <linux/local_lock.h>
 
 #define ZSPAGE_MAGIC   0x58
 
 
 #define _PFN_BITS              (MAX_POSSIBLE_PHYSMEM_BITS - PAGE_SHIFT)
 
-/*
- * Memory for allocating for handle keeps object position by
- * encoding <page, obj_idx> and the encoded value has a room
- * in least bit(ie, look at obj_to_location).
- * We use the bit to synchronize between object access by
- * user and migration.
- */
-#define HANDLE_PIN_BIT 0
-
 /*
  * Head in allocated object should have OBJ_ALLOCATED_TAG
  * to identify the object was allocated or not.
@@ -254,11 +254,9 @@ struct zs_pool {
 #ifdef CONFIG_COMPACTION
        struct inode *inode;
        struct work_struct free_work;
-       /* A wait queue for when migration races with async_free_zspage() */
-       struct wait_queue_head migration_wait;
-       atomic_long_t isolated_pages;
-       bool destroying;
 #endif
+       /* protect page/zspage migration */
+       rwlock_t migrate_lock;
 };
 
 struct zspage {
@@ -279,6 +277,7 @@ struct zspage {
 };
 
 struct mapping_area {
+       local_lock_t lock;
        char *vm_buf; /* copy buffer for objects that span pages */
        char *vm_addr; /* address of kmap_atomic()'ed pages */
        enum zs_mapmode vm_mm; /* mapping mode */
@@ -301,6 +300,9 @@ static void zs_unregister_migration(struct zs_pool *pool);
 static void migrate_lock_init(struct zspage *zspage);
 static void migrate_read_lock(struct zspage *zspage);
 static void migrate_read_unlock(struct zspage *zspage);
+static void migrate_write_lock(struct zspage *zspage);
+static void migrate_write_lock_nested(struct zspage *zspage);
+static void migrate_write_unlock(struct zspage *zspage);
 static void kick_deferred_free(struct zs_pool *pool);
 static void init_deferred_free(struct zs_pool *pool);
 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
@@ -312,6 +314,9 @@ static void zs_unregister_migration(struct zs_pool *pool) {}
 static void migrate_lock_init(struct zspage *zspage) {}
 static void migrate_read_lock(struct zspage *zspage) {}
 static void migrate_read_unlock(struct zspage *zspage) {}
+static void migrate_write_lock(struct zspage *zspage) {}
+static void migrate_write_lock_nested(struct zspage *zspage) {}
+static void migrate_write_unlock(struct zspage *zspage) {}
 static void kick_deferred_free(struct zs_pool *pool) {}
 static void init_deferred_free(struct zs_pool *pool) {}
 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
@@ -363,14 +368,10 @@ static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
        kmem_cache_free(pool->zspage_cachep, zspage);
 }
 
+/* class->lock (which owns the handle) synchronizes races */
 static void record_obj(unsigned long handle, unsigned long obj)
 {
-       /*
-        * lsb of @obj represents handle lock while other bits
-        * represent object value the handle is pointing so
-        * updating shouldn't do store tearing.
-        */
-       WRITE_ONCE(*(unsigned long *)handle, obj);
+       *(unsigned long *)handle = obj;
 }
 
 /* zpool driver */
@@ -452,12 +453,9 @@ MODULE_ALIAS("zpool-zsmalloc");
 #endif /* CONFIG_ZPOOL */
 
 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
-static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
-
-static bool is_zspage_isolated(struct zspage *zspage)
-{
-       return zspage->isolated;
-}
+static DEFINE_PER_CPU(struct mapping_area, zs_map_area) = {
+       .lock   = INIT_LOCAL_LOCK(lock),
+};
 
 static __maybe_unused int is_first_page(struct page *page)
 {
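Embedding a local_lock_t in the per-CPU mapping_area turns the old get_cpu_var()/put_cpu_var() critical section into an explicit, named lock. A rough user-space analogue, assuming glibc's sched_getcpu() as a stand-in for per-CPU data and with all names invented for illustration:

```c
/* Build with: cc -pthread -o map_area map_area.c (GNU extensions used) */
#define _GNU_SOURCE		/* for sched_getcpu() */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

#define MAX_SLOTS 64

/* cf. struct mapping_area: a scratch buffer plus the lock that guards it */
struct map_area {
	pthread_mutex_t lock;
	char buf[4096];
};

static struct map_area areas[MAX_SLOTS] = {
	[0 ... MAX_SLOTS - 1] = { .lock = PTHREAD_MUTEX_INITIALIZER },
};

static void map_copy(const void *src, size_t len)
{
	int cpu = sched_getcpu();
	struct map_area *area;

	if (cpu < 0)
		cpu = 0;
	area = &areas[cpu % MAX_SLOTS];

	pthread_mutex_lock(&area->lock);	/* cf. local_lock(&zs_map_area.lock) */
	memcpy(area->buf, src, len < sizeof(area->buf) ? len : sizeof(area->buf));
	printf("copied %zu bytes via slot %d\n", len, cpu % MAX_SLOTS);
	pthread_mutex_unlock(&area->lock);	/* cf. local_unlock(&zs_map_area.lock) */
}

int main(void)
{
	map_copy("zsmalloc", 9);
	return 0;
}
```

The intent is the same in both worlds: whoever uses the per-CPU scratch buffer must hold its lock, which also keeps the code preemptible on PREEMPT_RT, where the local lock becomes a real sleeping lock.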
@@ -744,7 +742,6 @@ static void remove_zspage(struct size_class *class,
                                enum fullness_group fullness)
 {
        VM_BUG_ON(list_empty(&class->fullness_list[fullness]));
-       VM_BUG_ON(is_zspage_isolated(zspage));
 
        list_del_init(&zspage->list);
        class_stat_dec(class, fullness, 1);
@@ -770,13 +767,9 @@ static enum fullness_group fix_fullness_group(struct size_class *class,
        if (newfg == currfg)
                goto out;
 
-       if (!is_zspage_isolated(zspage)) {
-               remove_zspage(class, zspage, currfg);
-               insert_zspage(class, zspage, newfg);
-       }
-
+       remove_zspage(class, zspage, currfg);
+       insert_zspage(class, zspage, newfg);
        set_zspage_mapping(zspage, class_idx, newfg);
-
 out:
        return newfg;
 }
@@ -894,26 +887,6 @@ static bool obj_allocated(struct page *page, void *obj, unsigned long *phandle)
        return true;
 }
 
-static inline int testpin_tag(unsigned long handle)
-{
-       return bit_spin_is_locked(HANDLE_PIN_BIT, (unsigned long *)handle);
-}
-
-static inline int trypin_tag(unsigned long handle)
-{
-       return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle);
-}
-
-static void pin_tag(unsigned long handle) __acquires(bitlock)
-{
-       bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle);
-}
-
-static void unpin_tag(unsigned long handle) __releases(bitlock)
-{
-       bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle);
-}
-
 static void reset_page(struct page *page)
 {
        __ClearPageMovable(page);
@@ -982,6 +955,11 @@ static void free_zspage(struct zs_pool *pool, struct size_class *class,
        VM_BUG_ON(get_zspage_inuse(zspage));
        VM_BUG_ON(list_empty(&zspage->list));
 
+       /*
+        * Since zs_free cannot sleep, this function cannot call lock_page.
+        * The page locks that trylock_zspage took will be released by
+        * __free_zspage.
+        */
        if (!trylock_zspage(zspage)) {
                kick_deferred_free(pool);
                return;
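free_zspage() cannot sleep here, so it only try-locks the zspage's pages and defers the actual freeing to a worker when that fails. A user-space sketch of the same trylock-or-defer idea; the names and the "worker" message are stand-ins, not zsmalloc API:

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for kick_deferred_free() / schedule_work(&pool->free_work) */
static void defer_to_worker(void)
{
	puts("lock not available without blocking: deferring the free");
}

static void free_from_atomic_context(void)
{
	if (pthread_mutex_trylock(&page_lock) != 0) {
		defer_to_worker();
		return;
	}

	puts("lock acquired without blocking: freeing immediately");
	pthread_mutex_unlock(&page_lock);
}

int main(void)
{
	free_from_atomic_context();
	return 0;
}
```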
@@ -1277,20 +1255,26 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
         */
        BUG_ON(in_interrupt());
 
-       /* From now on, migration cannot move the object */
-       pin_tag(handle);
-
+       /* The pool->migrate_lock makes it safe to get the zspage from the handle */
+       read_lock(&pool->migrate_lock);
        obj = handle_to_obj(handle);
        obj_to_location(obj, &page, &obj_idx);
        zspage = get_zspage(page);
 
-       /* migration cannot move any subpage in this zspage */
+       /*
+        * migration cannot move any zpages in this zspage. Here, class->lock
+        * would be too heavy since callers may take some time until they call
+        * the zs_unmap_object API, so delegate the locking from the class to
+        * the zspage, which is a smaller granularity.
+        */
        migrate_read_lock(zspage);
+       read_unlock(&pool->migrate_lock);
 
        class = zspage_class(pool, zspage);
        off = (class->size * obj_idx) & ~PAGE_MASK;
 
-       area = &get_cpu_var(zs_map_area);
+       local_lock(&zs_map_area.lock);
+       area = this_cpu_ptr(&zs_map_area);
        area->vm_mm = mm;
        if (off + class->size <= PAGE_SIZE) {
                /* this object is contained entirely within a page */
@@ -1341,10 +1325,9 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
 
                __zs_unmap_object(area, pages, off, class->size);
        }
-       put_cpu_var(zs_map_area);
+       local_unlock(&zs_map_area.lock);
 
        migrate_read_unlock(zspage);
-       unpin_tag(handle);
 }
 EXPORT_SYMBOL_GPL(zs_unmap_object);
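zs_map_object()/zs_unmap_object() above use a hand-over-hand scheme: pool->migrate_lock is held only long enough to resolve the handle and pin the zspage with its own read lock, then dropped, so a long mapping window blocks migration of just that one zspage. A user-space analogue with pthread rwlocks; the names are invented for illustration:

```c
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t pool_lock   = PTHREAD_RWLOCK_INITIALIZER; /* cf. pool->migrate_lock */
static pthread_rwlock_t object_lock = PTHREAD_RWLOCK_INITIALIZER; /* cf. zspage->lock */
static int object_value = 42;	/* stands in for the zspage contents */

static int map_object(void)
{
	int val;

	pthread_rwlock_rdlock(&pool_lock);	/* safe to resolve handle -> object */
	pthread_rwlock_rdlock(&object_lock);	/* pin just this object against writers */
	pthread_rwlock_unlock(&pool_lock);	/* coarse lock no longer needed */

	val = object_value;			/* potentially long "mapped" access */

	pthread_rwlock_unlock(&object_lock);	/* cf. zs_unmap_object() */
	return val;
}

int main(void)
{
	printf("mapped value: %d\n", map_object());
	return 0;
}
```

zs_free() does the same handoff, except the lock it keeps across the drop of pool->migrate_lock is class->lock, since freeing also updates the class's fullness lists.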
 
@@ -1438,6 +1421,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
        size += ZS_HANDLE_SIZE;
        class = pool->size_class[get_size_class_index(size)];
 
+       /* class->lock effectively protects against zpage migration */
        spin_lock(&class->lock);
        zspage = find_get_zspage(class);
        if (likely(zspage)) {
@@ -1511,37 +1495,31 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
        unsigned long obj;
        struct size_class *class;
        enum fullness_group fullness;
-       bool isolated;
 
        if (unlikely(!handle))
                return;
 
-       pin_tag(handle);
+       /*
+        * The pool->migrate_lock protects against races with zpage migration,
+        * so it's safe to get the page from the handle.
+        */
+       read_lock(&pool->migrate_lock);
        obj = handle_to_obj(handle);
        obj_to_page(obj, &f_page);
        zspage = get_zspage(f_page);
-
-       migrate_read_lock(zspage);
        class = zspage_class(pool, zspage);
-
        spin_lock(&class->lock);
+       read_unlock(&pool->migrate_lock);
+
        obj_free(class->size, obj);
        class_stat_dec(class, OBJ_USED, 1);
        fullness = fix_fullness_group(class, zspage);
-       if (fullness != ZS_EMPTY) {
-               migrate_read_unlock(zspage);
+       if (fullness != ZS_EMPTY)
                goto out;
-       }
 
-       isolated = is_zspage_isolated(zspage);
-       migrate_read_unlock(zspage);
-       /* If zspage is isolated, zs_page_putback will free the zspage */
-       if (likely(!isolated))
-               free_zspage(pool, class, zspage);
+       free_zspage(pool, class, zspage);
 out:
-
        spin_unlock(&class->lock);
-       unpin_tag(handle);
        cache_free_handle(pool, handle);
 }
 EXPORT_SYMBOL_GPL(zs_free);
@@ -1625,11 +1603,8 @@ static unsigned long find_alloced_obj(struct size_class *class,
        offset += class->size * index;
 
        while (offset < PAGE_SIZE) {
-               if (obj_allocated(page, addr + offset, &handle)) {
-                       if (trypin_tag(handle))
-                               break;
-                       handle = 0;
-               }
+               if (obj_allocated(page, addr + offset, &handle))
+                       break;
 
                offset += class->size;
                index++;
@@ -1675,7 +1650,6 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
 
                /* Stop if there is no more space */
                if (zspage_full(class, get_zspage(d_page))) {
-                       unpin_tag(handle);
                        ret = -ENOMEM;
                        break;
                }
@@ -1684,15 +1658,7 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
                free_obj = obj_malloc(pool, get_zspage(d_page), handle);
                zs_object_copy(class, free_obj, used_obj);
                obj_idx++;
-               /*
-                * record_obj updates handle's value to free_obj and it will
-                * invalidate lock bit(ie, HANDLE_PIN_BIT) of handle, which
-                * breaks synchronization using pin_tag(e,g, zs_free) so
-                * let's keep the lock bit.
-                */
-               free_obj |= BIT(HANDLE_PIN_BIT);
                record_obj(handle, free_obj);
-               unpin_tag(handle);
                obj_free(class->size, used_obj);
        }
 
@@ -1718,7 +1684,6 @@ static struct zspage *isolate_zspage(struct size_class *class, bool source)
                zspage = list_first_entry_or_null(&class->fullness_list[fg[i]],
                                                        struct zspage, list);
                if (zspage) {
-                       VM_BUG_ON(is_zspage_isolated(zspage));
                        remove_zspage(class, zspage, fg[i]);
                        return zspage;
                }
@@ -1739,8 +1704,6 @@ static enum fullness_group putback_zspage(struct size_class *class,
 {
        enum fullness_group fullness;
 
-       VM_BUG_ON(is_zspage_isolated(zspage));
-
        fullness = get_fullness_group(class, zspage);
        insert_zspage(class, zspage, fullness);
        set_zspage_mapping(zspage, class->index, fullness);
@@ -1809,6 +1772,11 @@ static void migrate_write_lock(struct zspage *zspage)
        write_lock(&zspage->lock);
 }
 
+static void migrate_write_lock_nested(struct zspage *zspage)
+{
+       write_lock_nested(&zspage->lock, SINGLE_DEPTH_NESTING);
+}
+
 static void migrate_write_unlock(struct zspage *zspage)
 {
        write_unlock(&zspage->lock);
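migrate_write_lock_nested() exists because __zs_compact() write-locks a destination zspage while already holding the write lock of a source zspage; both locks belong to the same lockdep class, so the second acquisition uses SINGLE_DEPTH_NESTING to tell lockdep the nesting is intentional. The user-space sketch below (illustrative names, pthread locks, no lockdep to annotate) shows only the fixed src-then-dst order that makes this safe:

```c
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t src_zspage_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_rwlock_t dst_zspage_lock = PTHREAD_RWLOCK_INITIALIZER;

static void compact_pair(void)
{
	pthread_rwlock_wrlock(&src_zspage_lock);	/* cf. migrate_write_lock(src_zspage) */
	pthread_rwlock_wrlock(&dst_zspage_lock);	/* cf. migrate_write_lock_nested(dst_zspage) */

	puts("moving objects from the source zspage to the destination zspage");

	pthread_rwlock_unlock(&dst_zspage_lock);
	pthread_rwlock_unlock(&src_zspage_lock);
}

int main(void)
{
	compact_pair();
	return 0;
}
```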
@@ -1822,35 +1790,10 @@ static void inc_zspage_isolation(struct zspage *zspage)
 
 static void dec_zspage_isolation(struct zspage *zspage)
 {
+       VM_BUG_ON(zspage->isolated == 0);
        zspage->isolated--;
 }
 
-static void putback_zspage_deferred(struct zs_pool *pool,
-                                   struct size_class *class,
-                                   struct zspage *zspage)
-{
-       enum fullness_group fg;
-
-       fg = putback_zspage(class, zspage);
-       if (fg == ZS_EMPTY)
-               schedule_work(&pool->free_work);
-
-}
-
-static inline void zs_pool_dec_isolated(struct zs_pool *pool)
-{
-       VM_BUG_ON(atomic_long_read(&pool->isolated_pages) <= 0);
-       atomic_long_dec(&pool->isolated_pages);
-       /*
-        * Checking pool->destroying must happen after atomic_long_dec()
-        * for pool->isolated_pages above. Paired with the smp_mb() in
-        * zs_unregister_migration().
-        */
-       smp_mb__after_atomic();
-       if (atomic_long_read(&pool->isolated_pages) == 0 && pool->destroying)
-               wake_up_all(&pool->migration_wait);
-}
-
 static void replace_sub_page(struct size_class *class, struct zspage *zspage,
                                struct page *newpage, struct page *oldpage)
 {
@@ -1876,10 +1819,7 @@ static void replace_sub_page(struct size_class *class, struct zspage *zspage,
 
 static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
 {
-       struct zs_pool *pool;
-       struct size_class *class;
        struct zspage *zspage;
-       struct address_space *mapping;
 
        /*
         * Page is locked so zspage couldn't be destroyed. For detail, look at
@@ -1889,39 +1829,9 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
        VM_BUG_ON_PAGE(PageIsolated(page), page);
 
        zspage = get_zspage(page);
-
-       mapping = page_mapping(page);
-       pool = mapping->private_data;
-
-       class = zspage_class(pool, zspage);
-
-       spin_lock(&class->lock);
-       if (get_zspage_inuse(zspage) == 0) {
-               spin_unlock(&class->lock);
-               return false;
-       }
-
-       /* zspage is isolated for object migration */
-       if (list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
-               spin_unlock(&class->lock);
-               return false;
-       }
-
-       /*
-        * If this is first time isolation for the zspage, isolate zspage from
-        * size_class to prevent further object allocation from the zspage.
-        */
-       if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
-               enum fullness_group fullness;
-               unsigned int class_idx;
-
-               get_zspage_mapping(zspage, &class_idx, &fullness);
-               atomic_long_inc(&pool->isolated_pages);
-               remove_zspage(class, zspage, fullness);
-       }
-
+       migrate_write_lock(zspage);
        inc_zspage_isolation(zspage);
-       spin_unlock(&class->lock);
+       migrate_write_unlock(zspage);
 
        return true;
 }
@@ -1934,11 +1844,10 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
        struct zspage *zspage;
        struct page *dummy;
        void *s_addr, *d_addr, *addr;
-       int offset, pos;
+       int offset;
        unsigned long handle;
        unsigned long old_obj, new_obj;
        unsigned int obj_idx;
-       int ret = -EAGAIN;
 
        /*
         * We cannot support the _NO_COPY case here, because copy needs to
@@ -1951,32 +1860,25 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
        VM_BUG_ON_PAGE(!PageMovable(page), page);
        VM_BUG_ON_PAGE(!PageIsolated(page), page);
 
-       zspage = get_zspage(page);
-
-       /* Concurrent compactor cannot migrate any subpage in zspage */
-       migrate_write_lock(zspage);
        pool = mapping->private_data;
+
+       /*
+        * The pool->migrate_lock protects against the race between zpage
+        * migration and zs_free.
+        */
+       write_lock(&pool->migrate_lock);
+       zspage = get_zspage(page);
        class = zspage_class(pool, zspage);
-       offset = get_first_obj_offset(page);
 
+       /*
+        * the class->lock protects zpage alloc/free in the zspage.
+        */
        spin_lock(&class->lock);
-       if (!get_zspage_inuse(zspage)) {
-               /*
-                * Set "offset" to end of the page so that every loops
-                * skips unnecessary object scanning.
-                */
-               offset = PAGE_SIZE;
-       }
+       /* the migrate_write_lock protects zpage access via zs_map_object */
+       migrate_write_lock(zspage);
 
-       pos = offset;
+       offset = get_first_obj_offset(page);
        s_addr = kmap_atomic(page);
-       while (pos < PAGE_SIZE) {
-               if (obj_allocated(page, s_addr + pos, &handle)) {
-                       if (!trypin_tag(handle))
-                               goto unpin_objects;
-               }
-               pos += class->size;
-       }
 
        /*
         * Here, any user cannot access all objects in the zspage so let's move.
@@ -1985,40 +1887,30 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
        memcpy(d_addr, s_addr, PAGE_SIZE);
        kunmap_atomic(d_addr);
 
-       for (addr = s_addr + offset; addr < s_addr + pos;
+       for (addr = s_addr + offset; addr < s_addr + PAGE_SIZE;
                                        addr += class->size) {
                if (obj_allocated(page, addr, &handle)) {
-                       BUG_ON(!testpin_tag(handle));
 
                        old_obj = handle_to_obj(handle);
                        obj_to_location(old_obj, &dummy, &obj_idx);
                        new_obj = (unsigned long)location_to_obj(newpage,
                                                                obj_idx);
-                       new_obj |= BIT(HANDLE_PIN_BIT);
                        record_obj(handle, new_obj);
                }
        }
+       kunmap_atomic(s_addr);
 
        replace_sub_page(class, zspage, newpage, page);
-       get_page(newpage);
-
-       dec_zspage_isolation(zspage);
-
        /*
-        * Page migration is done so let's putback isolated zspage to
-        * the list if @page is final isolated subpage in the zspage.
+        * Since we have completed the data copy and set up the new zspage
+        * structure, it's okay to release the pool->migrate_lock.
         */
-       if (!is_zspage_isolated(zspage)) {
-               /*
-                * We cannot race with zs_destroy_pool() here because we wait
-                * for isolation to hit zero before we start destroying.
-                * Also, we ensure that everyone can see pool->destroying before
-                * we start waiting.
-                */
-               putback_zspage_deferred(pool, class, zspage);
-               zs_pool_dec_isolated(pool);
-       }
+       write_unlock(&pool->migrate_lock);
+       spin_unlock(&class->lock);
+       dec_zspage_isolation(zspage);
+       migrate_write_unlock(zspage);
 
+       get_page(newpage);
        if (page_zone(newpage) != page_zone(page)) {
                dec_zone_page_state(page, NR_ZSPAGES);
                inc_zone_page_state(newpage, NR_ZSPAGES);
@@ -2026,50 +1918,21 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
 
        reset_page(page);
        put_page(page);
-       page = newpage;
-
-       ret = MIGRATEPAGE_SUCCESS;
-unpin_objects:
-       for (addr = s_addr + offset; addr < s_addr + pos;
-                                               addr += class->size) {
-               if (obj_allocated(page, addr, &handle)) {
-                       BUG_ON(!testpin_tag(handle));
-                       unpin_tag(handle);
-               }
-       }
-       kunmap_atomic(s_addr);
-       spin_unlock(&class->lock);
-       migrate_write_unlock(zspage);
 
-       return ret;
+       return MIGRATEPAGE_SUCCESS;
 }
 
 static void zs_page_putback(struct page *page)
 {
-       struct zs_pool *pool;
-       struct size_class *class;
-       struct address_space *mapping;
        struct zspage *zspage;
 
        VM_BUG_ON_PAGE(!PageMovable(page), page);
        VM_BUG_ON_PAGE(!PageIsolated(page), page);
 
        zspage = get_zspage(page);
-       mapping = page_mapping(page);
-       pool = mapping->private_data;
-       class = zspage_class(pool, zspage);
-
-       spin_lock(&class->lock);
+       migrate_write_lock(zspage);
        dec_zspage_isolation(zspage);
-       if (!is_zspage_isolated(zspage)) {
-               /*
-                * Due to page_lock, we cannot free zspage immediately
-                * so let's defer.
-                */
-               putback_zspage_deferred(pool, class, zspage);
-               zs_pool_dec_isolated(pool);
-       }
-       spin_unlock(&class->lock);
+       migrate_write_unlock(zspage);
 }
 
 static const struct address_space_operations zsmalloc_aops = {
@@ -2091,36 +1954,8 @@ static int zs_register_migration(struct zs_pool *pool)
        return 0;
 }
 
-static bool pool_isolated_are_drained(struct zs_pool *pool)
-{
-       return atomic_long_read(&pool->isolated_pages) == 0;
-}
-
-/* Function for resolving migration */
-static void wait_for_isolated_drain(struct zs_pool *pool)
-{
-
-       /*
-        * We're in the process of destroying the pool, so there are no
-        * active allocations. zs_page_isolate() fails for completely free
-        * zspages, so we need only wait for the zs_pool's isolated
-        * count to hit zero.
-        */
-       wait_event(pool->migration_wait,
-                  pool_isolated_are_drained(pool));
-}
-
 static void zs_unregister_migration(struct zs_pool *pool)
 {
-       pool->destroying = true;
-       /*
-        * We need a memory barrier here to ensure global visibility of
-        * pool->destroying. Thus pool->isolated pages will either be 0 in which
-        * case we don't care, or it will be > 0 and pool->destroying will
-        * ensure that we wake up once isolation hits 0.
-        */
-       smp_mb();
-       wait_for_isolated_drain(pool); /* This can block */
        flush_work(&pool->free_work);
        iput(pool->inode);
 }
@@ -2150,7 +1985,6 @@ static void async_free_zspage(struct work_struct *work)
                spin_unlock(&class->lock);
        }
 
-
        list_for_each_entry_safe(zspage, tmp, &free_pages, list) {
                list_del(&zspage->list);
                lock_zspage(zspage);
@@ -2214,8 +2048,13 @@ static unsigned long __zs_compact(struct zs_pool *pool,
        struct zspage *dst_zspage = NULL;
        unsigned long pages_freed = 0;
 
+       /* protect against the race between zpage migration and zs_free */
+       write_lock(&pool->migrate_lock);
+       /* protect zpage allocation/free */
        spin_lock(&class->lock);
        while ((src_zspage = isolate_zspage(class, true))) {
+               /* protect against someone accessing the zspage (i.e., zs_map_object) */
+               migrate_write_lock(src_zspage);
 
                if (!zs_can_compact(class))
                        break;
@@ -2224,6 +2063,8 @@ static unsigned long __zs_compact(struct zs_pool *pool,
                cc.s_page = get_first_page(src_zspage);
 
                while ((dst_zspage = isolate_zspage(class, false))) {
+                       migrate_write_lock_nested(dst_zspage);
+
                        cc.d_page = get_first_page(dst_zspage);
                        /*
                         * If there is no more space in dst_page, resched
@@ -2233,6 +2074,10 @@ static unsigned long __zs_compact(struct zs_pool *pool,
                                break;
 
                        putback_zspage(class, dst_zspage);
+                       migrate_write_unlock(dst_zspage);
+                       dst_zspage = NULL;
+                       if (rwlock_is_contended(&pool->migrate_lock))
+                               break;
                }
 
                /* Stop if we couldn't find slot */
@@ -2240,19 +2085,28 @@ static unsigned long __zs_compact(struct zs_pool *pool,
                        break;
 
                putback_zspage(class, dst_zspage);
+               migrate_write_unlock(dst_zspage);
+
                if (putback_zspage(class, src_zspage) == ZS_EMPTY) {
+                       migrate_write_unlock(src_zspage);
                        free_zspage(pool, class, src_zspage);
                        pages_freed += class->pages_per_zspage;
-               }
+               } else
+                       migrate_write_unlock(src_zspage);
                spin_unlock(&class->lock);
+               write_unlock(&pool->migrate_lock);
                cond_resched();
+               write_lock(&pool->migrate_lock);
                spin_lock(&class->lock);
        }
 
-       if (src_zspage)
+       if (src_zspage) {
                putback_zspage(class, src_zspage);
+               migrate_write_unlock(src_zspage);
+       }
 
        spin_unlock(&class->lock);
+       write_unlock(&pool->migrate_lock);
 
        return pages_freed;
 }
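The compaction loop above now backs off when rwlock_is_contended() sees waiters on pool->migrate_lock, and in any case drops both locks around cond_resched() once per source zspage, so zs_map_object()/zs_free() are not starved. A user-space approximation; pthreads cannot query rwlock contention, so this sketch (invented names) simply yields between batches:

```c
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_rwlock_t pool_migrate_lock = PTHREAD_RWLOCK_INITIALIZER;

static void compact_class(int batches)
{
	pthread_rwlock_wrlock(&pool_migrate_lock);
	for (int i = 0; i < batches; i++) {
		printf("compacting batch %d under the write lock\n", i);

		/* let readers in between batches, cf. cond_resched() */
		pthread_rwlock_unlock(&pool_migrate_lock);
		sched_yield();
		pthread_rwlock_wrlock(&pool_migrate_lock);
	}
	pthread_rwlock_unlock(&pool_migrate_lock);
}

int main(void)
{
	compact_class(3);
	return 0;
}
```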
@@ -2358,15 +2212,12 @@ struct zs_pool *zs_create_pool(const char *name)
                return NULL;
 
        init_deferred_free(pool);
+       rwlock_init(&pool->migrate_lock);
 
        pool->name = kstrdup(name, GFP_KERNEL);
        if (!pool->name)
                goto err;
 
-#ifdef CONFIG_COMPACTION
-       init_waitqueue_head(&pool->migration_wait);
-#endif
-
        if (create_cache(pool))
                goto err;