// SPDX-License-Identifier: GPL-2.0-only
/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special-purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page, which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher-density approach (with no requirement for an
 * integral number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks".  The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via the zpool API.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/migrate.h>
#include <linux/node.h>
#include <linux/compaction.h>
#include <linux/percpu.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/fs.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/rwlock.h>
#include <linux/zpool.h>
#include <linux/magic.h>

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation.  It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header,
 * so NCHUNKS works out to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y), which
 * is the maximum number of free chunks in a z3fold page; there will likewise
 * be 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER   6

#define CHUNK_SHIFT     (PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE      (1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS     (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS    (PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS         ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)

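/*
 * Worked example (illustrative; assumes a 4 KiB page and that the header
 * fits in a single chunk, as the comment above implies): NCHUNKS_ORDER == 6
 * gives
 *
 *      CHUNK_SHIFT       = 12 - 6 = 6
 *      CHUNK_SIZE        = 1 << 6 = 64 bytes
 *      ZHDR_SIZE_ALIGNED = round_up(sizeof(struct z3fold_header), 64) = 64
 *      ZHDR_CHUNKS       = 64 >> 6 = 1
 *      TOTAL_CHUNKS      = 4096 >> 6 = 64
 *      NCHUNKS           = (4096 - 64) >> 6 = 63
 *
 * i.e. one chunk holds the header and up to 63 chunks hold data.
 */
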
#define BUDDY_MASK      (0x3)
#define BUDDY_SHIFT     2
#define SLOTS_ALIGN     (0x40)

/*****************
 * Structures
 *****************/
struct z3fold_pool;
struct z3fold_ops {
        int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
        HEADLESS = 0,
        FIRST,
        MIDDLE,
        LAST,
        BUDDIES_MAX = LAST
};

struct z3fold_buddy_slots {
        /*
         * we are using BUDDY_MASK in handle_to_buddy etc. so there should
         * be enough slots to hold all possible variants
         */
        unsigned long slot[BUDDY_MASK + 1];
        unsigned long pool; /* back link + flags */
        rwlock_t lock;
};
#define HANDLE_FLAG_MASK        (0x03)

/*
 * struct z3fold_header - z3fold page metadata occupying the first chunks of
 *                      each z3fold page, except for HEADLESS pages
 * @buddy:              links the z3fold page into the relevant list in the
 *                      pool
 * @page_lock:          per-page lock
 * @refcount:           reference count for the z3fold page
 * @work:               work_struct for page layout optimization
 * @slots:              pointer to the structure holding buddy slots
 * @pool:               pointer to the containing pool
 * @cpu:                CPU which this page "belongs" to
 * @first_chunks:       the size of the first buddy in chunks, 0 if free
 * @middle_chunks:      the size of the middle buddy in chunks, 0 if free
 * @last_chunks:        the size of the last buddy in chunks, 0 if free
 * @start_middle:       index of the first chunk occupied by the middle buddy
 * @first_num:          the starting number (for the first handle)
 * @mapped_count:       the number of objects currently mapped
 * @foreign_handles:    number of foreign handles pointing into this page
 */
struct z3fold_header {
        struct list_head buddy;
        spinlock_t page_lock;
        struct kref refcount;
        struct work_struct work;
        struct z3fold_buddy_slots *slots;
        struct z3fold_pool *pool;
        short cpu;
        unsigned short first_chunks;
        unsigned short middle_chunks;
        unsigned short last_chunks;
        unsigned short start_middle;
        unsigned short first_num:2;
        unsigned short mapped_count:2;
        unsigned short foreign_handles:2;
};

/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:       pool name
 * @lock:       protects pool unbuddied/lru lists
 * @stale_lock: protects pool stale page list
 * @unbuddied:  per-cpu array of lists tracking z3fold pages that contain at
 *              most two buddies; the list each z3fold page is added to
 *              depends on the size of its free region.
 * @lru:        list tracking the z3fold pages in LRU order by most recently
 *              added buddy.
 * @stale:      list of pages marked for freeing
 * @pages_nr:   number of z3fold pages in the pool.
 * @c_handle:   cache for z3fold_buddy_slots allocation
 * @ops:        pointer to a structure of user defined operations specified at
 *              pool creation time.
 * @zpool:      zpool driving this pool
 * @zpool_ops:  zpool operations structure with an evict callback
 * @compact_wq: workqueue for page layout background optimization
 * @release_wq: workqueue for safe page release
 * @work:       work_struct for safe page release
 * @inode:      inode for z3fold pseudo filesystem
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
        const char *name;
        spinlock_t lock;
        spinlock_t stale_lock;
        struct list_head *unbuddied;
        struct list_head lru;
        struct list_head stale;
        atomic64_t pages_nr;
        struct kmem_cache *c_handle;
        const struct z3fold_ops *ops;
        struct zpool *zpool;
        const struct zpool_ops *zpool_ops;
        struct workqueue_struct *compact_wq;
        struct workqueue_struct *release_wq;
        struct work_struct work;
        struct inode *inode;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
        PAGE_HEADLESS = 0,
        MIDDLE_CHUNK_MAPPED,
        NEEDS_COMPACTING,
        PAGE_STALE,
        PAGE_CLAIMED, /* by either reclaim or free */
};

/*
 * handle flags, go under HANDLE_FLAG_MASK
 */
enum z3fold_handle_flags {
        HANDLES_ORPHANED = 0,
};

/*
 * Forward declarations
 */
static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *, size_t, bool);
static void compact_page_work(struct work_struct *w);

/*****************
 * Helpers
 *****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
        return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}

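/*
 * Illustrative behaviour of size_to_chunks(), assuming CHUNK_SIZE == 64 as
 * in the 4 KiB example above: the byte size is rounded up to whole chunks,
 * so
 *
 *      size_to_chunks(1)   == 1
 *      size_to_chunks(64)  == 1
 *      size_to_chunks(65)  == 2
 *      size_to_chunks(100) == 2
 */
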
#define for_each_unbuddied_list(_iter, _begin) \
        for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
                                                        gfp_t gfp)
{
        struct z3fold_buddy_slots *slots;

        slots = kmem_cache_alloc(pool->c_handle,
                                 (gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE)));

        if (slots) {
                memset(slots->slot, 0, sizeof(slots->slot));
                slots->pool = (unsigned long)pool;
                rwlock_init(&slots->lock);
        }

        return slots;
}

static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
        return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}

static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{
        return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}
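
/*
 * Sketch of why the masking in handle_to_slots() works (illustrative):
 * slots structures come from a kmem cache created with SLOTS_ALIGN (0x40)
 * alignment, and a non-headless handle is the address of one of the slot[]
 * entries inside such a structure. Assuming
 * sizeof(struct z3fold_buddy_slots) <= 0x40 (which holds on 64-bit with
 * four slots), clearing the low six bits of the handle recovers the
 * containing structure:
 *
 *      slots = (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
 */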

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
        spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
        return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
        spin_unlock(&zhdr->page_lock);
}

static inline struct z3fold_header *__get_z3fold_header(unsigned long handle,
                                                        bool lock)
{
        struct z3fold_buddy_slots *slots;
        struct z3fold_header *zhdr;
        int locked = 0;

        if (!(handle & (1 << PAGE_HEADLESS))) {
                slots = handle_to_slots(handle);
                do {
                        unsigned long addr;

                        read_lock(&slots->lock);
                        addr = *(unsigned long *)handle;
                        zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
                        if (lock)
                                locked = z3fold_page_trylock(zhdr);
                        read_unlock(&slots->lock);
                        if (locked)
                                break;
                        cpu_relax();
                } while (lock);
        } else {
                zhdr = (struct z3fold_header *)(handle & PAGE_MASK);
        }

        return zhdr;
}

/* Returns the z3fold page where a given handle is stored */
static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h)
{
        return __get_z3fold_header(h, false);
}

/* return locked z3fold page if it's not headless */
static inline struct z3fold_header *get_z3fold_header(unsigned long h)
{
        return __get_z3fold_header(h, true);
}

static inline void put_z3fold_header(struct z3fold_header *zhdr)
{
        struct page *page = virt_to_page(zhdr);

        if (!test_bit(PAGE_HEADLESS, &page->private))
                z3fold_page_unlock(zhdr);
}

static inline void free_handle(unsigned long handle)
{
        struct z3fold_buddy_slots *slots;
        struct z3fold_header *zhdr;
        int i;
        bool is_free;

        if (handle & (1 << PAGE_HEADLESS))
                return;

        if (WARN_ON(*(unsigned long *)handle == 0))
                return;

        zhdr = handle_to_z3fold_header(handle);
        slots = handle_to_slots(handle);
        write_lock(&slots->lock);
        *(unsigned long *)handle = 0;
        write_unlock(&slots->lock);
        if (zhdr->slots == slots)
                return; /* simple case, nothing else to do */

        /* we are freeing a foreign handle if we are here */
        zhdr->foreign_handles--;
        is_free = true;
        read_lock(&slots->lock);
        if (!test_bit(HANDLES_ORPHANED, &slots->pool)) {
                read_unlock(&slots->lock);
                return;
        }
        for (i = 0; i <= BUDDY_MASK; i++) {
                if (slots->slot[i]) {
                        is_free = false;
                        break;
                }
        }
        read_unlock(&slots->lock);

        if (is_free) {
                struct z3fold_pool *pool = slots_to_pool(slots);

                kmem_cache_free(pool->c_handle, slots);
        }
}

static int z3fold_init_fs_context(struct fs_context *fc)
{
        return init_pseudo(fc, Z3FOLD_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type z3fold_fs = {
        .name           = "z3fold",
        .init_fs_context = z3fold_init_fs_context,
        .kill_sb        = kill_anon_super,
};

static struct vfsmount *z3fold_mnt;
static int z3fold_mount(void)
{
        int ret = 0;

        z3fold_mnt = kern_mount(&z3fold_fs);
        if (IS_ERR(z3fold_mnt))
                ret = PTR_ERR(z3fold_mnt);

        return ret;
}

static void z3fold_unmount(void)
{
        kern_unmount(z3fold_mnt);
}

static const struct address_space_operations z3fold_aops;
static int z3fold_register_migration(struct z3fold_pool *pool)
{
        pool->inode = alloc_anon_inode(z3fold_mnt->mnt_sb);
        if (IS_ERR(pool->inode)) {
                pool->inode = NULL;
                return 1;
        }

        pool->inode->i_mapping->private_data = pool;
        pool->inode->i_mapping->a_ops = &z3fold_aops;
        return 0;
}

static void z3fold_unregister_migration(struct z3fold_pool *pool)
{
        if (pool->inode)
                iput(pool->inode);
}

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
                                        struct z3fold_pool *pool, gfp_t gfp)
{
        struct z3fold_header *zhdr = page_address(page);
        struct z3fold_buddy_slots *slots;

        INIT_LIST_HEAD(&page->lru);
        clear_bit(PAGE_HEADLESS, &page->private);
        clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
        clear_bit(NEEDS_COMPACTING, &page->private);
        clear_bit(PAGE_STALE, &page->private);
        clear_bit(PAGE_CLAIMED, &page->private);
        if (headless)
                return zhdr;

        slots = alloc_slots(pool, gfp);
        if (!slots)
                return NULL;

        spin_lock_init(&zhdr->page_lock);
        kref_init(&zhdr->refcount);
        zhdr->first_chunks = 0;
        zhdr->middle_chunks = 0;
        zhdr->last_chunks = 0;
        zhdr->first_num = 0;
        zhdr->start_middle = 0;
        zhdr->cpu = -1;
        zhdr->foreign_handles = 0;
        zhdr->slots = slots;
        zhdr->pool = pool;
        INIT_LIST_HEAD(&zhdr->buddy);
        INIT_WORK(&zhdr->work, compact_page_work);
        return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page, bool headless)
{
        if (!headless) {
                lock_page(page);
                __ClearPageMovable(page);
                unlock_page(page);
        }
        ClearPagePrivate(page);
        __free_page(page);
}

/* Helper function to build the index */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
        return (bud + zhdr->first_num) & BUDDY_MASK;
}
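
/*
 * Worked example (illustrative): with zhdr->first_num == 2,
 *
 *      __idx(zhdr, FIRST)  == (1 + 2) & 0x3 == 3
 *      __idx(zhdr, MIDDLE) == (2 + 2) & 0x3 == 0
 *      __idx(zhdr, LAST)   == (3 + 2) & 0x3 == 1
 *
 * first_num is incremented when z3fold_compact_page() moves the middle
 * buddy into the first slot, so the buddy that used to be MIDDLE keeps the
 * same index once it becomes FIRST: (2 + n) == (1 + (n + 1)).
 */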

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long __encode_handle(struct z3fold_header *zhdr,
                                struct z3fold_buddy_slots *slots,
                                enum buddy bud)
{
        unsigned long h = (unsigned long)zhdr;
        int idx = 0;

        /*
         * For a headless page, its handle is its pointer with the extra
         * PAGE_HEADLESS bit set
         */
        if (bud == HEADLESS)
                return h | (1 << PAGE_HEADLESS);

        /* otherwise, return pointer to encoded handle */
        idx = __idx(zhdr, bud);
        h += idx;
        if (bud == LAST)
                h |= (zhdr->last_chunks << BUDDY_SHIFT);

        write_lock(&slots->lock);
        slots->slot[idx] = h;
        write_unlock(&slots->lock);
        return (unsigned long)&slots->slot[idx];
}
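
/*
 * Handle encoding sketch (illustrative, assuming 4 KiB pages): the value
 * stored in slots->slot[idx] packs the page-aligned zhdr address with the
 * slot index in the low BUDDY_SHIFT bits and, for LAST buddies, the object
 * size in chunks in the bits above them:
 *
 *      [ zhdr address | last_chunks << BUDDY_SHIFT (LAST only) | idx ]
 *
 * The handle given out to the caller is the address of the slot itself,
 * which is why handle_to_buddy() and handle_to_chunks() below dereference
 * the handle before decoding these fields.
 */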

static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
        return __encode_handle(zhdr, zhdr->slots, bud);
}

/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
        struct z3fold_buddy_slots *slots = handle_to_slots(handle);
        unsigned long addr;

        read_lock(&slots->lock);
        addr = *(unsigned long *)handle;
        read_unlock(&slots->lock);
        return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle,
 * but that doesn't matter, because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
        struct z3fold_header *zhdr;
        struct z3fold_buddy_slots *slots = handle_to_slots(handle);
        unsigned long addr;

        read_lock(&slots->lock);
        WARN_ON(handle & (1 << PAGE_HEADLESS));
        addr = *(unsigned long *)handle;
        read_unlock(&slots->lock);
        zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
        return (addr - zhdr->first_num) & BUDDY_MASK;
}

static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
        return zhdr->pool;
}

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
        struct page *page = virt_to_page(zhdr);
        struct z3fold_pool *pool = zhdr_to_pool(zhdr);
        bool is_free = true;
        int i;

        WARN_ON(!list_empty(&zhdr->buddy));
        set_bit(PAGE_STALE, &page->private);
        clear_bit(NEEDS_COMPACTING, &page->private);
        spin_lock(&pool->lock);
        if (!list_empty(&page->lru))
                list_del_init(&page->lru);
        spin_unlock(&pool->lock);

        /* If there are no foreign handles, free the handles array */
        read_lock(&zhdr->slots->lock);
        for (i = 0; i <= BUDDY_MASK; i++) {
                if (zhdr->slots->slot[i]) {
                        is_free = false;
                        break;
                }
        }
        if (!is_free)
                set_bit(HANDLES_ORPHANED, &zhdr->slots->pool);
        read_unlock(&zhdr->slots->lock);

        if (is_free)
                kmem_cache_free(pool->c_handle, zhdr->slots);

        if (locked)
                z3fold_page_unlock(zhdr);

        spin_lock(&pool->stale_lock);
        list_add(&zhdr->buddy, &pool->stale);
        queue_work(pool->release_wq, &pool->work);
        spin_unlock(&pool->stale_lock);
}

static void __attribute__((__unused__))
                        release_z3fold_page(struct kref *ref)
{
        struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
                                                refcount);
        __release_z3fold_page(zhdr, false);
}

static void release_z3fold_page_locked(struct kref *ref)
{
        struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
                                                refcount);
        WARN_ON(z3fold_page_trylock(zhdr));
        __release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
        struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
                                                refcount);
        struct z3fold_pool *pool = zhdr_to_pool(zhdr);

        spin_lock(&pool->lock);
        list_del_init(&zhdr->buddy);
        spin_unlock(&pool->lock);

        WARN_ON(z3fold_page_trylock(zhdr));
        __release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
        struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

        spin_lock(&pool->stale_lock);
        while (!list_empty(&pool->stale)) {
                struct z3fold_header *zhdr = list_first_entry(&pool->stale,
                                                struct z3fold_header, buddy);
                struct page *page = virt_to_page(zhdr);

                list_del(&zhdr->buddy);
                if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
                        continue;
                spin_unlock(&pool->stale_lock);
                cancel_work_sync(&zhdr->work);
                free_z3fold_page(page, false);
                cond_resched();
                spin_lock(&pool->stale_lock);
        }
        spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
        int nfree;
        /*
         * If there is a middle object, pick up the bigger free space
         * either before or after it. Otherwise just subtract the number
         * of chunks occupied by the first and the last objects.
         */
        if (zhdr->middle_chunks != 0) {
                int nfree_before = zhdr->first_chunks ?
                        0 : zhdr->start_middle - ZHDR_CHUNKS;
                int nfree_after = zhdr->last_chunks ?
                        0 : TOTAL_CHUNKS -
                                (zhdr->start_middle + zhdr->middle_chunks);
                nfree = max(nfree_before, nfree_after);
        } else
                nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
        return nfree;
}
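
/*
 * Worked example (illustrative, continuing the 4 KiB geometry above with
 * TOTAL_CHUNKS == 64 and ZHDR_CHUNKS == 1): a page holding only a middle
 * buddy with start_middle == 20 and middle_chunks == 5 has
 *
 *      nfree_before = 20 - 1 = 19
 *      nfree_after  = 64 - (20 + 5) = 39
 *      nfree        = max(19, 39) = 39
 *
 * so add_to_unbuddied() below will queue the page on unbuddied[39], where
 * an allocation of up to 39 chunks can find it.
 */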

/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
                                struct z3fold_header *zhdr)
{
        if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
                        zhdr->middle_chunks == 0) {
                struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);

                int freechunks = num_free_chunks(zhdr);
                spin_lock(&pool->lock);
                list_add(&zhdr->buddy, &unbuddied[freechunks]);
                spin_unlock(&pool->lock);
                zhdr->cpu = smp_processor_id();
                put_cpu_ptr(pool->unbuddied);
        }
}

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
                                unsigned short dst_chunk)
{
        void *beg = zhdr;
        return memmove(beg + (dst_chunk << CHUNK_SHIFT),
                       beg + (zhdr->start_middle << CHUNK_SHIFT),
                       zhdr->middle_chunks << CHUNK_SHIFT);
}

static inline bool buddy_single(struct z3fold_header *zhdr)
{
        return !((zhdr->first_chunks && zhdr->middle_chunks) ||
                        (zhdr->first_chunks && zhdr->last_chunks) ||
                        (zhdr->middle_chunks && zhdr->last_chunks));
}

static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr)
{
        struct z3fold_pool *pool = zhdr_to_pool(zhdr);
        void *p = zhdr;
        unsigned long old_handle = 0;
        size_t sz = 0;
        struct z3fold_header *new_zhdr = NULL;
        int first_idx = __idx(zhdr, FIRST);
        int middle_idx = __idx(zhdr, MIDDLE);
        int last_idx = __idx(zhdr, LAST);
        unsigned short *moved_chunks = NULL;

        /*
         * No need to protect slots here -- all the slots are "local" and
         * the page lock is already taken
         */
        if (zhdr->first_chunks && zhdr->slots->slot[first_idx]) {
                p += ZHDR_SIZE_ALIGNED;
                sz = zhdr->first_chunks << CHUNK_SHIFT;
                old_handle = (unsigned long)&zhdr->slots->slot[first_idx];
                moved_chunks = &zhdr->first_chunks;
        } else if (zhdr->middle_chunks && zhdr->slots->slot[middle_idx]) {
                p += zhdr->start_middle << CHUNK_SHIFT;
                sz = zhdr->middle_chunks << CHUNK_SHIFT;
                old_handle = (unsigned long)&zhdr->slots->slot[middle_idx];
                moved_chunks = &zhdr->middle_chunks;
        } else if (zhdr->last_chunks && zhdr->slots->slot[last_idx]) {
                p += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
                sz = zhdr->last_chunks << CHUNK_SHIFT;
                old_handle = (unsigned long)&zhdr->slots->slot[last_idx];
                moved_chunks = &zhdr->last_chunks;
        }

        if (sz > 0) {
                enum buddy new_bud = HEADLESS;
                short chunks = size_to_chunks(sz);
                void *q;

                new_zhdr = __z3fold_alloc(pool, sz, false);
                if (!new_zhdr)
                        return NULL;

                if (WARN_ON(new_zhdr == zhdr))
                        goto out_fail;

                if (new_zhdr->first_chunks == 0) {
                        if (new_zhdr->middle_chunks != 0 &&
                                        chunks >= new_zhdr->start_middle) {
                                new_bud = LAST;
                        } else {
                                new_bud = FIRST;
                        }
                } else if (new_zhdr->last_chunks == 0) {
                        new_bud = LAST;
                } else if (new_zhdr->middle_chunks == 0) {
                        new_bud = MIDDLE;
                }
                q = new_zhdr;
                switch (new_bud) {
                case FIRST:
                        new_zhdr->first_chunks = chunks;
                        q += ZHDR_SIZE_ALIGNED;
                        break;
                case MIDDLE:
                        new_zhdr->middle_chunks = chunks;
                        new_zhdr->start_middle =
                                new_zhdr->first_chunks + ZHDR_CHUNKS;
                        q += new_zhdr->start_middle << CHUNK_SHIFT;
                        break;
                case LAST:
                        new_zhdr->last_chunks = chunks;
                        q += PAGE_SIZE - (new_zhdr->last_chunks << CHUNK_SHIFT);
                        break;
                default:
                        goto out_fail;
                }
                new_zhdr->foreign_handles++;
                memcpy(q, p, sz);
                write_lock(&zhdr->slots->lock);
                *(unsigned long *)old_handle = (unsigned long)new_zhdr +
                        __idx(new_zhdr, new_bud);
                if (new_bud == LAST)
                        *(unsigned long *)old_handle |=
                                        (new_zhdr->last_chunks << BUDDY_SHIFT);
                write_unlock(&zhdr->slots->lock);
                add_to_unbuddied(pool, new_zhdr);
                z3fold_page_unlock(new_zhdr);

                *moved_chunks = 0;
        }

        return new_zhdr;

out_fail:
        if (new_zhdr) {
                if (kref_put(&new_zhdr->refcount, release_z3fold_page_locked))
                        atomic64_dec(&pool->pages_nr);
                else {
                        add_to_unbuddied(pool, new_zhdr);
                        z3fold_page_unlock(new_zhdr);
                }
        }
        return NULL;
}

#define BIG_CHUNK_GAP   3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
        struct page *page = virt_to_page(zhdr);

        if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
                return 0; /* can't move middle chunk, it's used */

        if (unlikely(PageIsolated(page)))
                return 0;

        if (zhdr->middle_chunks == 0)
                return 0; /* nothing to compact */

        if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
                /* move to the beginning */
                mchunk_memmove(zhdr, ZHDR_CHUNKS);
                zhdr->first_chunks = zhdr->middle_chunks;
                zhdr->middle_chunks = 0;
                zhdr->start_middle = 0;
                zhdr->first_num++;
                return 1;
        }

        /*
         * moving data is expensive, so let's only do that if
         * there's substantial gain (at least BIG_CHUNK_GAP chunks)
         */
        if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
            zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
                        BIG_CHUNK_GAP) {
                mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
                zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
                return 1;
        } else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
                   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
                                        + zhdr->middle_chunks) >=
                        BIG_CHUNK_GAP) {
                unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
                        zhdr->middle_chunks;
                mchunk_memmove(zhdr, new_start);
                zhdr->start_middle = new_start;
                return 1;
        }

        return 0;
}
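
/*
 * Compaction example (illustrative, same 4 KiB geometry): a page with
 * first_chunks == 4, start_middle == 20, middle_chunks == 5 and no last
 * buddy has a gap of 20 - (4 + 1) = 15 chunks, which is >= BIG_CHUNK_GAP,
 * so the middle buddy is moved down to start_middle == 5. The contiguous
 * free region at the end of the page grows from 64 - 25 = 39 chunks to
 * 64 - 10 = 54 chunks.
 */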

static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
        struct z3fold_pool *pool = zhdr_to_pool(zhdr);
        struct page *page;

        page = virt_to_page(zhdr);
        if (locked)
                WARN_ON(z3fold_page_trylock(zhdr));
        else
                z3fold_page_lock(zhdr);
        if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
                z3fold_page_unlock(zhdr);
                return;
        }
        spin_lock(&pool->lock);
        list_del_init(&zhdr->buddy);
        spin_unlock(&pool->lock);

        if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
                atomic64_dec(&pool->pages_nr);
                return;
        }

        if (unlikely(PageIsolated(page) ||
                     test_bit(PAGE_CLAIMED, &page->private) ||
                     test_bit(PAGE_STALE, &page->private))) {
                z3fold_page_unlock(zhdr);
                return;
        }

        if (!zhdr->foreign_handles && buddy_single(zhdr) &&
            zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) {
                if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
                        atomic64_dec(&pool->pages_nr);
                else
                        z3fold_page_unlock(zhdr);
                return;
        }

        z3fold_compact_page(zhdr);
        add_to_unbuddied(pool, zhdr);
        z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
        struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
                                                work);

        do_compact_page(zhdr, false);
}

/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
                                                size_t size, bool can_sleep)
{
        struct z3fold_header *zhdr = NULL;
        struct page *page;
        struct list_head *unbuddied;
        int chunks = size_to_chunks(size), i;

lookup:
        /* First, try to find an unbuddied z3fold page. */
        unbuddied = get_cpu_ptr(pool->unbuddied);
        for_each_unbuddied_list(i, chunks) {
                struct list_head *l = &unbuddied[i];

                zhdr = list_first_entry_or_null(READ_ONCE(l),
                                        struct z3fold_header, buddy);

                if (!zhdr)
                        continue;

                /* Re-check under lock. */
                spin_lock(&pool->lock);
                l = &unbuddied[i];
                if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
                                                struct z3fold_header, buddy)) ||
                    !z3fold_page_trylock(zhdr)) {
                        spin_unlock(&pool->lock);
                        zhdr = NULL;
                        put_cpu_ptr(pool->unbuddied);
                        if (can_sleep)
                                cond_resched();
                        goto lookup;
                }
                list_del_init(&zhdr->buddy);
                zhdr->cpu = -1;
                spin_unlock(&pool->lock);

                page = virt_to_page(zhdr);
                if (test_bit(NEEDS_COMPACTING, &page->private) ||
                    test_bit(PAGE_CLAIMED, &page->private)) {
                        z3fold_page_unlock(zhdr);
                        zhdr = NULL;
                        put_cpu_ptr(pool->unbuddied);
                        if (can_sleep)
                                cond_resched();
                        goto lookup;
                }

                /*
                 * This page could not be removed from its unbuddied list
                 * while the pool lock was held, and we have since taken the
                 * page lock, so kref_put could not have been called before
                 * we got here. It is therefore safe to just call kref_get().
                 */
                kref_get(&zhdr->refcount);
                break;
        }
        put_cpu_ptr(pool->unbuddied);

        if (!zhdr) {
                int cpu;

                /* look for _exact_ match on other cpus' lists */
                for_each_online_cpu(cpu) {
                        struct list_head *l;

                        unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
                        spin_lock(&pool->lock);
                        l = &unbuddied[chunks];

                        zhdr = list_first_entry_or_null(READ_ONCE(l),
                                                struct z3fold_header, buddy);

                        if (!zhdr || !z3fold_page_trylock(zhdr)) {
                                spin_unlock(&pool->lock);
                                zhdr = NULL;
                                continue;
                        }
                        list_del_init(&zhdr->buddy);
                        zhdr->cpu = -1;
                        spin_unlock(&pool->lock);

                        page = virt_to_page(zhdr);
                        if (test_bit(NEEDS_COMPACTING, &page->private) ||
                            test_bit(PAGE_CLAIMED, &page->private)) {
                                z3fold_page_unlock(zhdr);
                                zhdr = NULL;
                                if (can_sleep)
                                        cond_resched();
                                continue;
                        }
                        kref_get(&zhdr->refcount);
                        break;
                }
        }

        return zhdr;
}

/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:       pool name
 * @gfp:        gfp flags when allocating the z3fold pool structure
 * @ops:        user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
                const struct z3fold_ops *ops)
{
        struct z3fold_pool *pool = NULL;
        int i, cpu;

        pool = kzalloc(sizeof(struct z3fold_pool), gfp);
        if (!pool)
                goto out;
        pool->c_handle = kmem_cache_create("z3fold_handle",
                                sizeof(struct z3fold_buddy_slots),
                                SLOTS_ALIGN, 0, NULL);
        if (!pool->c_handle)
                goto out_c;
        spin_lock_init(&pool->lock);
        spin_lock_init(&pool->stale_lock);
        pool->unbuddied = __alloc_percpu(sizeof(struct list_head) * NCHUNKS, 2);
        if (!pool->unbuddied)
                goto out_pool;
        for_each_possible_cpu(cpu) {
                struct list_head *unbuddied =
                                per_cpu_ptr(pool->unbuddied, cpu);
                for_each_unbuddied_list(i, 0)
                        INIT_LIST_HEAD(&unbuddied[i]);
        }
        INIT_LIST_HEAD(&pool->lru);
        INIT_LIST_HEAD(&pool->stale);
        atomic64_set(&pool->pages_nr, 0);
        pool->name = name;
        pool->compact_wq = create_singlethread_workqueue(pool->name);
        if (!pool->compact_wq)
                goto out_unbuddied;
        pool->release_wq = create_singlethread_workqueue(pool->name);
        if (!pool->release_wq)
                goto out_wq;
        if (z3fold_register_migration(pool))
                goto out_rwq;
        INIT_WORK(&pool->work, free_pages_work);
        pool->ops = ops;
        return pool;

out_rwq:
        destroy_workqueue(pool->release_wq);
out_wq:
        destroy_workqueue(pool->compact_wq);
out_unbuddied:
        free_percpu(pool->unbuddied);
out_pool:
        kmem_cache_destroy(pool->c_handle);
out_c:
        kfree(pool);
out:
        return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:       the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
        kmem_cache_destroy(pool->c_handle);

        /*
         * We need to destroy pool->compact_wq before pool->release_wq,
         * as any pending work on pool->compact_wq will call
         * queue_work(pool->release_wq, &pool->work).
         *
         * There are still outstanding pages until both workqueues are drained,
         * so we cannot unregister migration until then.
         */

        destroy_workqueue(pool->compact_wq);
        destroy_workqueue(pool->release_wq);
        z3fold_unregister_migration(pool);
        kfree(pool);
}

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:       z3fold pool from which to allocate
 * @size:       size in bytes of the desired allocation
 * @gfp:        gfp flags used if the pool needs to grow
 * @handle:     handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request.  A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 * a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
                        unsigned long *handle)
{
        int chunks = size_to_chunks(size);
        struct z3fold_header *zhdr = NULL;
        struct page *page = NULL;
        enum buddy bud;
        bool can_sleep = gfpflags_allow_blocking(gfp);

        if (!size)
                return -EINVAL;

        if (size > PAGE_SIZE)
                return -ENOSPC;

        if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
                bud = HEADLESS;
        else {
retry:
                zhdr = __z3fold_alloc(pool, size, can_sleep);
                if (zhdr) {
                        if (zhdr->first_chunks == 0) {
                                if (zhdr->middle_chunks != 0 &&
                                    chunks >= zhdr->start_middle)
                                        bud = LAST;
                                else
                                        bud = FIRST;
                        } else if (zhdr->last_chunks == 0)
                                bud = LAST;
                        else if (zhdr->middle_chunks == 0)
                                bud = MIDDLE;
                        else {
                                if (kref_put(&zhdr->refcount,
                                             release_z3fold_page_locked))
                                        atomic64_dec(&pool->pages_nr);
                                else
                                        z3fold_page_unlock(zhdr);
                                pr_err("No free chunks in unbuddied\n");
                                WARN_ON(1);
                                goto retry;
                        }
                        page = virt_to_page(zhdr);
                        goto found;
                }
                bud = FIRST;
        }

        page = NULL;
        if (can_sleep) {
                spin_lock(&pool->stale_lock);
                zhdr = list_first_entry_or_null(&pool->stale,
                                                struct z3fold_header, buddy);
                /*
                 * Before allocating a page, let's see if we can take one from
                 * the stale pages list. cancel_work_sync() can sleep, so we
                 * limit this case to the contexts where we can sleep.
                 */
                if (zhdr) {
                        list_del(&zhdr->buddy);
                        spin_unlock(&pool->stale_lock);
                        cancel_work_sync(&zhdr->work);
                        page = virt_to_page(zhdr);
                } else {
                        spin_unlock(&pool->stale_lock);
                }
        }
        if (!page)
                page = alloc_page(gfp);

        if (!page)
                return -ENOMEM;

        zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
        if (!zhdr) {
                __free_page(page);
                return -ENOMEM;
        }
        atomic64_inc(&pool->pages_nr);

        if (bud == HEADLESS) {
                set_bit(PAGE_HEADLESS, &page->private);
                goto headless;
        }
        if (can_sleep) {
                lock_page(page);
                __SetPageMovable(page, pool->inode->i_mapping);
                unlock_page(page);
        } else {
                if (trylock_page(page)) {
                        __SetPageMovable(page, pool->inode->i_mapping);
                        unlock_page(page);
                }
        }
        z3fold_page_lock(zhdr);

found:
        if (bud == FIRST)
                zhdr->first_chunks = chunks;
        else if (bud == LAST)
                zhdr->last_chunks = chunks;
        else {
                zhdr->middle_chunks = chunks;
                zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
        }
        add_to_unbuddied(pool, zhdr);

headless:
        spin_lock(&pool->lock);
        /* Add/move z3fold page to beginning of LRU */
        if (!list_empty(&page->lru))
                list_del(&page->lru);

        list_add(&page->lru, &pool->lru);

        *handle = encode_handle(zhdr, bud);
        spin_unlock(&pool->lock);
        if (bud != HEADLESS)
                z3fold_page_unlock(zhdr);

        return 0;
}
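
/*
 * Usage sketch (illustrative only; in-tree users reach these functions
 * through the zpool API rather than calling them directly, and
 * z3fold_unmap() is defined further down in this file):
 *
 *      static int my_evict(struct z3fold_pool *p, unsigned long h)
 *      {
 *              return -EAGAIN;  (hypothetical handler that never evicts)
 *      }
 *      static const struct z3fold_ops my_ops = { .evict = my_evict };
 *
 *      struct z3fold_pool *pool;
 *      unsigned long handle;
 *
 *      pool = z3fold_create_pool("example", GFP_KERNEL, &my_ops);
 *      if (!pool)
 *              return -ENOMEM;
 *      if (z3fold_alloc(pool, 1000, GFP_KERNEL, &handle) == 0) {
 *              void *buf = z3fold_map(pool, handle);
 *              ... write up to 1000 bytes at buf ...
 *              z3fold_unmap(pool, handle);
 *              z3fold_free(pool, handle);
 *      }
 *      z3fold_destroy_pool(pool);
 */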
1210
1211 /**
1212  * z3fold_free() - frees the allocation associated with the given handle
1213  * @pool:       pool in which the allocation resided
1214  * @handle:     handle associated with the allocation returned by z3fold_alloc()
1215  *
1216  * In the case that the z3fold page in which the allocation resides is under
1217  * reclaim, as indicated by the PG_reclaim flag being set, this function
1218  * only sets the first|last_chunks to 0.  The page is actually freed
1219  * once both buddies are evicted (see z3fold_reclaim_page() below).
1220  */
1221 static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
1222 {
1223         struct z3fold_header *zhdr;
1224         struct page *page;
1225         enum buddy bud;
1226         bool page_claimed;
1227
1228         zhdr = get_z3fold_header(handle);
1229         page = virt_to_page(zhdr);
1230         page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);
1231
1232         if (test_bit(PAGE_HEADLESS, &page->private)) {
1233                 /* if a headless page is under reclaim, just leave.
1234                  * NB: we use test_and_set_bit for a reason: if the bit
1235                  * has not been set before, we release this page
1236                  * immediately so we don't care about its value any more.
1237                  */
1238                 if (!page_claimed) {
1239                         spin_lock(&pool->lock);
1240                         list_del(&page->lru);
1241                         spin_unlock(&pool->lock);
1242                         put_z3fold_header(zhdr);
1243                         free_z3fold_page(page, true);
1244                         atomic64_dec(&pool->pages_nr);
1245                 }
1246                 return;
1247         }
1248
1249         /* Non-headless case */
1250         bud = handle_to_buddy(handle);
1251
1252         switch (bud) {
1253         case FIRST:
1254                 zhdr->first_chunks = 0;
1255                 break;
1256         case MIDDLE:
1257                 zhdr->middle_chunks = 0;
1258                 break;
1259         case LAST:
1260                 zhdr->last_chunks = 0;
1261                 break;
1262         default:
1263                 pr_err("%s: unknown bud %d\n", __func__, bud);
1264                 WARN_ON(1);
1265                 put_z3fold_header(zhdr);
1266                 clear_bit(PAGE_CLAIMED, &page->private);
1267                 return;
1268         }
1269
1270         if (!page_claimed)
1271                 free_handle(handle);
1272         if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
1273                 atomic64_dec(&pool->pages_nr);
1274                 return;
1275         }
1276         if (page_claimed) {
1277                 /* the page has not been claimed by us */
1278                 z3fold_page_unlock(zhdr);
1279                 return;
1280         }
1281         if (unlikely(PageIsolated(page)) ||
1282             test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
1283                 put_z3fold_header(zhdr);
1284                 clear_bit(PAGE_CLAIMED, &page->private);
1285                 return;
1286         }
1287         if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
1288                 spin_lock(&pool->lock);
1289                 list_del_init(&zhdr->buddy);
1290                 spin_unlock(&pool->lock);
1291                 zhdr->cpu = -1;
1292                 kref_get(&zhdr->refcount);
1293                 clear_bit(PAGE_CLAIMED, &page->private);
1294                 do_compact_page(zhdr, true);
1295                 return;
1296         }
1297         kref_get(&zhdr->refcount);
1298         clear_bit(PAGE_CLAIMED, &page->private);
1299         queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
1300         put_z3fold_header(zhdr);
1301 }
1302
1303 /**
1304  * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
1305  * @pool:       pool from which a page will attempt to be evicted
1306  * @retries:    number of pages on the LRU list for which eviction will
1307  *              be attempted before failing
1308  *
1309  * z3fold reclaim is different from normal system reclaim in that it is done
1310  * from the bottom, up. This is because only the bottom layer, z3fold, has
1311  * information on how the allocations are organized within each z3fold page.
1312  * This has the potential to create interesting locking situations between
1313  * z3fold and the user, however.
1314  *
1315  * To avoid these, this is how z3fold_reclaim_page() should be called:
1316  *
1317  * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
1318  * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
1319  * call the user-defined eviction handler with the pool and handle as
1320  * arguments.
1321  *
1322  * If the handle can not be evicted, the eviction handler should return
1323  * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
1324  * appropriate list and try the next z3fold page on the LRU up to
1325  * a user defined number of retries.
1326  *
1327  * If the handle is successfully evicted, the eviction handler should
1328  * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
1329  * contains logic to delay freeing the page if the page is under reclaim,
1330  * as indicated by the setting of the PG_reclaim flag on the underlying page.
1331  *
1332  * If all buddies in the z3fold page are successfully evicted, then the
1333  * z3fold page can be freed.
1334  *
1335  * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
1336  * no pages to evict or an eviction handler is not registered, -EAGAIN if
1337  * the retry limit was hit.
1338  */
1339 static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
1340 {
1341         int i, ret = -1;
1342         struct z3fold_header *zhdr = NULL;
1343         struct page *page = NULL;
1344         struct list_head *pos;
1345         unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;
1346
1347         spin_lock(&pool->lock);
1348         if (!pool->ops || !pool->ops->evict || retries == 0) {
1349                 spin_unlock(&pool->lock);
1350                 return -EINVAL;
1351         }
1352         for (i = 0; i < retries; i++) {
1353                 if (list_empty(&pool->lru)) {
1354                         spin_unlock(&pool->lock);
1355                         return -EINVAL;
1356                 }
1357                 list_for_each_prev(pos, &pool->lru) {
1358                         page = list_entry(pos, struct page, lru);
1359
1360                         /* this bit could have been set by free, in which case
1361                          * we pass over to the next page in the pool.
1362                          */
1363                         if (test_and_set_bit(PAGE_CLAIMED, &page->private)) {
1364                                 page = NULL;
1365                                 continue;
1366                         }
1367
1368                         if (unlikely(PageIsolated(page))) {
1369                                 clear_bit(PAGE_CLAIMED, &page->private);
1370                                 page = NULL;
1371                                 continue;
1372                         }
1373                         zhdr = page_address(page);
1374                         if (test_bit(PAGE_HEADLESS, &page->private))
1375                                 break;
1376
1377                         if (!z3fold_page_trylock(zhdr)) {
1378                                 clear_bit(PAGE_CLAIMED, &page->private);
1379                                 zhdr = NULL;
1380                                 continue; /* can't evict at this point */
1381                         }
1382                         if (zhdr->foreign_handles) {
1383                                 clear_bit(PAGE_CLAIMED, &page->private);
1384                                 z3fold_page_unlock(zhdr);
1385                                 zhdr = NULL;
1386                                 continue; /* can't evict such page */
1387                         }
1388                         kref_get(&zhdr->refcount);
1389                         list_del_init(&zhdr->buddy);
1390                         zhdr->cpu = -1;
1391                         break;
1392                 }
1393
1394                 if (!zhdr)
1395                         break;
1396
1397                 list_del_init(&page->lru);
1398                 spin_unlock(&pool->lock);
1399
1400                 if (!test_bit(PAGE_HEADLESS, &page->private)) {
1401                         /*
1402                          * We need encode the handles before unlocking, and
1403                          * use our local slots structure because z3fold_free
1404                          * can zero out zhdr->slots and we can't do much
1405                          * about that
1406                          */
1407                         first_handle = 0;
1408                         last_handle = 0;
1409                         middle_handle = 0;
1410                         if (zhdr->first_chunks)
1411                                 first_handle = encode_handle(zhdr, FIRST);
1412                         if (zhdr->middle_chunks)
1413                                 middle_handle = encode_handle(zhdr, MIDDLE);
1414                         if (zhdr->last_chunks)
1415                                 last_handle = encode_handle(zhdr, LAST);
1416                         /*
1417                          * it's safe to unlock here because we hold a
1418                          * reference to this page
1419                          */
1420                         z3fold_page_unlock(zhdr);
1421                 } else {
1422                         first_handle = encode_handle(zhdr, HEADLESS);
1423                         last_handle = middle_handle = 0;
1424                 }
1425                 /* Issue the eviction callback(s) */
1426                 if (middle_handle) {
1427                         ret = pool->ops->evict(pool, middle_handle);
1428                         if (ret)
1429                                 goto next;
1430                         free_handle(middle_handle);
1431                 }
1432                 if (first_handle) {
1433                         ret = pool->ops->evict(pool, first_handle);
1434                         if (ret)
1435                                 goto next;
1436                         free_handle(first_handle);
1437                 }
1438                 if (last_handle) {
1439                         ret = pool->ops->evict(pool, last_handle);
1440                         if (ret)
1441                                 goto next;
1442                         free_handle(last_handle);
1443                 }
1444 next:
1445                 if (test_bit(PAGE_HEADLESS, &page->private)) {
1446                         if (ret == 0) {
1447                                 free_z3fold_page(page, true);
1448                                 atomic64_dec(&pool->pages_nr);
1449                                 return 0;
1450                         }
1451                         spin_lock(&pool->lock);
1452                         list_add(&page->lru, &pool->lru);
1453                         spin_unlock(&pool->lock);
1454                         clear_bit(PAGE_CLAIMED, &page->private);
1455                 } else {
1456                         z3fold_page_lock(zhdr);
1457                         if (kref_put(&zhdr->refcount,
1458                                         release_z3fold_page_locked)) {
1459                                 atomic64_dec(&pool->pages_nr);
1460                                 return 0;
1461                         }
1462                         /*
1463                          * If we are here, the page is still not completely
1464                          * free. Take the global pool lock so that we can
1465                          * add it back to the LRU list.
1466                          */
1467                         spin_lock(&pool->lock);
1468                         list_add(&page->lru, &pool->lru);
1469                         spin_unlock(&pool->lock);
1470                         z3fold_page_unlock(zhdr);
1471                         clear_bit(PAGE_CLAIMED, &page->private);
1472                 }
1473
1474                 /* We started off locked, so we need to lock the pool back */
1475                 spin_lock(&pool->lock);
1476         }
1477         spin_unlock(&pool->lock);
1478         return -EAGAIN;
1479 }
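
/*
 * Illustrative only, not part of this file: a minimal ->evict callback as a
 * zpool consumer (e.g. zswap) might supply it. example_evict() is a
 * hypothetical name; zpool_map_handle()/zpool_unmap_handle() are the real
 * zpool entry points. Returning 0 tells z3fold_reclaim_page() that the
 * handle may be freed.
 */
static int example_evict(struct zpool *zpool, unsigned long handle)
{
        void *obj = zpool_map_handle(zpool, handle, ZPOOL_MM_RO);

        /* ... write the compressed object back to its backing store ... */
        (void)obj;
        zpool_unmap_handle(zpool, handle);
        return 0;
}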
1480
1481 /**
1482  * z3fold_map() - maps the allocation associated with the given handle
1483  * @pool:       pool in which the allocation resides
1484  * @handle:     handle associated with the allocation to be mapped
1485  *
1486  * Extracts the buddy number from handle and constructs the pointer to the
1487  * correct starting chunk within the page.
1488  *
1489  * Returns: a pointer to the mapped allocation
1490  */
1491 static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
1492 {
1493         struct z3fold_header *zhdr;
1494         struct page *page;
1495         void *addr;
1496         enum buddy buddy;
1497
1498         zhdr = get_z3fold_header(handle);
1499         addr = zhdr;
1500         page = virt_to_page(zhdr);
1501
1502         if (test_bit(PAGE_HEADLESS, &page->private))
1503                 goto out;
1504
1505         buddy = handle_to_buddy(handle);
1506         switch (buddy) {
1507         case FIRST:
1508                 addr += ZHDR_SIZE_ALIGNED;
1509                 break;
1510         case MIDDLE:
1511                 addr += zhdr->start_middle << CHUNK_SHIFT;
1512                 set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
1513                 break;
1514         case LAST:
1515                 addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
1516                 break;
1517         default:
1518                 pr_err("unknown buddy id %d\n", buddy);
1519                 WARN_ON(1);
1520                 addr = NULL;
1521                 break;
1522         }
1523
1524         if (addr)
1525                 zhdr->mapped_count++;
1526 out:
1527         put_z3fold_header(zhdr);
1528         return addr;
1529 }
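
/*
 * Worked example of the address math above, assuming PAGE_SIZE == 4096 and
 * NCHUNKS_ORDER == 6 (so CHUNK_SIZE == 64): a FIRST buddy starts right after
 * the header, at page + ZHDR_SIZE_ALIGNED; a MIDDLE buddy with
 * start_middle == 10 starts at page + 10 * 64 = page + 640; a LAST buddy of
 * 5 chunks grows down from the page end, starting at
 * page + 4096 - 5 * 64 = page + 3776.
 */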
1530
1531 /**
1532  * z3fold_unmap() - unmaps the allocation associated with the given handle
1533  * @pool:       pool in which the allocation resides
1534  * @handle:     handle associated with the allocation to be unmapped
1535  */
1536 static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
1537 {
1538         struct z3fold_header *zhdr;
1539         struct page *page;
1540         enum buddy buddy;
1541
1542         zhdr = get_z3fold_header(handle);
1543         page = virt_to_page(zhdr);
1544
1545         if (test_bit(PAGE_HEADLESS, &page->private))
1546                 return;
1547
1548         buddy = handle_to_buddy(handle);
1549         if (buddy == MIDDLE)
1550                 clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
1551         zhdr->mapped_count--;
1552         put_z3fold_header(zhdr);
1553 }
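
/*
 * Illustrative only: the expected map/access/unmap pairing as seen from a
 * zpool consumer. Mapping pins the z3fold header (and, for MIDDLE buddies,
 * blocks compaction), so the mapped window should be kept short.
 * example_read_object() is hypothetical and assumes <linux/string.h> is
 * pulled in by the headers above.
 */
static void example_read_object(struct zpool *zpool, unsigned long handle,
                                void *dst, size_t len)
{
        void *src = zpool_map_handle(zpool, handle, ZPOOL_MM_RO);

        memcpy(dst, src, len);
        zpool_unmap_handle(zpool, handle);
}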
1554
1555 /**
1556  * z3fold_get_pool_size() - gets the z3fold pool size in pages
1557  * @pool:       pool whose size is being queried
1558  *
1559  * Returns: size in pages of the given pool.
1560  */
1561 static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
1562 {
1563         return atomic64_read(&pool->pages_nr);
1564 }
1565
1566 static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
1567 {
1568         struct z3fold_header *zhdr;
1569         struct z3fold_pool *pool;
1570
1571         VM_BUG_ON_PAGE(!PageMovable(page), page);
1572         VM_BUG_ON_PAGE(PageIsolated(page), page);
1573
1574         if (test_bit(PAGE_HEADLESS, &page->private) ||
1575             test_bit(PAGE_CLAIMED, &page->private))
1576                 return false;
1577
1578         zhdr = page_address(page);
1579         z3fold_page_lock(zhdr);
1580         if (test_bit(NEEDS_COMPACTING, &page->private) ||
1581             test_bit(PAGE_STALE, &page->private))
1582                 goto out;
1583
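        /* a page that is mapped or pinned by foreign handles must not move */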
1584         if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0)
1585                 goto out;
1586
1587         pool = zhdr_to_pool(zhdr);
1588         spin_lock(&pool->lock);
1589         if (!list_empty(&zhdr->buddy))
1590                 list_del_init(&zhdr->buddy);
1591         if (!list_empty(&page->lru))
1592                 list_del_init(&page->lru);
1593         spin_unlock(&pool->lock);
1594
1595         kref_get(&zhdr->refcount);
1596         z3fold_page_unlock(zhdr);
1597         return true;
1598
1599 out:
1600         z3fold_page_unlock(zhdr);
1601         return false;
1602 }
1603
1604 static int z3fold_page_migrate(struct address_space *mapping, struct page *newpage,
1605                                struct page *page, enum migrate_mode mode)
1606 {
1607         struct z3fold_header *zhdr, *new_zhdr;
1608         struct z3fold_pool *pool;
1609         struct address_space *new_mapping;
1610
1611         VM_BUG_ON_PAGE(!PageMovable(page), page);
1612         VM_BUG_ON_PAGE(!PageIsolated(page), page);
1613         VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
1614
1615         zhdr = page_address(page);
1616         pool = zhdr_to_pool(zhdr);
1617
1618         if (!z3fold_page_trylock(zhdr))
1619                 return -EAGAIN;
1621         if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) {
1622                 z3fold_page_unlock(zhdr);
1623                 return -EBUSY;
1624         }
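        /* a queued compaction work item still references the old page */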
1625         if (work_pending(&zhdr->work)) {
1626                 z3fold_page_unlock(zhdr);
1627                 return -EAGAIN;
1628         }
1629         new_zhdr = page_address(newpage);
1630         memcpy(new_zhdr, zhdr, PAGE_SIZE);
1631         newpage->private = page->private;
1632         page->private = 0;
1633         z3fold_page_unlock(zhdr);
1634         spin_lock_init(&new_zhdr->page_lock);
1635         INIT_WORK(&new_zhdr->work, compact_page_work);
1636         /*
1637          * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
1638          * so we only have to reinitialize it.
1639          */
1640         INIT_LIST_HEAD(&new_zhdr->buddy);
1641         new_mapping = page_mapping(page);
1642         __ClearPageMovable(page);
1643         ClearPagePrivate(page);
1644
1645         get_page(newpage);
1646         z3fold_page_lock(new_zhdr);
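        /* rewrite the slot entries so live handles point into newpage */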
1647         if (new_zhdr->first_chunks)
1648                 encode_handle(new_zhdr, FIRST);
1649         if (new_zhdr->last_chunks)
1650                 encode_handle(new_zhdr, LAST);
1651         if (new_zhdr->middle_chunks)
1652                 encode_handle(new_zhdr, MIDDLE);
1653         set_bit(NEEDS_COMPACTING, &newpage->private);
1654         new_zhdr->cpu = smp_processor_id();
1655         spin_lock(&pool->lock);
1656         list_add(&newpage->lru, &pool->lru);
1657         spin_unlock(&pool->lock);
1658         __SetPageMovable(newpage, new_mapping);
1659         z3fold_page_unlock(new_zhdr);
1660
1661         queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
1662
1663         page_mapcount_reset(page);
1664         put_page(page);
1665         return 0;
1666 }
1667
1668 static void z3fold_page_putback(struct page *page)
1669 {
1670         struct z3fold_header *zhdr;
1671         struct z3fold_pool *pool;
1672
1673         zhdr = page_address(page);
1674         pool = zhdr_to_pool(zhdr);
1675
1676         z3fold_page_lock(zhdr);
1677         if (!list_empty(&zhdr->buddy))
1678                 list_del_init(&zhdr->buddy);
1679         INIT_LIST_HEAD(&page->lru);
1680         if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
1681                 atomic64_dec(&pool->pages_nr);
1682                 return;
1683         }
1684         spin_lock(&pool->lock);
1685         list_add(&page->lru, &pool->lru);
1686         spin_unlock(&pool->lock);
1687         z3fold_page_unlock(zhdr);
1688 }
1689
1690 static const struct address_space_operations z3fold_aops = {
1691         .isolate_page = z3fold_page_isolate,
1692         .migratepage = z3fold_page_migrate,
1693         .putback_page = z3fold_page_putback,
1694 };
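
/*
 * These callbacks hook z3fold pages into the generic page migration core:
 * isolate_page is called when a movable z3fold page is picked for migration,
 * migratepage moves its contents to a freshly allocated page, and
 * putback_page returns the page if migration is aborted.
 */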
1695
1696 /*****************
1697  * zpool
1698  ****************/
1699
1700 static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
1701 {
1702         if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
1703                 return pool->zpool_ops->evict(pool->zpool, handle);
1704
1705         return -ENOENT;
1706 }
1707
1708 static const struct z3fold_ops z3fold_zpool_ops = {
1709         .evict =        z3fold_zpool_evict
1710 };
1711
1712 static void *z3fold_zpool_create(const char *name, gfp_t gfp,
1713                                const struct zpool_ops *zpool_ops,
1714                                struct zpool *zpool)
1715 {
1716         struct z3fold_pool *pool;
1717
1718         pool = z3fold_create_pool(name, gfp,
1719                                 zpool_ops ? &z3fold_zpool_ops : NULL);
1720         if (pool) {
1721                 pool->zpool = zpool;
1722                 pool->zpool_ops = zpool_ops;
1723         }
1724         return pool;
1725 }
1726
1727 static void z3fold_zpool_destroy(void *pool)
1728 {
1729         z3fold_destroy_pool(pool);
1730 }
1731
1732 static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
1733                         unsigned long *handle)
1734 {
1735         return z3fold_alloc(pool, size, gfp, handle);
1736 }
1737 static void z3fold_zpool_free(void *pool, unsigned long handle)
1738 {
1739         z3fold_free(pool, handle);
1740 }
1741
1742 static int z3fold_zpool_shrink(void *pool, unsigned int pages,
1743                         unsigned int *reclaimed)
1744 {
1745         unsigned int total = 0;
1746         int ret = -EINVAL;
1747
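        /*
         * For each page we are asked to reclaim, let z3fold_reclaim_page()
         * try up to 8 LRU pages before giving up on this request.
         */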
1748         while (total < pages) {
1749                 ret = z3fold_reclaim_page(pool, 8);
1750                 if (ret < 0)
1751                         break;
1752                 total++;
1753         }
1754
1755         if (reclaimed)
1756                 *reclaimed = total;
1757
1758         return ret;
1759 }
1760
1761 static void *z3fold_zpool_map(void *pool, unsigned long handle,
1762                         enum zpool_mapmode mm)
1763 {
1764         return z3fold_map(pool, handle);
1765 }
1766 static void z3fold_zpool_unmap(void *pool, unsigned long handle)
1767 {
1768         z3fold_unmap(pool, handle);
1769 }
1770
1771 static u64 z3fold_zpool_total_size(void *pool)
1772 {
1773         return z3fold_get_pool_size(pool) * PAGE_SIZE;
1774 }
1775
1776 static struct zpool_driver z3fold_zpool_driver = {
1777         .type =         "z3fold",
1778         .owner =        THIS_MODULE,
1779         .create =       z3fold_zpool_create,
1780         .destroy =      z3fold_zpool_destroy,
1781         .malloc =       z3fold_zpool_malloc,
1782         .free =         z3fold_zpool_free,
1783         .shrink =       z3fold_zpool_shrink,
1784         .map =          z3fold_zpool_map,
1785         .unmap =        z3fold_zpool_unmap,
1786         .total_size =   z3fold_zpool_total_size,
1787 };
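
/*
 * Illustrative only: how a consumer would exercise this driver through the
 * generic zpool API. example_use_z3fold() is hypothetical; passing NULL ops
 * simply disables the eviction callback (see z3fold_zpool_create() above),
 * error handling is abbreviated, and memset() is assumed to be available
 * via <linux/string.h>.
 */
static int example_use_z3fold(void)
{
        struct zpool *zpool;
        unsigned long handle;
        char *buf;

        zpool = zpool_create_pool("z3fold", "example", GFP_KERNEL, NULL);
        if (!zpool)
                return -ENOMEM;

        if (zpool_malloc(zpool, 100, GFP_KERNEL, &handle) == 0) {
                buf = zpool_map_handle(zpool, handle, ZPOOL_MM_RW);
                memset(buf, 0xaa, 100);
                zpool_unmap_handle(zpool, handle);
                zpool_free(zpool, handle);
        }

        zpool_destroy_pool(zpool);
        return 0;
}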
1788
1789 MODULE_ALIAS("zpool-z3fold");
1790
1791 static int __init init_z3fold(void)
1792 {
1793         int ret;
1794
1795         /* Make sure the z3fold header is not larger than the page size */
1796         BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
1797         ret = z3fold_mount();
1798         if (ret)
1799                 return ret;
1800
1801         zpool_register_driver(&z3fold_zpool_driver);
1802
1803         return 0;
1804 }
1805
1806 static void __exit exit_z3fold(void)
1807 {
1808         z3fold_unmount();
1809         zpool_unregister_driver(&z3fold_zpool_driver);
1810 }
1811
1812 module_init(init_z3fold);
1813 module_exit(exit_z3fold);
1814
1815 MODULE_LICENSE("GPL");
1816 MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
1817 MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");