linux-2.6-microblaze.git: drivers/gpu/drm/ttm/ttm_page_alloc.c
1 /*
2  * Copyright (c) Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sub license,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the
12  * next paragraph) shall be included in all copies or substantial portions
13  * of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors: Dave Airlie <airlied@redhat.com>
24  *          Jerome Glisse <jglisse@redhat.com>
25  *          Pauli Nieminen <suokkos@gmail.com>
26  */
27
28 /* simple list based uncached page pool
29  * - Pool collects recently freed pages for reuse
30  * - Use page->lru to keep a free list
31  * - doesn't track currently in use pages
32  */
33
34 #define pr_fmt(fmt) "[TTM] " fmt
35
36 #include <linux/list.h>
37 #include <linux/spinlock.h>
38 #include <linux/highmem.h>
39 #include <linux/mm_types.h>
40 #include <linux/module.h>
41 #include <linux/mm.h>
42 #include <linux/seq_file.h> /* for seq_printf */
43 #include <linux/slab.h>
44 #include <linux/dma-mapping.h>
45
46 #include <linux/atomic.h>
47
48 #include <drm/ttm/ttm_bo_driver.h>
49 #include <drm/ttm/ttm_page_alloc.h>
50 #include <drm/ttm/ttm_set_memory.h>
51
52 #define NUM_PAGES_TO_ALLOC              (PAGE_SIZE/sizeof(struct page *))
53 #define SMALL_ALLOCATION                16
54 #define FREE_ALL_PAGES                  (~0U)
55 /* times are in msecs */
56 #define PAGE_FREE_INTERVAL              1000
57
58 /**
59  * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
60  *
61  * @lock: Protects the shared pool from concurrent access. Must be used with
62  * irqsave/irqrestore variants because the pool allocator may be called from
63  * delayed work.
64  * @fill_lock: Prevent concurrent calls to fill.
65  * @list: Pool of free uc/wc pages for fast reuse.
66  * @gfp_flags: Flags to pass to alloc_page.
67  * @npages: Number of pages in pool.
68  */
69 struct ttm_page_pool {
70         spinlock_t              lock;
71         bool                    fill_lock;
72         struct list_head        list;
73         gfp_t                   gfp_flags;
74         unsigned                npages;
75         char                    *name;
76         unsigned long           nfrees;
77         unsigned long           nrefills;
78         unsigned int            order;
79 };
80
81 /**
82  * Limits for the pool. They are handled without locks because the only place
83  * they may change is the sysfs store. They won't have an immediate effect anyway,
84  * so forcing serialization to access them is pointless.
85  */
86
87 struct ttm_pool_opts {
88         unsigned        alloc_size;
89         unsigned        max_size;
90         unsigned        small;
91 };
92
93 #define NUM_POOLS 6
94
95 /**
96  * struct ttm_pool_manager - Holds memory pools for fast allocation
97  *
98  * Manager is a read-only object for the pool code, so it doesn't need locking.
99  *
100  * @free_interval: minimum number of jiffies between freeing pages from pool.
101  * @page_alloc_inited: reference counting for pool allocation.
102  * @work: Work that is used to shrink the pool. Work is only run when there are
103  * some pages to free.
104  * @small_allocation: Limit, in number of pages, below which an allocation is small.
105  *
106  * @pools: All pool objects in use.
107  **/
108 struct ttm_pool_manager {
109         struct kobject          kobj;
110         struct shrinker         mm_shrink;
111         struct ttm_pool_opts    options;
112
113         union {
114                 struct ttm_page_pool    pools[NUM_POOLS];
115                 struct {
116                         struct ttm_page_pool    wc_pool;
117                         struct ttm_page_pool    uc_pool;
118                         struct ttm_page_pool    wc_pool_dma32;
119                         struct ttm_page_pool    uc_pool_dma32;
120                         struct ttm_page_pool    wc_pool_huge;
121                         struct ttm_page_pool    uc_pool_huge;
122                 };
123         };
124 };
125
126 static struct attribute ttm_page_pool_max = {
127         .name = "pool_max_size",
128         .mode = S_IRUGO | S_IWUSR
129 };
130 static struct attribute ttm_page_pool_small = {
131         .name = "pool_small_allocation",
132         .mode = S_IRUGO | S_IWUSR
133 };
134 static struct attribute ttm_page_pool_alloc_size = {
135         .name = "pool_allocation_size",
136         .mode = S_IRUGO | S_IWUSR
137 };
138
139 static struct attribute *ttm_pool_attrs[] = {
140         &ttm_page_pool_max,
141         &ttm_page_pool_small,
142         &ttm_page_pool_alloc_size,
143         NULL
144 };
145
146 static void ttm_pool_kobj_release(struct kobject *kobj)
147 {
148         struct ttm_pool_manager *m =
149                 container_of(kobj, struct ttm_pool_manager, kobj);
150         kfree(m);
151 }
152
153 static ssize_t ttm_pool_store(struct kobject *kobj,
154                 struct attribute *attr, const char *buffer, size_t size)
155 {
156         struct ttm_pool_manager *m =
157                 container_of(kobj, struct ttm_pool_manager, kobj);
158         int chars;
159         unsigned val;
160         chars = sscanf(buffer, "%u", &val);
161         if (chars == 0)
162                 return size;
163
164         /* Convert kb to number of pages */
165         val = val / (PAGE_SIZE >> 10);
166
167         if (attr == &ttm_page_pool_max)
168                 m->options.max_size = val;
169         else if (attr == &ttm_page_pool_small)
170                 m->options.small = val;
171         else if (attr == &ttm_page_pool_alloc_size) {
172                 if (val > NUM_PAGES_TO_ALLOC*8) {
173                         pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
174                                NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
175                                NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
176                         return size;
177                 } else if (val > NUM_PAGES_TO_ALLOC) {
178                         pr_warn("Setting allocation size to larger than %lu is not recommended\n",
179                                 NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
180                 }
181                 m->options.alloc_size = val;
182         }
183
184         return size;
185 }
186
187 static ssize_t ttm_pool_show(struct kobject *kobj,
188                 struct attribute *attr, char *buffer)
189 {
190         struct ttm_pool_manager *m =
191                 container_of(kobj, struct ttm_pool_manager, kobj);
192         unsigned val = 0;
193
194         if (attr == &ttm_page_pool_max)
195                 val = m->options.max_size;
196         else if (attr == &ttm_page_pool_small)
197                 val = m->options.small;
198         else if (attr == &ttm_page_pool_alloc_size)
199                 val = m->options.alloc_size;
200
201         val = val * (PAGE_SIZE >> 10);
202
203         return snprintf(buffer, PAGE_SIZE, "%u\n", val);
204 }
205
206 static const struct sysfs_ops ttm_pool_sysfs_ops = {
207         .show = &ttm_pool_show,
208         .store = &ttm_pool_store,
209 };
210
211 static struct kobj_type ttm_pool_kobj_type = {
212         .release = &ttm_pool_kobj_release,
213         .sysfs_ops = &ttm_pool_sysfs_ops,
214         .default_attrs = ttm_pool_attrs,
215 };
216
217 static struct ttm_pool_manager *_manager;
218
219 /**
220  * Select the right pool for the requested caching state and ttm flags. */
221 static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
222                                           enum ttm_caching_state cstate)
223 {
224         int pool_index;
225
226         if (cstate == tt_cached)
227                 return NULL;
228
229         if (cstate == tt_wc)
230                 pool_index = 0x0;
231         else
232                 pool_index = 0x1;
233
234         if (flags & TTM_PAGE_FLAG_DMA32) {
235                 if (huge)
236                         return NULL;
237                 pool_index |= 0x2;
238
239         } else if (huge) {
240                 pool_index |= 0x4;
241         }
242
243         return &_manager->pools[pool_index];
244 }
245
246 /* set memory back to wb and free the pages. */
247 static void ttm_pages_put(struct page *pages[], unsigned npages,
248                 unsigned int order)
249 {
250         unsigned int i, pages_nr = (1 << order);
251
252         if (order == 0) {
253                 if (ttm_set_pages_array_wb(pages, npages))
254                         pr_err("Failed to set %d pages to wb!\n", npages);
255         }
256
257         for (i = 0; i < npages; ++i) {
258                 if (order > 0) {
259                         if (ttm_set_pages_wb(pages[i], pages_nr))
260                                 pr_err("Failed to set %d pages to wb!\n", pages_nr);
261                 }
262                 __free_pages(pages[i], order);
263         }
264 }
265
266 static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
267                 unsigned freed_pages)
268 {
269         pool->npages -= freed_pages;
270         pool->nfrees += freed_pages;
271 }
272
273 /**
274  * Free pages from pool.
275  *
276  * To prevent hogging the ttm_swap process we free at most NUM_PAGES_TO_ALLOC
277  * pages in one go.
278  *
279  * @pool: Pool to free the pages from.
280  * @nr_free: Number of pages to free, or FREE_ALL_PAGES to drain the pool.
281  * @use_static: Safe to use the static buffer.
282  **/
283 static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
284                               bool use_static)
285 {
286         static struct page *static_buf[NUM_PAGES_TO_ALLOC];
287         unsigned long irq_flags;
288         struct page *p;
289         struct page **pages_to_free;
290         unsigned freed_pages = 0,
291                  npages_to_free = nr_free;
292
293         if (NUM_PAGES_TO_ALLOC < nr_free)
294                 npages_to_free = NUM_PAGES_TO_ALLOC;
295
296         if (use_static)
297                 pages_to_free = static_buf;
298         else
299                 pages_to_free = kmalloc_array(npages_to_free,
300                                               sizeof(struct page *),
301                                               GFP_KERNEL);
302         if (!pages_to_free) {
303                 pr_debug("Failed to allocate memory for pool free operation\n");
304                 return 0;
305         }
306
307 restart:
308         spin_lock_irqsave(&pool->lock, irq_flags);
309
310         list_for_each_entry_reverse(p, &pool->list, lru) {
311                 if (freed_pages >= npages_to_free)
312                         break;
313
314                 pages_to_free[freed_pages++] = p;
315                 /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
316                 if (freed_pages >= NUM_PAGES_TO_ALLOC) {
317                         /* remove range of pages from the pool */
318                         __list_del(p->lru.prev, &pool->list);
319
320                         ttm_pool_update_free_locked(pool, freed_pages);
321                         /**
322                          * Because changing page caching is costly
323                          * we unlock the pool to prevent stalling.
324                          */
325                         spin_unlock_irqrestore(&pool->lock, irq_flags);
326
327                         ttm_pages_put(pages_to_free, freed_pages, pool->order);
328                         if (likely(nr_free != FREE_ALL_PAGES))
329                                 nr_free -= freed_pages;
330
331                         if (NUM_PAGES_TO_ALLOC >= nr_free)
332                                 npages_to_free = nr_free;
333                         else
334                                 npages_to_free = NUM_PAGES_TO_ALLOC;
335
336                         freed_pages = 0;
337
338                         /* if there are still pages to free, restart the processing */
339                         if (nr_free)
340                                 goto restart;
341
342                         /* Not allowed to fall through or break because the
343                          * following context is inside the spinlock while we
344                          * are outside of it here.
345                          */
346                         goto out;
347
348                 }
349         }
350
351         /* remove range of pages from the pool */
352         if (freed_pages) {
353                 __list_del(&p->lru, &pool->list);
354
355                 ttm_pool_update_free_locked(pool, freed_pages);
356                 nr_free -= freed_pages;
357         }
358
359         spin_unlock_irqrestore(&pool->lock, irq_flags);
360
361         if (freed_pages)
362                 ttm_pages_put(pages_to_free, freed_pages, pool->order);
363 out:
364         if (pages_to_free != static_buf)
365                 kfree(pages_to_free);
366         return nr_free;
367 }
368
369 /**
370  * Callback for mm to request the pool to reduce the number of pages held.
371  *
372  * XXX: (dchinner) Deadlock warning!
373  *
374  * This code is crying out for a shrinker per pool....
375  */
376 static unsigned long
377 ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
378 {
379         static DEFINE_MUTEX(lock);
380         static unsigned start_pool;
381         unsigned i;
382         unsigned pool_offset;
383         struct ttm_page_pool *pool;
384         int shrink_pages = sc->nr_to_scan;
385         unsigned long freed = 0;
386         unsigned int nr_free_pool;
387
388         if (!mutex_trylock(&lock))
389                 return SHRINK_STOP;
390         pool_offset = ++start_pool % NUM_POOLS;
391         /* select start pool in round robin fashion */
392         for (i = 0; i < NUM_POOLS; ++i) {
393                 unsigned nr_free = shrink_pages;
394                 unsigned page_nr;
395
396                 if (shrink_pages == 0)
397                         break;
398
399                 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
400                 page_nr = (1 << pool->order);
401                 /* OK to use static buffer since global mutex is held. */
402                 nr_free_pool = roundup(nr_free, page_nr) >> pool->order;
403                 shrink_pages = ttm_page_pool_free(pool, nr_free_pool, true);
404                 freed += (nr_free_pool - shrink_pages) << pool->order;
405                 if (freed >= sc->nr_to_scan)
406                         break;
407                 shrink_pages <<= pool->order;
408         }
409         mutex_unlock(&lock);
410         return freed;
411 }
412
413
414 static unsigned long
415 ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
416 {
417         unsigned i;
418         unsigned long count = 0;
419         struct ttm_page_pool *pool;
420
421         for (i = 0; i < NUM_POOLS; ++i) {
422                 pool = &_manager->pools[i];
423                 count += (pool->npages << pool->order);
424         }
425
426         return count;
427 }
428
429 static int ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
430 {
431         manager->mm_shrink.count_objects = ttm_pool_shrink_count;
432         manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
433         manager->mm_shrink.seeks = 1;
434         return register_shrinker(&manager->mm_shrink);
435 }
436
437 static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
438 {
439         unregister_shrinker(&manager->mm_shrink);
440 }
441
442 static int ttm_set_pages_caching(struct page **pages,
443                 enum ttm_caching_state cstate, unsigned cpages)
444 {
445         int r = 0;
446         /* Set page caching */
447         switch (cstate) {
448         case tt_uncached:
449                 r = ttm_set_pages_array_uc(pages, cpages);
450                 if (r)
451                         pr_err("Failed to set %d pages to uc!\n", cpages);
452                 break;
453         case tt_wc:
454                 r = ttm_set_pages_array_wc(pages, cpages);
455                 if (r)
456                         pr_err("Failed to set %d pages to wc!\n", cpages);
457                 break;
458         default:
459                 break;
460         }
461         return r;
462 }
463
464 /**
465  * Free the pages that failed to change the caching state. If there are
466  * any pages that have already changed their caching state, put them back to
467  * the pool.
468  */
469 static void ttm_handle_caching_state_failure(struct list_head *pages,
470                 int ttm_flags, enum ttm_caching_state cstate,
471                 struct page **failed_pages, unsigned cpages)
472 {
473         unsigned i;
474         /* Failed pages have to be freed */
475         for (i = 0; i < cpages; ++i) {
476                 list_del(&failed_pages[i]->lru);
477                 __free_page(failed_pages[i]);
478         }
479 }
480
481 /**
482  * Allocate new pages with correct caching.
483  *
484  * This function is reentrant if the caller updates count depending on the number
485  * of pages returned in the pages array.
486  */
487 static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
488                                int ttm_flags, enum ttm_caching_state cstate,
489                                unsigned count, unsigned order)
490 {
491         struct page **caching_array;
492         struct page *p;
493         int r = 0;
494         unsigned i, j, cpages;
495         unsigned npages = 1 << order;
496         unsigned max_cpages = min(count << order, (unsigned)NUM_PAGES_TO_ALLOC);
497
498         /* allocate array for page caching change */
499         caching_array = kmalloc_array(max_cpages, sizeof(struct page *),
500                                       GFP_KERNEL);
501
502         if (!caching_array) {
503                 pr_debug("Unable to allocate table for new pages\n");
504                 return -ENOMEM;
505         }
506
507         for (i = 0, cpages = 0; i < count; ++i) {
508                 p = alloc_pages(gfp_flags, order);
509
510                 if (!p) {
511                         pr_debug("Unable to get page %u\n", i);
512
513                         /* store already allocated pages in the pool after
514                          * setting the caching state */
515                         if (cpages) {
516                                 r = ttm_set_pages_caching(caching_array,
517                                                           cstate, cpages);
518                                 if (r)
519                                         ttm_handle_caching_state_failure(pages,
520                                                 ttm_flags, cstate,
521                                                 caching_array, cpages);
522                         }
523                         r = -ENOMEM;
524                         goto out;
525                 }
526
527                 list_add(&p->lru, pages);
528
529 #ifdef CONFIG_HIGHMEM
530                 /* gfp flags of a highmem page should never include dma32, so
531                  * we should be fine in such a case
532                  */
533                 if (PageHighMem(p))
534                         continue;
535
536 #endif
537                 for (j = 0; j < npages; ++j) {
538                         caching_array[cpages++] = p++;
539                         if (cpages == max_cpages) {
540
541                                 r = ttm_set_pages_caching(caching_array,
542                                                 cstate, cpages);
543                                 if (r) {
544                                         ttm_handle_caching_state_failure(pages,
545                                                 ttm_flags, cstate,
546                                                 caching_array, cpages);
547                                         goto out;
548                                 }
549                                 cpages = 0;
550                         }
551                 }
552         }
553
554         if (cpages) {
555                 r = ttm_set_pages_caching(caching_array, cstate, cpages);
556                 if (r)
557                         ttm_handle_caching_state_failure(pages,
558                                         ttm_flags, cstate,
559                                         caching_array, cpages);
560         }
561 out:
562         kfree(caching_array);
563
564         return r;
565 }
566
567 /**
568  * Fill the given pool if there aren't enough pages and the requested number of
569  * pages is small.
570  */
571 static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags,
572                                       enum ttm_caching_state cstate,
573                                       unsigned count, unsigned long *irq_flags)
574 {
575         struct page *p;
576         int r;
577         unsigned cpages = 0;
578         /**
579          * Only allow one pool fill operation at a time.
580          * If the pool doesn't have enough pages for the allocation, new pages are
581          * allocated from outside of the pool.
582          */
583         if (pool->fill_lock)
584                 return;
585
586         pool->fill_lock = true;
587
588         /* If the allocation request is small and there are not enough
589          * pages in the pool, we fill the pool up first. */
590         if (count < _manager->options.small
591                 && count > pool->npages) {
592                 struct list_head new_pages;
593                 unsigned alloc_size = _manager->options.alloc_size;
594
595                 /**
596                  * Can't change page caching if in irqsave context. We have to
597                  * drop the pool->lock.
598                  */
599                 spin_unlock_irqrestore(&pool->lock, *irq_flags);
600
601                 INIT_LIST_HEAD(&new_pages);
602                 r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
603                                         cstate, alloc_size, 0);
604                 spin_lock_irqsave(&pool->lock, *irq_flags);
605
606                 if (!r) {
607                         list_splice(&new_pages, &pool->list);
608                         ++pool->nrefills;
609                         pool->npages += alloc_size;
610                 } else {
611                         pr_debug("Failed to fill pool (%p)\n", pool);
612                         /* If we have any pages left put them to the pool. */
613                         list_for_each_entry(p, &new_pages, lru) {
614                                 ++cpages;
615                         }
616                         list_splice(&new_pages, &pool->list);
617                         pool->npages += cpages;
618                 }
619
620         }
621         pool->fill_lock = false;
622 }
623
624 /**
625  * Allocate pages from the pool and put them on the return list.
626  *
627  * @return zero for success or negative error code.
628  */
629 static int ttm_page_pool_get_pages(struct ttm_page_pool *pool,
630                                    struct list_head *pages,
631                                    int ttm_flags,
632                                    enum ttm_caching_state cstate,
633                                    unsigned count, unsigned order)
634 {
635         unsigned long irq_flags;
636         struct list_head *p;
637         unsigned i;
638         int r = 0;
639
640         spin_lock_irqsave(&pool->lock, irq_flags);
641         if (!order)
642                 ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count,
643                                           &irq_flags);
644
645         if (count >= pool->npages) {
646                 /* take all pages from the pool */
647                 list_splice_init(&pool->list, pages);
648                 count -= pool->npages;
649                 pool->npages = 0;
650                 goto out;
651         }
652         /* Find the last page to include for the requested number of pages. Walk
653          * the list from whichever end is closer to halve the search space. */
654         if (count <= pool->npages/2) {
655                 i = 0;
656                 list_for_each(p, &pool->list) {
657                         if (++i == count)
658                                 break;
659                 }
660         } else {
661                 i = pool->npages + 1;
662                 list_for_each_prev(p, &pool->list) {
663                         if (--i == count)
664                                 break;
665                 }
666         }
667         /* Cut 'count' number of pages from the pool */
668         list_cut_position(pages, &pool->list, p);
669         pool->npages -= count;
670         count = 0;
671 out:
672         spin_unlock_irqrestore(&pool->lock, irq_flags);
673
674         /* clear the pages coming from the pool if requested */
675         if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
676                 struct page *page;
677
678                 list_for_each_entry(page, pages, lru) {
679                         if (PageHighMem(page))
680                                 clear_highpage(page);
681                         else
682                                 clear_page(page_address(page));
683                 }
684         }
685
686         /* If the pool didn't have enough pages, allocate new ones. */
687         if (count) {
688                 gfp_t gfp_flags = pool->gfp_flags;
689
690                 /* set zero flag for page allocation if required */
691                 if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
692                         gfp_flags |= __GFP_ZERO;
693
694                 if (ttm_flags & TTM_PAGE_FLAG_NO_RETRY)
695                         gfp_flags |= __GFP_RETRY_MAYFAIL;
696
697                 /* ttm_alloc_new_pages doesn't reference pool so we can run
698                  * multiple requests in parallel.
699                  **/
700                 r = ttm_alloc_new_pages(pages, gfp_flags, ttm_flags, cstate,
701                                         count, order);
702         }
703
704         return r;
705 }
706
707 /* Put all pages in the pages array into the correct pool to wait for reuse */
708 static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
709                           enum ttm_caching_state cstate)
710 {
711         struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
712 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
713         struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
714 #endif
715         unsigned long irq_flags;
716         unsigned i;
717
718         if (pool == NULL) {
719                 /* No pool for this memory type so free the pages */
720                 i = 0;
721                 while (i < npages) {
722 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
723                         struct page *p = pages[i];
724 #endif
725                         unsigned order = 0, j;
726
727                         if (!pages[i]) {
728                                 ++i;
729                                 continue;
730                         }
731
732 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
733                         if (!(flags & TTM_PAGE_FLAG_DMA32)) {
734                                 for (j = 0; j < HPAGE_PMD_NR; ++j)
735                                         if (p++ != pages[i + j])
736                                             break;
737
738                                 if (j == HPAGE_PMD_NR)
739                                         order = HPAGE_PMD_ORDER;
740                         }
741 #endif
742
743                         if (page_count(pages[i]) != 1)
744                                 pr_err("Erroneous page count. Leaking pages.\n");
745                         __free_pages(pages[i], order);
746
747                         j = 1 << order;
748                         while (j) {
749                                 pages[i++] = NULL;
750                                 --j;
751                         }
752                 }
753                 return;
754         }
755
756         i = 0;
757 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
758         if (huge) {
759                 unsigned max_size, n2free;
760
761                 spin_lock_irqsave(&huge->lock, irq_flags);
762                 while (i < npages) {
763                         struct page *p = pages[i];
764                         unsigned j;
765
766                         if (!p)
767                                 break;
768
769                         for (j = 0; j < HPAGE_PMD_NR; ++j)
770                                 if (p++ != pages[i + j])
771                                     break;
772
773                         if (j != HPAGE_PMD_NR)
774                                 break;
775
776                         list_add_tail(&pages[i]->lru, &huge->list);
777
778                         for (j = 0; j < HPAGE_PMD_NR; ++j)
779                                 pages[i++] = NULL;
780                         huge->npages++;
781                 }
782
783                 /* Check that we don't go over the pool limit */
784                 max_size = _manager->options.max_size;
785                 max_size /= HPAGE_PMD_NR;
786                 if (huge->npages > max_size)
787                         n2free = huge->npages - max_size;
788                 else
789                         n2free = 0;
790                 spin_unlock_irqrestore(&huge->lock, irq_flags);
791                 if (n2free)
792                         ttm_page_pool_free(huge, n2free, false);
793         }
794 #endif
795
796         spin_lock_irqsave(&pool->lock, irq_flags);
797         while (i < npages) {
798                 if (pages[i]) {
799                         if (page_count(pages[i]) != 1)
800                                 pr_err("Erroneous page count. Leaking pages.\n");
801                         list_add_tail(&pages[i]->lru, &pool->list);
802                         pages[i] = NULL;
803                         pool->npages++;
804                 }
805                 ++i;
806         }
807         /* Check that we don't go over the pool limit */
808         npages = 0;
809         if (pool->npages > _manager->options.max_size) {
810                 npages = pool->npages - _manager->options.max_size;
811                 /* free at least NUM_PAGES_TO_ALLOC number of pages
812                  * to reduce calls to set_memory_wb */
813                 if (npages < NUM_PAGES_TO_ALLOC)
814                         npages = NUM_PAGES_TO_ALLOC;
815         }
816         spin_unlock_irqrestore(&pool->lock, irq_flags);
817         if (npages)
818                 ttm_page_pool_free(pool, npages, false);
819 }
820
821 /*
822  * On success the pages array will hold npages correctly
823  * cached pages.
824  */
825 static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
826                          enum ttm_caching_state cstate)
827 {
828         struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
829 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
830         struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
831 #endif
832         struct list_head plist;
833         struct page *p = NULL;
834         unsigned count, first;
835         int r;
836
837         /* No pool for cached pages */
838         if (pool == NULL) {
839                 gfp_t gfp_flags = GFP_USER;
840                 unsigned i;
841 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
842                 unsigned j;
843 #endif
844
845                 /* set zero flag for page allocation if required */
846                 if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
847                         gfp_flags |= __GFP_ZERO;
848
849                 if (flags & TTM_PAGE_FLAG_NO_RETRY)
850                         gfp_flags |= __GFP_RETRY_MAYFAIL;
851
852                 if (flags & TTM_PAGE_FLAG_DMA32)
853                         gfp_flags |= GFP_DMA32;
854                 else
855                         gfp_flags |= GFP_HIGHUSER;
856
857                 i = 0;
858 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
859                 if (!(gfp_flags & GFP_DMA32)) {
860                         while (npages >= HPAGE_PMD_NR) {
861                                 gfp_t huge_flags = gfp_flags;
862
863                                 huge_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
864                                         __GFP_KSWAPD_RECLAIM;
865                                 huge_flags &= ~__GFP_MOVABLE;
866                                 huge_flags &= ~__GFP_COMP;
867                                 p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
868                                 if (!p)
869                                         break;
870
871                                 for (j = 0; j < HPAGE_PMD_NR; ++j)
872                                         pages[i++] = p++;
873
874                                 npages -= HPAGE_PMD_NR;
875                         }
876                 }
877 #endif
878
879                 first = i;
880                 while (npages) {
881                         p = alloc_page(gfp_flags);
882                         if (!p) {
883                                 pr_debug("Unable to allocate page\n");
884                                 return -ENOMEM;
885                         }
886
887                         /* Swap the pages if we detect consecutive order */
888                         if (i > first && pages[i - 1] == p - 1)
889                                 swap(p, pages[i - 1]);
890
891                         pages[i++] = p;
892                         --npages;
893                 }
894                 return 0;
895         }
896
897         count = 0;
898
899 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
900         if (huge && npages >= HPAGE_PMD_NR) {
901                 INIT_LIST_HEAD(&plist);
902                 ttm_page_pool_get_pages(huge, &plist, flags, cstate,
903                                         npages / HPAGE_PMD_NR,
904                                         HPAGE_PMD_ORDER);
905
906                 list_for_each_entry(p, &plist, lru) {
907                         unsigned j;
908
909                         for (j = 0; j < HPAGE_PMD_NR; ++j)
910                                 pages[count++] = &p[j];
911                 }
912         }
913 #endif
914
915         INIT_LIST_HEAD(&plist);
916         r = ttm_page_pool_get_pages(pool, &plist, flags, cstate,
917                                     npages - count, 0);
918
919         first = count;
920         list_for_each_entry(p, &plist, lru) {
921                 struct page *tmp = p;
922
923                 /* Swap the pages if we detect consecutive order */
924                 if (count > first && pages[count - 1] == tmp - 1)
925                         swap(tmp, pages[count - 1]);
926                 pages[count++] = tmp;
927         }
928
929         if (r) {
930                 /* If there are any pages in the list, put them back to
931                  * the pool.
932                  */
933                 pr_debug("Failed to allocate extra pages for large request\n");
934                 ttm_put_pages(pages, count, flags, cstate);
935                 return r;
936         }
937
938         return 0;
939 }
940
941 static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
942                 char *name, unsigned int order)
943 {
944         spin_lock_init(&pool->lock);
945         pool->fill_lock = false;
946         INIT_LIST_HEAD(&pool->list);
947         pool->npages = pool->nfrees = 0;
948         pool->gfp_flags = flags;
949         pool->name = name;
950         pool->order = order;
951 }
952
953 int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
954 {
955         int ret;
956 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
957         unsigned order = HPAGE_PMD_ORDER;
958 #else
959         unsigned order = 0;
960 #endif
961
962         WARN_ON(_manager);
963
964         pr_info("Initializing pool allocator\n");
965
966         _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
967         if (!_manager)
968                 return -ENOMEM;
969
970         ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0);
971
972         ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc", 0);
973
974         ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
975                                   GFP_USER | GFP_DMA32, "wc dma", 0);
976
977         ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
978                                   GFP_USER | GFP_DMA32, "uc dma", 0);
979
980         ttm_page_pool_init_locked(&_manager->wc_pool_huge,
981                                   (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
982                                    __GFP_KSWAPD_RECLAIM) &
983                                   ~(__GFP_MOVABLE | __GFP_COMP),
984                                   "wc huge", order);
985
986         ttm_page_pool_init_locked(&_manager->uc_pool_huge,
987                                   (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
988                                    __GFP_KSWAPD_RECLAIM) &
989                                   ~(__GFP_MOVABLE | __GFP_COMP)
990                                   , "uc huge", order);
991
992         _manager->options.max_size = max_pages;
993         _manager->options.small = SMALL_ALLOCATION;
994         _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
995
996         ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
997                                    &glob->kobj, "pool");
998         if (unlikely(ret != 0))
999                 goto error;
1000
1001         ret = ttm_pool_mm_shrink_init(_manager);
1002         if (unlikely(ret != 0))
1003                 goto error;
1004         return 0;
1005
1006 error:
1007         kobject_put(&_manager->kobj);
1008         _manager = NULL;
1009         return ret;
1010 }
1011
1012 void ttm_page_alloc_fini(void)
1013 {
1014         int i;
1015
1016         pr_info("Finalizing pool allocator\n");
1017         ttm_pool_mm_shrink_fini(_manager);
1018
1019         /* OK to use static buffer since global mutex is no longer used. */
1020         for (i = 0; i < NUM_POOLS; ++i)
1021                 ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true);
1022
1023         kobject_put(&_manager->kobj);
1024         _manager = NULL;
1025 }
1026
1027 static void
1028 ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update)
1029 {
1030         struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
1031         unsigned i;
1032
1033         if (mem_count_update == 0)
1034                 goto put_pages;
1035
1036         for (i = 0; i < mem_count_update; ++i) {
1037                 if (!ttm->pages[i])
1038                         continue;
1039
1040                 ttm_mem_global_free_page(mem_glob, ttm->pages[i], PAGE_SIZE);
1041         }
1042
1043 put_pages:
1044         ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
1045                       ttm->caching_state);
1046         ttm->state = tt_unpopulated;
1047 }
1048
1049 int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
1050 {
1051         struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
1052         unsigned i;
1053         int ret;
1054
1055         if (ttm->state != tt_unpopulated)
1056                 return 0;
1057
1058         if (ttm_check_under_lowerlimit(mem_glob, ttm->num_pages, ctx))
1059                 return -ENOMEM;
1060
1061         ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
1062                             ttm->caching_state);
1063         if (unlikely(ret != 0)) {
1064                 ttm_pool_unpopulate_helper(ttm, 0);
1065                 return ret;
1066         }
1067
1068         for (i = 0; i < ttm->num_pages; ++i) {
1069                 ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
1070                                                 PAGE_SIZE, ctx);
1071                 if (unlikely(ret != 0)) {
1072                         ttm_pool_unpopulate_helper(ttm, i);
1073                         return -ENOMEM;
1074                 }
1075         }
1076
1077         if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
1078                 ret = ttm_tt_swapin(ttm);
1079                 if (unlikely(ret != 0)) {
1080                         ttm_pool_unpopulate(ttm);
1081                         return ret;
1082                 }
1083         }
1084
1085         ttm->state = tt_unbound;
1086         return 0;
1087 }
1088 EXPORT_SYMBOL(ttm_pool_populate);
1089
1090 void ttm_pool_unpopulate(struct ttm_tt *ttm)
1091 {
1092         ttm_pool_unpopulate_helper(ttm, ttm->num_pages);
1093 }
1094 EXPORT_SYMBOL(ttm_pool_unpopulate);
1095
1096 int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
1097                                         struct ttm_operation_ctx *ctx)
1098 {
1099         unsigned i, j;
1100         int r;
1101
1102         r = ttm_pool_populate(&tt->ttm, ctx);
1103         if (r)
1104                 return r;
1105
1106         for (i = 0; i < tt->ttm.num_pages; ++i) {
1107                 struct page *p = tt->ttm.pages[i];
1108                 size_t num_pages = 1;
1109
1110                 for (j = i + 1; j < tt->ttm.num_pages; ++j) {
1111                         if (++p != tt->ttm.pages[j])
1112                                 break;
1113
1114                         ++num_pages;
1115                 }
1116
1117                 tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i],
1118                                                   0, num_pages * PAGE_SIZE,
1119                                                   DMA_BIDIRECTIONAL);
1120                 if (dma_mapping_error(dev, tt->dma_address[i])) {
1121                         while (i--) {
1122                                 dma_unmap_page(dev, tt->dma_address[i],
1123                                                PAGE_SIZE, DMA_BIDIRECTIONAL);
1124                                 tt->dma_address[i] = 0;
1125                         }
1126                         ttm_pool_unpopulate(&tt->ttm);
1127                         return -EFAULT;
1128                 }
1129
1130                 for (j = 1; j < num_pages; ++j) {
1131                         tt->dma_address[i + 1] = tt->dma_address[i] + PAGE_SIZE;
1132                         ++i;
1133                 }
1134         }
1135         return 0;
1136 }
1137 EXPORT_SYMBOL(ttm_populate_and_map_pages);
1138
1139 void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
1140 {
1141         unsigned i, j;
1142
1143         for (i = 0; i < tt->ttm.num_pages;) {
1144                 struct page *p = tt->ttm.pages[i];
1145                 size_t num_pages = 1;
1146
1147                 if (!tt->dma_address[i] || !tt->ttm.pages[i]) {
1148                         ++i;
1149                         continue;
1150                 }
1151
1152                 for (j = i + 1; j < tt->ttm.num_pages; ++j) {
1153                         if (++p != tt->ttm.pages[j])
1154                                 break;
1155
1156                         ++num_pages;
1157                 }
1158
1159                 dma_unmap_page(dev, tt->dma_address[i], num_pages * PAGE_SIZE,
1160                                DMA_BIDIRECTIONAL);
1161
1162                 i += num_pages;
1163         }
1164         ttm_pool_unpopulate(&tt->ttm);
1165 }
1166 EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages);
1167
1168 int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
1169 {
1170         struct ttm_page_pool *p;
1171         unsigned i;
1172         char *h[] = {"pool", "refills", "pages freed", "size"};
1173         if (!_manager) {
1174                 seq_printf(m, "No pool allocator running.\n");
1175                 return 0;
1176         }
1177         seq_printf(m, "%7s %12s %13s %8s\n",
1178                         h[0], h[1], h[2], h[3]);
1179         for (i = 0; i < NUM_POOLS; ++i) {
1180                 p = &_manager->pools[i];
1181
1182                 seq_printf(m, "%7s %12ld %13ld %8d\n",
1183                                 p->name, p->nrefills,
1184                                 p->nfrees, p->npages);
1185         }
1186         return 0;
1187 }
1188 EXPORT_SYMBOL(ttm_page_alloc_debugfs);
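
/*
 * Usage sketch: how a driver's ttm_bo_driver callbacks might forward to the
 * helpers exported above. This is a minimal illustration, not code from this
 * file: the my_*() names and the way the struct device is obtained are
 * hypothetical driver code, and the callback prototypes assume the
 * ttm_tt_populate()/ttm_tt_unpopulate() signatures of this kernel generation.
 */
static int my_ttm_tt_populate(struct ttm_tt *ttm,
                              struct ttm_operation_ctx *ctx)
{
        /* The driver keeps its DMA addresses in a ttm_dma_tt wrapper. */
        struct ttm_dma_tt *dma_tt = container_of(ttm, struct ttm_dma_tt, ttm);
        struct device *dev = my_get_device(ttm->bdev);  /* hypothetical helper */

        /* Allocate uc/wc pages from the pools and DMA-map contiguous runs. */
        return ttm_populate_and_map_pages(dev, dma_tt, ctx);
}

static void my_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
        struct ttm_dma_tt *dma_tt = container_of(ttm, struct ttm_dma_tt, ttm);
        struct device *dev = my_get_device(ttm->bdev);  /* hypothetical helper */

        /* Unmap the DMA addresses and return the pages to the pools. */
        ttm_unmap_and_unpopulate_pages(dev, dma_tt);
}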