// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

/* Pooling of allocated pages is necessary because changing the caching
 * attributes of the linear mapping on x86 requires a costly cross-CPU TLB
 * invalidate for those addresses.
 *
 * In addition, allocations from the DMA coherent API are pooled as well
 * because they are rather slow compared to alloc_pages+map.
 */
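
/*
 * A minimal usage sketch (illustrative only, driver names like "adev" are
 * hypothetical): a driver initializes one pool per device, fills ttm_tt
 * objects from it and gives the pages back on unpopulate:
 *
 *	ttm_pool_init(&adev->pool, adev->dev, NUMA_NO_NODE, false, false);
 *	r = ttm_pool_alloc(&adev->pool, tt, &ctx);
 *	...
 *	ttm_pool_free(&adev->pool, tt);
 *	ttm_pool_fini(&adev->pool);
 */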

#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/ttm/ttm_pool.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_bo.h>

#include "ttm_module.h"

/**
 * struct ttm_pool_dma - Helper object for coherent DMA mappings
 *
 * @addr: original DMA address returned for the mapping
 * @vaddr: original vaddr returned for the mapping, with the allocation
 *	order stored in the lower bits
 */
struct ttm_pool_dma {
        dma_addr_t addr;
        unsigned long vaddr;
};
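
/*
 * The CPU mapping is at least PAGE_SIZE aligned, so the low PAGE_SHIFT
 * bits of @vaddr are zero and can carry the allocation order instead;
 * see ttm_pool_page_order() for the decode.
 */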

static unsigned long page_pool_size;

MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
module_param(page_pool_size, ulong, 0644);

static atomic_long_t allocated_pages;

static struct ttm_pool_type global_write_combined[NR_PAGE_ORDERS];
static struct ttm_pool_type global_uncached[NR_PAGE_ORDERS];

static struct ttm_pool_type global_dma32_write_combined[NR_PAGE_ORDERS];
static struct ttm_pool_type global_dma32_uncached[NR_PAGE_ORDERS];

static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker *mm_shrinker;
static DECLARE_RWSEM(pool_shrink_rwsem);

/* Allocate pages of size 1 << order with the given gfp_flags */
static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
                                        unsigned int order)
{
        unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
        struct ttm_pool_dma *dma;
        struct page *p;
        void *vaddr;

        /* Don't set the __GFP_COMP flag for higher order allocations.
         * Mapping pages directly into a userspace process and calling
         * put_page() on a TTM allocated page is illegal.
         */
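        /* Keep higher order allocations opportunistic: stay out of the
         * emergency memory reserves, don't retry hard and don't warn on
         * failure, but still kick kswapd; falling back to a smaller order
         * is cheap.
         */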
        if (order)
                gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
                        __GFP_KSWAPD_RECLAIM;

        if (!pool->use_dma_alloc) {
                p = alloc_pages_node(pool->nid, gfp_flags, order);
                if (p)
                        p->private = order;
                return p;
        }

        dma = kmalloc(sizeof(*dma), GFP_KERNEL);
        if (!dma)
                return NULL;

        if (order)
                attr |= DMA_ATTR_NO_WARN;

        vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
                                &dma->addr, gfp_flags, attr);
        if (!vaddr)
                goto error_free;

        /* TODO: This is an illegal abuse of the DMA API, but we need to rework
         * TTM page fault handling and extend the DMA API to clean this up.
         */
        if (is_vmalloc_addr(vaddr))
                p = vmalloc_to_page(vaddr);
        else
                p = virt_to_page(vaddr);

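        /* Stash the order in the low bits of the vaddr and let the page
         * point back at the DMA bookkeeping through page->private.
         */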
        dma->vaddr = (unsigned long)vaddr | order;
        p->private = (unsigned long)dma;
        return p;

error_free:
        kfree(dma);
        return NULL;
}

/* Reset the caching and pages of size 1 << order */
static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
                               unsigned int order, struct page *p)
{
        unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
        struct ttm_pool_dma *dma;
        void *vaddr;

#ifdef CONFIG_X86
        /* We don't care that set_pages_wb is inefficient here. This is only
         * used when we have to shrink and CPU overhead is irrelevant then.
         */
        if (caching != ttm_cached && !PageHighMem(p))
                set_pages_wb(p, 1 << order);
#endif

        if (!pool || !pool->use_dma_alloc) {
                __free_pages(p, order);
                return;
        }

        if (order)
                attr |= DMA_ATTR_NO_WARN;

        dma = (void *)p->private;
        vaddr = (void *)(dma->vaddr & PAGE_MASK);
        dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
                       attr);
        kfree(dma);
}

/* Apply a new caching to an array of pages */
static int ttm_pool_apply_caching(struct page **first, struct page **last,
                                  enum ttm_caching caching)
{
#ifdef CONFIG_X86
        unsigned int num_pages = last - first;

        if (!num_pages)
                return 0;

        switch (caching) {
        case ttm_cached:
                break;
        case ttm_write_combined:
                return set_pages_array_wc(first, num_pages);
        case ttm_uncached:
                return set_pages_array_uc(first, num_pages);
        }
#endif
        return 0;
}

/* Map pages of 1 << order size and fill the DMA address array */
static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
                        struct page *p, dma_addr_t **dma_addr)
{
        dma_addr_t addr;
        unsigned int i;

        if (pool->use_dma_alloc) {
                struct ttm_pool_dma *dma = (void *)p->private;

                addr = dma->addr;
        } else {
                size_t size = (1ULL << order) * PAGE_SIZE;

                addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
                if (dma_mapping_error(pool->dev, addr))
                        return -EFAULT;
        }

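        /* The mapping is contiguous in DMA address space, so each page's
         * address is PAGE_SIZE past the previous one.
         */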
        for (i = 1 << order; i ; --i) {
                *(*dma_addr)++ = addr;
                addr += PAGE_SIZE;
        }

        return 0;
}

/* Unmap pages of 1 << order size */
static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
                           unsigned int num_pages)
{
        /* Unmapped while freeing the page */
        if (pool->use_dma_alloc)
                return;

        dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
                       DMA_BIDIRECTIONAL);
}

/* Give pages into a specific pool_type */
static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
{
        unsigned int i, num_pages = 1 << pt->order;

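        /* Clear the pages before giving them back, so that a later
         * allocation from the pool never observes stale data.
         */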
        for (i = 0; i < num_pages; ++i) {
                if (PageHighMem(p))
                        clear_highpage(p + i);
                else
                        clear_page(page_address(p + i));
        }

        spin_lock(&pt->lock);
        list_add(&p->lru, &pt->pages);
        spin_unlock(&pt->lock);
        atomic_long_add(1 << pt->order, &allocated_pages);
}

/* Take pages from a specific pool_type, return NULL when nothing available */
static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
{
        struct page *p;

        spin_lock(&pt->lock);
        p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
        if (p) {
                atomic_long_sub(1 << pt->order, &allocated_pages);
                list_del(&p->lru);
        }
        spin_unlock(&pt->lock);

        return p;
}

/* Initialize and add a pool type to the global shrinker list */
static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
                               enum ttm_caching caching, unsigned int order)
{
        pt->pool = pool;
        pt->caching = caching;
        pt->order = order;
        spin_lock_init(&pt->lock);
        INIT_LIST_HEAD(&pt->pages);

        spin_lock(&shrinker_lock);
        list_add_tail(&pt->shrinker_list, &shrinker_list);
        spin_unlock(&shrinker_lock);
}

/* Remove a pool_type from the global shrinker list and free all pages */
static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
        struct page *p;

        spin_lock(&shrinker_lock);
        list_del(&pt->shrinker_list);
        spin_unlock(&shrinker_lock);

        while ((p = ttm_pool_type_take(pt)))
                ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
}

/* Return the pool_type to use for the given caching and order */
static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
                                                  enum ttm_caching caching,
                                                  unsigned int order)
{
        if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE)
                return &pool->caching[caching].orders[order];

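        /* Only x86 pools write-combined and uncached pages globally, since
         * only there the costly caching transition of the linear mapping
         * makes pooling worthwhile. Returning NULL makes the caller free
         * pages immediately instead of pooling them.
         */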
#ifdef CONFIG_X86
        switch (caching) {
        case ttm_write_combined:
                if (pool->use_dma32)
                        return &global_dma32_write_combined[order];

                return &global_write_combined[order];
        case ttm_uncached:
                if (pool->use_dma32)
                        return &global_dma32_uncached[order];

                return &global_uncached[order];
        default:
                break;
        }
#endif

        return NULL;
}

/* Free pages using the global shrinker list */
static unsigned int ttm_pool_shrink(void)
{
        struct ttm_pool_type *pt;
        unsigned int num_pages;
        struct page *p;

        down_read(&pool_shrink_rwsem);
        spin_lock(&shrinker_lock);
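        /* Pick the pool type at the head of the list and rotate it to the
         * tail, so successive calls shrink the pool types round-robin.
         */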
        pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
        list_move_tail(&pt->shrinker_list, &shrinker_list);
        spin_unlock(&shrinker_lock);

        p = ttm_pool_type_take(pt);
        if (p) {
                ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
                num_pages = 1 << pt->order;
        } else {
                num_pages = 0;
        }
        up_read(&pool_shrink_rwsem);

        return num_pages;
}

/* Return the allocation order of a page */
static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
{
        if (pool->use_dma_alloc) {
                struct ttm_pool_dma *dma = (void *)p->private;

                return dma->vaddr & ~PAGE_MASK;
        }

        return p->private;
}

/* Called when we got a page, either from a pool or newly allocated */
static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
                                   struct page *p, dma_addr_t **dma_addr,
                                   unsigned long *num_pages,
                                   struct page ***pages)
{
        unsigned int i;
        int r;

        if (*dma_addr) {
                r = ttm_pool_map(pool, order, p, dma_addr);
                if (r)
                        return r;
        }

        *num_pages -= 1 << order;
        for (i = 1 << order; i; --i, ++(*pages), ++p)
                **pages = p;

        return 0;
}

/**
 * ttm_pool_free_range() - Free a range of TTM pages
 * @pool: The pool used for allocating.
 * @tt: The struct ttm_tt holding the page pointers.
 * @caching: The page caching mode used by the range.
 * @start_page: index for first page to free.
 * @end_page: index for last page to free + 1.
 *
 * During allocation the ttm_tt page-vector may be populated with ranges of
 * pages with different attributes if allocation hit an error without being
 * able to completely fulfill the allocation. This function can be used
 * to free these individual ranges.
 */
static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
                                enum ttm_caching caching,
                                pgoff_t start_page, pgoff_t end_page)
{
        struct page **pages = &tt->pages[start_page];
        unsigned int order;
        pgoff_t i, nr;

        for (i = start_page; i < end_page; i += nr, pages += nr) {
                struct ttm_pool_type *pt = NULL;

                order = ttm_pool_page_order(pool, *pages);
                nr = (1UL << order);
                if (tt->dma_address)
                        ttm_pool_unmap(pool, tt->dma_address[i], nr);

                pt = ttm_pool_select_type(pool, caching, order);
                if (pt)
                        ttm_pool_type_give(pt, *pages);
                else
                        ttm_pool_free_page(pool, caching, order, *pages);
        }
}

/**
 * ttm_pool_alloc - Fill a ttm_tt object
 *
 * @pool: ttm_pool to use
 * @tt: ttm_tt object to fill
 * @ctx: operation context
 *
 * Fill the ttm_tt object with pages and also make sure to DMA map them when
 * necessary.
 *
 * Returns: 0 on success, negative error code otherwise.
 */
int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
                   struct ttm_operation_ctx *ctx)
{
        pgoff_t num_pages = tt->num_pages;
        dma_addr_t *dma_addr = tt->dma_address;
        struct page **caching = tt->pages;
        struct page **pages = tt->pages;
        enum ttm_caching page_caching;
        gfp_t gfp_flags = GFP_USER;
        pgoff_t caching_divide;
        unsigned int order;
        struct page *p;
        int r;

        WARN_ON(!num_pages || ttm_tt_is_populated(tt));
        WARN_ON(dma_addr && !pool->dev);

        if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC)
                gfp_flags |= __GFP_ZERO;

        if (ctx->gfp_retry_mayfail)
                gfp_flags |= __GFP_RETRY_MAYFAIL;

        if (pool->use_dma32)
                gfp_flags |= GFP_DMA32;
        else
                gfp_flags |= GFP_HIGHUSER;

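        /* Start with the highest order that is still useful for the
         * request and fall back to smaller orders as allocations fail,
         * preferring pooled pages over fresh ones for each order.
         */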
        for (order = min_t(unsigned int, MAX_PAGE_ORDER, __fls(num_pages));
             num_pages;
             order = min_t(unsigned int, order, __fls(num_pages))) {
                struct ttm_pool_type *pt;

                page_caching = tt->caching;
                pt = ttm_pool_select_type(pool, tt->caching, order);
                p = pt ? ttm_pool_type_take(pt) : NULL;
                if (p) {
                        r = ttm_pool_apply_caching(caching, pages,
                                                   tt->caching);
                        if (r)
                                goto error_free_page;

                        caching = pages;
                        do {
                                r = ttm_pool_page_allocated(pool, order, p,
                                                            &dma_addr,
                                                            &num_pages,
                                                            &pages);
                                if (r)
                                        goto error_free_page;

                                caching = pages;
                                if (num_pages < (1 << order))
                                        break;

                                p = ttm_pool_type_take(pt);
                        } while (p);
                }

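                /* Freshly allocated pages are still cached until
                 * ttm_pool_apply_caching() has run; track that so the
                 * error path frees them with the right attributes.
                 */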
                page_caching = ttm_cached;
                while (num_pages >= (1 << order) &&
                       (p = ttm_pool_alloc_page(pool, gfp_flags, order))) {

                        if (PageHighMem(p)) {
                                r = ttm_pool_apply_caching(caching, pages,
                                                           tt->caching);
                                if (r)
                                        goto error_free_page;
                                caching = pages;
                        }
                        r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
                                                    &num_pages, &pages);
                        if (r)
                                goto error_free_page;
                        if (PageHighMem(p))
                                caching = pages;
                }

                if (!p) {
                        if (order) {
                                --order;
                                continue;
                        }
                        r = -ENOMEM;
                        goto error_free_all;
                }
        }

        r = ttm_pool_apply_caching(caching, pages, tt->caching);
        if (r)
                goto error_free_all;

        return 0;

error_free_page:
        ttm_pool_free_page(pool, page_caching, order, p);

error_free_all:
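        /* Everything before the caching divide already has tt->caching
         * applied; the remainder of the populated range is still cached,
         * so free the two ranges with matching attributes.
         */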
        num_pages = tt->num_pages - num_pages;
        caching_divide = caching - tt->pages;
        ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide);
        ttm_pool_free_range(pool, tt, ttm_cached, caching_divide, num_pages);

        return r;
}
EXPORT_SYMBOL(ttm_pool_alloc);

/**
 * ttm_pool_free - Free the backing pages from a ttm_tt object
 *
 * @pool: Pool to give pages back to.
 * @tt: ttm_tt object to unpopulate
 *
 * Give the backing pages back to a pool or free them
 */
void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
{
        ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages);

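        /* Keep the total pool size below the limit set by the
         * page_pool_size module parameter.
         */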
        while (atomic_long_read(&allocated_pages) > page_pool_size)
                ttm_pool_shrink();
}
EXPORT_SYMBOL(ttm_pool_free);

/**
 * ttm_pool_init - Initialize a pool
 *
 * @pool: the pool to initialize
 * @dev: device for DMA allocations and mappings
 * @nid: NUMA node to use for allocations
 * @use_dma_alloc: true if coherent DMA alloc should be used
 * @use_dma32: true if GFP_DMA32 should be used
 *
 * Initialize the pool and its pool types.
 */
void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
                   int nid, bool use_dma_alloc, bool use_dma32)
{
        unsigned int i, j;

        WARN_ON(!dev && use_dma_alloc);

        pool->dev = dev;
        pool->nid = nid;
        pool->use_dma_alloc = use_dma_alloc;
        pool->use_dma32 = use_dma32;

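        /* DMA-coherent and NUMA-bound pools can't share the global pool
         * types, so they get their own set per caching mode and order.
         */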
        if (use_dma_alloc || nid != NUMA_NO_NODE) {
                for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
                        for (j = 0; j < NR_PAGE_ORDERS; ++j)
                                ttm_pool_type_init(&pool->caching[i].orders[j],
                                                   pool, i, j);
        }
}
EXPORT_SYMBOL(ttm_pool_init);

/**
 * ttm_pool_synchronize_shrinkers - Wait for all running shrinkers to complete.
 *
 * This is useful to guarantee that all shrinker invocations have seen an
 * update before freeing memory, similar to RCU.
 */
static void ttm_pool_synchronize_shrinkers(void)
{
        down_write(&pool_shrink_rwsem);
        up_write(&pool_shrink_rwsem);
}

/**
 * ttm_pool_fini - Cleanup a pool
 *
 * @pool: the pool to clean up
 *
 * Free all pages in the pool and unregister the types from the global
 * shrinker.
 */
void ttm_pool_fini(struct ttm_pool *pool)
{
        unsigned int i, j;

        if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) {
                for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
                        for (j = 0; j < NR_PAGE_ORDERS; ++j)
                                ttm_pool_type_fini(&pool->caching[i].orders[j]);
        }

        /* We removed the pool types from the LRU, but we need to also make sure
         * that no shrinker is concurrently freeing pages from the pool.
         */
        ttm_pool_synchronize_shrinkers();
}
EXPORT_SYMBOL(ttm_pool_fini);

/* As long as pages are available, make sure to release at least one */
static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
                                            struct shrink_control *sc)
{
        unsigned long num_freed = 0;

        do
                num_freed += ttm_pool_shrink();
        while (!num_freed && atomic_long_read(&allocated_pages));

        return num_freed;
}

/* Return the number of pages available or SHRINK_EMPTY if we have none */
static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
                                             struct shrink_control *sc)
{
        unsigned long num_pages = atomic_long_read(&allocated_pages);

        return num_pages ? num_pages : SHRINK_EMPTY;
}

#ifdef CONFIG_DEBUG_FS
/* Count the number of pages available in a pool_type */
static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
{
        unsigned int count = 0;
        struct page *p;

        spin_lock(&pt->lock);
        /* Only used for debugfs, the overhead doesn't matter */
        list_for_each_entry(p, &pt->pages, lru)
                ++count;
        spin_unlock(&pt->lock);

        return count;
}

/* Print a nice header for the order */
static void ttm_pool_debugfs_header(struct seq_file *m)
{
        unsigned int i;

        seq_puts(m, "\t ");
        for (i = 0; i < NR_PAGE_ORDERS; ++i)
                seq_printf(m, " ---%2u---", i);
        seq_puts(m, "\n");
}

/* Dump information about the different pool types */
static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
                                    struct seq_file *m)
{
        unsigned int i;

        for (i = 0; i < NR_PAGE_ORDERS; ++i)
                seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
        seq_puts(m, "\n");
}

/* Dump the total amount of allocated pages */
static void ttm_pool_debugfs_footer(struct seq_file *m)
{
        seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
                   atomic_long_read(&allocated_pages), page_pool_size);
}

/* Dump the information for the global pools */
static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
{
        ttm_pool_debugfs_header(m);

        spin_lock(&shrinker_lock);
        seq_puts(m, "wc\t:");
        ttm_pool_debugfs_orders(global_write_combined, m);
        seq_puts(m, "uc\t:");
        ttm_pool_debugfs_orders(global_uncached, m);
        seq_puts(m, "wc 32\t:");
        ttm_pool_debugfs_orders(global_dma32_write_combined, m);
        seq_puts(m, "uc 32\t:");
        ttm_pool_debugfs_orders(global_dma32_uncached, m);
        spin_unlock(&shrinker_lock);

        ttm_pool_debugfs_footer(m);

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_globals);

/**
 * ttm_pool_debugfs - Debugfs dump function for a pool
 *
 * @pool: the pool to dump the information for
 * @m: seq_file to dump to
 *
 * Make a debugfs dump with the per pool and global information.
 */
int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
        unsigned int i;

        if (!pool->use_dma_alloc) {
                seq_puts(m, "unused\n");
                return 0;
        }

        ttm_pool_debugfs_header(m);

        spin_lock(&shrinker_lock);
        for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
                seq_puts(m, "DMA ");
                switch (i) {
                case ttm_cached:
                        seq_puts(m, "\t:");
                        break;
                case ttm_write_combined:
                        seq_puts(m, "wc\t:");
                        break;
                case ttm_uncached:
                        seq_puts(m, "uc\t:");
                        break;
                }
                ttm_pool_debugfs_orders(pool->caching[i].orders, m);
        }
        spin_unlock(&shrinker_lock);

        ttm_pool_debugfs_footer(m);
        return 0;
}
EXPORT_SYMBOL(ttm_pool_debugfs);

/* Test the shrinker functions and dump the result */
static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
{
        struct shrink_control sc = { .gfp_mask = GFP_NOFS };

        fs_reclaim_acquire(GFP_KERNEL);
        seq_printf(m, "%lu/%lu\n", ttm_pool_shrinker_count(mm_shrinker, &sc),
                   ttm_pool_shrinker_scan(mm_shrinker, &sc));
        fs_reclaim_release(GFP_KERNEL);

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_shrink);

#endif

/**
 * ttm_pool_mgr_init - Initialize globals
 *
 * @num_pages: default number of pages
 *
 * Initialize the global locks and lists for the MM shrinker.
 */
int ttm_pool_mgr_init(unsigned long num_pages)
{
        unsigned int i;

        if (!page_pool_size)
                page_pool_size = num_pages;

        spin_lock_init(&shrinker_lock);
        INIT_LIST_HEAD(&shrinker_list);

        for (i = 0; i < NR_PAGE_ORDERS; ++i) {
                ttm_pool_type_init(&global_write_combined[i], NULL,
                                   ttm_write_combined, i);
                ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);

                ttm_pool_type_init(&global_dma32_write_combined[i], NULL,
                                   ttm_write_combined, i);
                ttm_pool_type_init(&global_dma32_uncached[i], NULL,
                                   ttm_uncached, i);
        }

#ifdef CONFIG_DEBUG_FS
        debugfs_create_file("page_pool", 0444, ttm_debugfs_root, NULL,
                            &ttm_pool_debugfs_globals_fops);
        debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL,
                            &ttm_pool_debugfs_shrink_fops);
#endif

        mm_shrinker = shrinker_alloc(0, "drm-ttm_pool");
        if (!mm_shrinker)
                return -ENOMEM;

        mm_shrinker->count_objects = ttm_pool_shrinker_count;
        mm_shrinker->scan_objects = ttm_pool_shrinker_scan;
        mm_shrinker->seeks = 1;

        shrinker_register(mm_shrinker);

        return 0;
}

/**
 * ttm_pool_mgr_fini - Finalize globals
 *
 * Cleanup the global pools and unregister the MM shrinker.
 */
void ttm_pool_mgr_fini(void)
{
        unsigned int i;

        for (i = 0; i < NR_PAGE_ORDERS; ++i) {
                ttm_pool_type_fini(&global_write_combined[i]);
                ttm_pool_type_fini(&global_uncached[i]);

                ttm_pool_type_fini(&global_dma32_write_combined[i]);
                ttm_pool_type_fini(&global_dma32_uncached[i]);
        }

        shrinker_free(mm_shrinker);
        WARN_ON(!list_empty(&shrinker_list));
}