// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

/* Pooling of allocated pages is necessary because changing the caching
 * attributes of the linear mapping on x86 requires a costly cross-CPU TLB
 * invalidate for those addresses.
 *
 * In addition to that, allocations from the DMA coherent API are pooled as
 * well because they are rather slow compared to alloc_pages+map.
 */
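
/* For example, handing back a single write-combined page without pooling
 * would mean restoring its caching attribute on free (illustrative sketch,
 * x86 only; set_memory_wb() operates on the linear mapping):
 *
 *	set_memory_wb((unsigned long)page_address(p), 1);
 *
 * which triggers a TLB flush for that address on every CPU. Keeping the page
 * in a pool with its attribute intact avoids this round trip on each
 * allocation and free.
 */
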
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/sched/mm.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/ttm/ttm_pool.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_tt.h>

#include "ttm_module.h"

/**
 * struct ttm_pool_dma - Helper object for coherent DMA mappings
 *
 * @addr: original DMA address returned for the mapping
 * @vaddr: original vaddr returned for the mapping and order in the lower bits
 */
struct ttm_pool_dma {
	dma_addr_t addr;
	unsigned long vaddr;
};
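
/* Since dma_alloc_attrs() returns at least PAGE_SIZE aligned memory, the low
 * PAGE_SHIFT bits of @vaddr are always zero and can carry the allocation
 * order instead (illustrative summary, mirroring ttm_pool_alloc_page() and
 * ttm_pool_page_order() below):
 *
 *	dma->vaddr = (unsigned long)vaddr | order;
 *	vaddr = (void *)(dma->vaddr & PAGE_MASK);	(original pointer)
 *	order = dma->vaddr & ~PAGE_MASK;		(stashed order)
 */
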
static unsigned long page_pool_size;

MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
module_param(page_pool_size, ulong, 0644);

static atomic_long_t allocated_pages;

static struct ttm_pool_type global_write_combined[MAX_ORDER];
static struct ttm_pool_type global_uncached[MAX_ORDER];

static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];

static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker mm_shrinker;

/* Allocate pages of size 1 << order with the given gfp_flags */
static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
					unsigned int order)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	struct page *p;
	void *vaddr;

	/* Don't set the __GFP_COMP flag for higher order allocations.
	 * TTM hands out the individual pages of an allocation, so compound
	 * page semantics (e.g. put_page() on a sub-page) would be wrong here.
	 */
	if (order) {
		gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
			__GFP_KSWAPD_RECLAIM;
		gfp_flags &= ~__GFP_MOVABLE;
		gfp_flags &= ~__GFP_COMP;
	}

	if (!pool->use_dma_alloc) {
		p = alloc_pages(gfp_flags, order);
		if (p)
			p->private = order;
		return p;
	}

	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
				&dma->addr, gfp_flags, attr);
	if (!vaddr)
		goto error_free;

	/* TODO: This is an illegal abuse of the DMA API, but we need to rework
	 * TTM page fault handling and extend the DMA API to clean this up.
	 */
	if (is_vmalloc_addr(vaddr))
		p = vmalloc_to_page(vaddr);
	else
		p = virt_to_page(vaddr);

	/* Stash the vaddr and the order in the lower bits for the free path */
	dma->vaddr = (unsigned long)vaddr | order;
	p->private = (unsigned long)dma;
	return p;

error_free:
	kfree(dma);
	return NULL;
}

/* Reset the caching and pages of size 1 << order */
static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
			       unsigned int order, struct page *p)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	void *vaddr;

#ifdef CONFIG_X86
	/* We don't care that set_pages_wb is inefficient here. This is only
	 * used when we have to shrink and CPU overhead is irrelevant then.
	 */
	if (caching != ttm_cached && !PageHighMem(p))
		set_pages_wb(p, 1 << order);
#endif

	if (!pool || !pool->use_dma_alloc) {
		__free_pages(p, order);
		return;
	}

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	dma = (void *)p->private;
	vaddr = (void *)(dma->vaddr & PAGE_MASK);
	dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
		       attr);
	kfree(dma);
}

/* Apply a new caching to an array of pages */
static int ttm_pool_apply_caching(struct page **first, struct page **last,
				  enum ttm_caching caching)
{
#ifdef CONFIG_X86
	unsigned int num_pages = last - first;

	if (!num_pages)
		return 0;

	switch (caching) {
	case ttm_cached:
		break;
	case ttm_write_combined:
		return set_pages_array_wc(first, num_pages);
	case ttm_uncached:
		return set_pages_array_uc(first, num_pages);
	}
#endif
	return 0;
}

/* Map pages of 1 << order size and fill the DMA address array */
static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
			struct page *p, dma_addr_t **dma_addr)
{
	dma_addr_t addr;
	unsigned int i;

	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		addr = dma->addr;
	} else {
		size_t size = (1ULL << order) * PAGE_SIZE;

		addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(pool->dev, addr))
			return -EFAULT;
	}

	for (i = 1 << order; i ; --i) {
		*(*dma_addr)++ = addr;
		addr += PAGE_SIZE;
	}

	return 0;
}

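/* E.g. (illustrative) an order-2 allocation with addr == 0x10000 fills four
 * consecutive slots with 0x10000, 0x11000, 0x12000 and 0x13000 (PAGE_SIZE ==
 * 4096), leaving *dma_addr pointing at the next free slot for the following
 * allocation.
 */
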
/* Unmap pages of 1 << order size */
static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
			   unsigned int num_pages)
{
	/* Unmapped while freeing the page */
	if (pool->use_dma_alloc)
		return;

	dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
		       DMA_BIDIRECTIONAL);
}

/* Give pages into a specific pool_type */
static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
{
	spin_lock(&pt->lock);
	list_add(&p->lru, &pt->pages);
	spin_unlock(&pt->lock);
	atomic_long_add(1 << pt->order, &allocated_pages);
}

/* Take pages from a specific pool_type, return NULL when nothing available */
static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
{
	struct page *p;

	spin_lock(&pt->lock);
	p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
	if (p) {
		atomic_long_sub(1 << pt->order, &allocated_pages);
		list_del(&p->lru);
	}
	spin_unlock(&pt->lock);

	return p;
}

/* Initialize and add a pool type to the global shrinker list */
static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
			       enum ttm_caching caching, unsigned int order)
{
	pt->pool = pool;
	pt->caching = caching;
	pt->order = order;
	spin_lock_init(&pt->lock);
	INIT_LIST_HEAD(&pt->pages);

	spin_lock(&shrinker_lock);
	list_add_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);
}

/* Remove a pool_type from the global shrinker list and free all pages */
static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
	struct page *p, *tmp;

	spin_lock(&shrinker_lock);
	list_del(&pt->shrinker_list);
	spin_unlock(&shrinker_lock);

	list_for_each_entry_safe(p, tmp, &pt->pages, lru)
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
}

/* Return the pool_type to use for the given caching and order */
static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
						  enum ttm_caching caching,
						  unsigned int order)
{
	if (pool->use_dma_alloc)
		return &pool->caching[caching].orders[order];

#ifdef CONFIG_X86
	switch (caching) {
	case ttm_write_combined:
		if (pool->use_dma32)
			return &global_dma32_write_combined[order];

		return &global_write_combined[order];
	case ttm_uncached:
		if (pool->use_dma32)
			return &global_dma32_uncached[order];

		return &global_uncached[order];
	default:
		break;
	}
#endif

	return NULL;
}
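
/* In short (illustrative summary): pools with use_dma_alloc always use their
 * own per-pool types; otherwise write-combined and uncached requests are
 * served from the global (optionally DMA32) pools, while ttm_cached falls
 * through to NULL so cached pages are never pooled and go straight to and
 * from the system allocator.
 */
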
/* Free pages using the global shrinker list */
static unsigned int ttm_pool_shrink(void)
{
	struct ttm_pool_type *pt;
	unsigned int num_freed;
	struct page *p;

	spin_lock(&shrinker_lock);
	pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);

	p = ttm_pool_type_take(pt);
	if (p) {
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
		num_freed = 1 << pt->order;
	} else {
		num_freed = 0;
	}

	/* Round robin over the pool types so all of them get shrunk equally */
	list_move_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);

	return num_freed;
}

/* Return the allocation order for a page */
static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
{
	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		return dma->vaddr & ~PAGE_MASK;
	}

	return p->private;
}

/**
 * ttm_pool_alloc - Fill a ttm_tt object
 *
 * @pool: ttm_pool to use
 * @tt: ttm_tt object to fill
 * @ctx: operation context
 *
 * Fill the ttm_tt object with pages and also make sure to DMA map them when
 * necessary.
 *
 * Returns: 0 on success, negative error code otherwise.
 */
int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
		   struct ttm_operation_ctx *ctx)
{
	unsigned long num_pages = tt->num_pages;
	dma_addr_t *dma_addr = tt->dma_address;
	struct page **caching = tt->pages;
	struct page **pages = tt->pages;
	gfp_t gfp_flags = GFP_USER;
	unsigned int i, order;
	struct page *p;
	int r;

	WARN_ON(!num_pages || ttm_tt_is_populated(tt));
	WARN_ON(dma_addr && !pool->dev);

	if (tt->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (ctx->gfp_retry_mayfail)
		gfp_flags |= __GFP_RETRY_MAYFAIL;

	if (pool->use_dma32)
		gfp_flags |= GFP_DMA32;
	else
		gfp_flags |= GFP_HIGHUSER;
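
	/* Allocate from the biggest useful order downwards; e.g. for
	 * num_pages == 1025 the first iteration tries order 10 (1024 pages)
	 * and the remaining single page then drops the order to 0. When a
	 * higher order allocation fails, --order below retries with the next
	 * smaller one instead of failing outright.
	 */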
	for (order = min(MAX_ORDER - 1UL, __fls(num_pages)); num_pages;
	     order = min_t(unsigned int, order, __fls(num_pages))) {
		bool apply_caching = false;
		struct ttm_pool_type *pt;

		pt = ttm_pool_select_type(pool, tt->caching, order);
		p = pt ? ttm_pool_type_take(pt) : NULL;
		if (p) {
			apply_caching = true;
		} else {
			p = ttm_pool_alloc_page(pool, gfp_flags, order);
			if (p && PageHighMem(p))
				apply_caching = true;
		}

		if (!p) {
			if (order) {
				--order;
				continue;
			}
			r = -ENOMEM;
			goto error_free_all;
		}

		if (apply_caching) {
			r = ttm_pool_apply_caching(caching, pages,
						   tt->caching);
			if (r)
				goto error_free_page;
			caching = pages + (1 << order);
		}

		r = ttm_mem_global_alloc_page(&ttm_mem_glob, p,
					      (1 << order) * PAGE_SIZE,
					      ctx);
		if (r)
			goto error_free_page;

		if (dma_addr) {
			r = ttm_pool_map(pool, order, p, &dma_addr);
			if (r)
				goto error_global_free;
		}

		num_pages -= 1 << order;
		for (i = 1 << order; i; --i)
			*(pages++) = p++;
	}

	r = ttm_pool_apply_caching(caching, pages, tt->caching);
	if (r)
		goto error_free_all;

	return 0;

error_global_free:
	ttm_mem_global_free_page(&ttm_mem_glob, p, (1 << order) * PAGE_SIZE);

error_free_page:
	ttm_pool_free_page(pool, tt->caching, order, p);

error_free_all:
	num_pages = tt->num_pages - num_pages;
	for (i = 0; i < num_pages; ) {
		order = ttm_pool_page_order(pool, tt->pages[i]);
		ttm_pool_free_page(pool, tt->caching, order, tt->pages[i]);
		i += 1 << order;
	}

	return r;
}
EXPORT_SYMBOL(ttm_pool_alloc);
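
/*
 * Example (hypothetical driver, not part of this file; the mydrv_* names are
 * placeholders): a driver typically calls this from its ttm_tt populate
 * callback, with the pool embedded in its device structure:
 *
 *	static int mydrv_ttm_tt_populate(struct ttm_bo_device *bdev,
 *					 struct ttm_tt *tt,
 *					 struct ttm_operation_ctx *ctx)
 *	{
 *		struct mydrv_device *mdev = mydrv_device(bdev);
 *
 *		return ttm_pool_alloc(&mdev->pool, tt, ctx);
 *	}
 */
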
/**
 * ttm_pool_free - Free the backing pages from a ttm_tt object
 *
 * @pool: Pool to give pages back to.
 * @tt: ttm_tt object to unpopulate
 *
 * Give the backing pages back to a pool or free them
 */
void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
{
	unsigned int i;

	for (i = 0; i < tt->num_pages; ) {
		struct page *p = tt->pages[i];
		unsigned int order, num_pages;
		struct ttm_pool_type *pt;

		order = ttm_pool_page_order(pool, p);
		num_pages = 1ULL << order;
		ttm_mem_global_free_page(&ttm_mem_glob, p,
					 num_pages * PAGE_SIZE);
		if (tt->dma_address)
			ttm_pool_unmap(pool, tt->dma_address[i], num_pages);

		pt = ttm_pool_select_type(pool, tt->caching, order);
		if (pt)
			ttm_pool_type_give(pt, tt->pages[i]);
		else
			ttm_pool_free_page(pool, tt->caching, order,
					   tt->pages[i]);

		i += num_pages;
	}

	while (atomic_long_read(&allocated_pages) > page_pool_size)
		ttm_pool_shrink();
}
EXPORT_SYMBOL(ttm_pool_free);
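
/*
 * Example (hypothetical driver, continuing the populate sketch above): the
 * matching unpopulate callback simply hands the pages back to the pool:
 *
 *	static void mydrv_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
 *					    struct ttm_tt *tt)
 *	{
 *		struct mydrv_device *mdev = mydrv_device(bdev);
 *
 *		ttm_pool_free(&mdev->pool, tt);
 *	}
 */
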
/**
 * ttm_pool_init - Initialize a pool
 *
 * @pool: the pool to initialize
 * @dev: device for DMA allocations and mappings
 * @use_dma_alloc: true if coherent DMA alloc should be used
 * @use_dma32: true if GFP_DMA32 should be used
 *
 * Initialize the pool and its pool types.
 */
void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
		   bool use_dma_alloc, bool use_dma32)
{
	unsigned int i, j;

	WARN_ON(!dev && use_dma_alloc);

	pool->dev = dev;
	pool->use_dma_alloc = use_dma_alloc;
	pool->use_dma32 = use_dma32;

	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
		for (j = 0; j < MAX_ORDER; ++j)
			ttm_pool_type_init(&pool->caching[i].orders[j],
					   pool, i, j);
}
EXPORT_SYMBOL(ttm_pool_init);
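
/*
 * Example (hypothetical driver, mydrv_* names are placeholders): a device
 * with no need for coherent DMA allocations or DMA32 memory sets up its pool
 * once at device creation time:
 *
 *	ttm_pool_init(&mdev->pool, mdev->dev, false, false);
 *
 * and tears it down again with ttm_pool_fini(&mdev->pool) before the device
 * is destroyed.
 */
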
/**
 * ttm_pool_fini - Cleanup a pool
 *
 * @pool: the pool to clean up
 *
 * Free all pages in the pool and unregister the types from the global
 * shrinker.
 */
void ttm_pool_fini(struct ttm_pool *pool)
{
	unsigned int i, j;

	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
		for (j = 0; j < MAX_ORDER; ++j)
			ttm_pool_type_fini(&pool->caching[i].orders[j]);
}
EXPORT_SYMBOL(ttm_pool_fini);

/* As long as pages are available make sure to release at least one */
static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	unsigned long num_freed = 0;

	do
		num_freed += ttm_pool_shrink();
	while (!num_freed && atomic_long_read(&allocated_pages));

	return num_freed;
}

/* Return the number of pages available or SHRINK_EMPTY if we have none */
static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	unsigned long num_pages = atomic_long_read(&allocated_pages);

	return num_pages ? num_pages : SHRINK_EMPTY;
}

#ifdef CONFIG_DEBUG_FS
/* Count the number of pages available in a pool_type */
static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
{
	unsigned int count = 0;
	struct page *p;

	spin_lock(&pt->lock);
	/* Only used for debugfs, the overhead doesn't matter */
	list_for_each_entry(p, &pt->pages, lru)
		++count;
	spin_unlock(&pt->lock);

	return count;
}

/* Print a nice header for the order */
static void ttm_pool_debugfs_header(struct seq_file *m)
{
	unsigned int i;

	seq_puts(m, "\t ");
	for (i = 0; i < MAX_ORDER; ++i)
		seq_printf(m, " ---%2u---", i);
	seq_puts(m, "\n");
}

/* Dump information about the different pool types */
static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
				    struct seq_file *m)
{
	unsigned int i;

	for (i = 0; i < MAX_ORDER; ++i)
		seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
	seq_puts(m, "\n");
}

/* Dump the total amount of allocated pages */
static void ttm_pool_debugfs_footer(struct seq_file *m)
{
	seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
		   atomic_long_read(&allocated_pages), page_pool_size);
}

/* Dump the information for the global pools */
static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
{
	ttm_pool_debugfs_header(m);

	spin_lock(&shrinker_lock);
	seq_puts(m, "wc\t:");
	ttm_pool_debugfs_orders(global_write_combined, m);
	seq_puts(m, "uc\t:");
	ttm_pool_debugfs_orders(global_uncached, m);
	seq_puts(m, "wc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_write_combined, m);
	seq_puts(m, "uc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_uncached, m);
	spin_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_globals);
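
/*
 * The resulting dump looks roughly like this (illustrative sketch, columns
 * truncated; one column per order, one row per pool type, the footer shows
 * allocated pages vs. page_pool_size):
 *
 *		  --- 0---  --- 1---  --- 2--- ...
 *	wc	:        0         0         0 ...
 *	uc	:       16         0         0 ...
 *	wc 32	:        0         0         0 ...
 *	uc 32	:        0         0         0 ...
 *
 *	total	:       16 of     4096
 */
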
/**
 * ttm_pool_debugfs - Debugfs dump function for a pool
 *
 * @pool: the pool to dump the information for
 * @m: seq_file to dump to
 *
 * Make a debugfs dump with the per pool and global information.
 */
int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
	unsigned int i;

	ttm_pool_debugfs_header(m);

	spin_lock(&shrinker_lock);
	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		seq_puts(m, "DMA ");
		switch (i) {
		case ttm_cached:
			seq_puts(m, "\t:");
			break;
		case ttm_write_combined:
			seq_puts(m, "wc\t:");
			break;
		case ttm_uncached:
			seq_puts(m, "uc\t:");
			break;
		}
		ttm_pool_debugfs_orders(pool->caching[i].orders, m);
	}
	spin_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);
	return 0;
}
EXPORT_SYMBOL(ttm_pool_debugfs);

/* Test the shrinker functions and dump the result */
static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct shrink_control sc = { .gfp_mask = GFP_NOFS };

	fs_reclaim_acquire(GFP_KERNEL);
	seq_printf(m, "%lu/%lu\n", ttm_pool_shrinker_count(&mm_shrinker, &sc),
		   ttm_pool_shrinker_scan(&mm_shrinker, &sc));
	fs_reclaim_release(GFP_KERNEL);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_shrink);

#endif

/**
 * ttm_pool_mgr_init - Initialize globals
 *
 * @num_pages: default number of pages
 *
 * Initialize the global locks and lists for the MM shrinker.
 */
int ttm_pool_mgr_init(unsigned long num_pages)
{
	unsigned int i;

	if (!page_pool_size)
		page_pool_size = num_pages;

	spin_lock_init(&shrinker_lock);
	INIT_LIST_HEAD(&shrinker_list);

	for (i = 0; i < MAX_ORDER; ++i) {
		ttm_pool_type_init(&global_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);

		ttm_pool_type_init(&global_dma32_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_dma32_uncached[i], NULL,
				   ttm_uncached, i);
	}

#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("page_pool", 0444, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_globals_fops);
	debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_shrink_fops);
#endif

	mm_shrinker.count_objects = ttm_pool_shrinker_count;
	mm_shrinker.scan_objects = ttm_pool_shrinker_scan;
	mm_shrinker.seeks = 1;
	return register_shrinker(&mm_shrinker);
}
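
/*
 * Example (illustrative; the caller and its sizing policy live outside this
 * file): the TTM core calls this once at module load with a default limit
 * derived from the available system memory, conceptually something like:
 *
 *	ttm_pool_mgr_init(num_ram_pages / 2);
 *
 * A non-zero page_pool_size module parameter takes precedence over the
 * default passed in here.
 */
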
/**
 * ttm_pool_mgr_fini - Finalize globals
 *
 * Cleanup the global pools and unregister the MM shrinker.
 */
void ttm_pool_mgr_fini(void)
{
	unsigned int i;

	for (i = 0; i < MAX_ORDER; ++i) {
		ttm_pool_type_fini(&global_write_combined[i]);
		ttm_pool_type_fini(&global_uncached[i]);

		ttm_pool_type_fini(&global_dma32_write_combined[i]);
		ttm_pool_type_fini(&global_dma32_uncached[i]);
	}

	unregister_shrinker(&mm_shrinker);
	WARN_ON(!list_empty(&shrinker_list));
}