/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_set_memory.h>

/*
 * Allocates a ttm structure for the given BO.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	uint32_t page_flags = 0;

	dma_resv_assert_held(bo->base.resv);

	if (bo->ttm)
		return 0;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	if (bdev->no_retry)
		page_flags |= TTM_PAGE_FLAG_NO_RETRY;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		page_flags |= TTM_PAGE_FLAG_SG;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}

	bo->ttm = bdev->driver->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	return 0;
}
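
/*
 * For illustration, a minimal sketch of the driver-side ttm_tt_create()
 * backend invoked above. The "mydrv" names are hypothetical; real drivers
 * embed struct ttm_dma_tt (or plain struct ttm_tt) at the start of a
 * driver-private structure in much this way.
 */
#if 0	/* illustrative sketch, not part of this file */
struct mydrv_ttm_tt {
	struct ttm_dma_tt dma_ttm;	/* must come first: TTM sees this */
	u64 userptr;			/* driver-private state follows */
};

static struct ttm_tt *mydrv_ttm_tt_create(struct ttm_buffer_object *bo,
					  uint32_t page_flags)
{
	struct mydrv_ttm_tt *gtt = kzalloc(sizeof(*gtt), GFP_KERNEL);

	if (!gtt)
		return NULL;
	if (ttm_dma_tt_init(&gtt->dma_ttm, bo, page_flags)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->dma_ttm.ttm;
}
#endif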

/*
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
				    GFP_KERNEL | __GFP_ZERO);
	if (!ttm->pages)
		return -ENOMEM;
	return 0;
}

static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
					sizeof(*ttm->ttm.pages) +
					sizeof(*ttm->dma_address),
					GFP_KERNEL | __GFP_ZERO);
	if (!ttm->ttm.pages)
		return -ENOMEM;

	ttm->dma_address = (void *)(ttm->ttm.pages + ttm->ttm.num_pages);
	return 0;
}

static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	/* SG-backed ttms take their pages from the sg table, so only the
	 * DMA address array is needed here. */
	ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
					  sizeof(*ttm->dma_address),
					  GFP_KERNEL | __GFP_ZERO);
	if (!ttm->dma_address)
		return -ENOMEM;
	return 0;
}
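
/*
 * Note on the combined directory above: ttm_dma_tt_alloc_page_directory()
 * makes a single kvmalloc cover both arrays, laid out back to back:
 *
 *	pages[0 .. num_pages-1] | dma_address[0 .. num_pages-1]
 *	^ ttm->ttm.pages          ^ ttm->dma_address
 *
 * which is why ttm_dma_tt_fini() below frees only one of the two pointers.
 */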

static int ttm_tt_set_page_caching(struct page *p,
				   enum ttm_caching_state c_old,
				   enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */
		ret = ttm_set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = ttm_set_pages_wc(p, 1);
	else if (c_new == tt_uncached)
		ret = ttm_set_pages_uc(p, 1);

	return ret;
}

/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */
static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (!ttm_tt_is_populated(ttm)) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	/* Roll back the pages already converted to the new state. */
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);
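
/*
 * A hedged sketch of the typical call site: TTM's move path converts the
 * kernel map caching to match the destination placement before binding.
 * The names bo and new_mem follow TTM convention, but the fragment itself
 * is illustrative and not part of this file.
 */
#if 0	/* illustrative call-site fragment */
	ret = ttm_tt_set_placement_caching(bo->ttm, new_mem->placement);
	if (ret)
		return ret;
#endif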

void ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	ttm_tt_unbind(bdev, ttm);

	ttm_tt_unpopulate(bdev, ttm);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	ttm->swap_storage = NULL;
	bdev->driver->ttm_tt_destroy(bdev, ttm);
}

static void ttm_tt_init_fields(struct ttm_tt *ttm,
			       struct ttm_buffer_object *bo,
			       uint32_t page_flags)
{
	ttm->num_pages = bo->num_pages;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm_tt_set_unpopulated(ttm);
	ttm->swap_storage = NULL;
	ttm->sg = bo->sg;
}

int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags)
{
	ttm_tt_init_fields(ttm, bo, page_flags);

	if (ttm_tt_alloc_page_directory(ttm)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_fini(struct ttm_tt *ttm)
{
	kvfree(ttm->pages);
	ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
		    uint32_t page_flags)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	ttm_tt_init_fields(ttm, bo, page_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);

int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
		   uint32_t page_flags)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	int ret;

	ttm_tt_init_fields(ttm, bo, page_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	if (page_flags & TTM_PAGE_FLAG_SG)
		ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
	else
		ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
	if (ret) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);

void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	/* pages and dma_address may share one allocation (see the layout
	 * note above), so only one of the two pointers is freed. */
	if (ttm->pages)
		kvfree(ttm->pages);
	else
		kvfree(ttm_dma->dma_address);
	ttm->pages = NULL;
	ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);

void ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	if (ttm_tt_is_bound(ttm)) {
		bdev->driver->ttm_tt_unbind(bdev, ttm);
		ttm_tt_set_unbound(ttm);
	}
}

int ttm_tt_bind(struct ttm_bo_device *bdev,
		struct ttm_tt *ttm, struct ttm_resource *bo_mem,
		struct ttm_operation_ctx *ctx)
{
	int ret = 0;

	if (!ttm)
		return -EINVAL;

	if (ttm_tt_is_bound(ttm))
		return 0;

	ret = ttm_tt_populate(bdev, ttm, ctx);
	if (ret)
		return ret;

	ret = bdev->driver->ttm_tt_bind(bdev, ttm, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm_tt_set_bound(ttm);

	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);
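
/*
 * For illustration, a sketch of a driver ttm_tt_bind() backend: map the
 * now-populated pages into the device GART at the offset described by the
 * resource. mydrv_gart_map() is a made-up stand-in for the device-specific
 * page-table programming.
 */
#if 0	/* illustrative sketch, not part of this file */
static int mydrv_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
			     struct ttm_resource *bo_mem)
{
	u64 gpu_addr = (u64)bo_mem->start << PAGE_SHIFT;

	return mydrv_gart_map(bdev, gpu_addr, ttm->num_pages, ttm->pages);
}
#endif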

int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		gfp_t gfp_mask = mapping_gfp_mask(swap_space);

		gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);
		from_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);

		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL))
			goto out_err;

		copy_highpage(to_page, from_page);
		put_page(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;

out_err:
	return ret;
}

int ttm_tt_swapout(struct ttm_bo_device *bdev,
		   struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (IS_ERR(swap_storage)) {
			pr_err("Failed allocating swap storage\n");
			return PTR_ERR(swap_storage);
		}
	} else {
		swap_storage = persistent_swap_storage;
	}

	swap_space = swap_storage->f_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		gfp_t gfp_mask = mapping_gfp_mask(swap_space);

		gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);

		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;

		to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		put_page(to_page);
	}

	ttm_tt_unpopulate(bdev, ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;

out_err:
	if (!persistent_swap_storage)
		fput(swap_storage);

	return ret;
}
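
/*
 * Note on the swap pair above: ttm_tt_swapout() copies every page into a
 * shmem object (whose pages the mm can later push to real swap) and then
 * unpopulates the tt, releasing the original pages. ttm_tt_swapin()
 * reverses this into a freshly populated tt and drops the shmem file,
 * unless the caller supplied a persistent one.
 */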

static void ttm_tt_add_mapping(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	pgoff_t i;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = bdev->dev_mapping;
}

int ttm_tt_populate(struct ttm_bo_device *bdev,
		    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (bdev->driver->ttm_tt_populate)
		ret = bdev->driver->ttm_tt_populate(bdev, ttm, ctx);
	else
		ret = ttm_pool_populate(ttm, ctx);
	if (!ret)
		ttm_tt_add_mapping(bdev, ttm);
	return ret;
}

static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
	pgoff_t i;
	struct page **page = ttm->pages;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i) {
		(*page)->mapping = NULL;
		(*page++)->index = 0;
	}
}

void ttm_tt_unpopulate(struct ttm_bo_device *bdev,
		       struct ttm_tt *ttm)
{
	if (!ttm_tt_is_populated(ttm))
		return;

	ttm_tt_clear_mapping(ttm);
	if (bdev->driver->ttm_tt_unpopulate)
		bdev->driver->ttm_tt_unpopulate(bdev, ttm);
	else
		ttm_pool_unpopulate(ttm);
}
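
/*
 * For illustration, a sketch of driver populate hooks layered on the
 * DMA-aware pool: ttm_populate_and_map_pages() and
 * ttm_unmap_and_unpopulate_pages() are the helpers declared in
 * ttm_page_alloc.h; the "mydrv" container and device lookup are
 * hypothetical.
 */
#if 0	/* illustrative sketch, not part of this file */
static int mydrv_ttm_tt_populate(struct ttm_bo_device *bdev,
				 struct ttm_tt *ttm,
				 struct ttm_operation_ctx *ctx)
{
	struct mydrv_ttm_tt *gtt =
		container_of(ttm, struct mydrv_ttm_tt, dma_ttm.ttm);

	return ttm_populate_and_map_pages(mydrv_dev(bdev), &gtt->dma_ttm, ctx);
}

static void mydrv_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
				    struct ttm_tt *ttm)
{
	struct mydrv_ttm_tt *gtt =
		container_of(ttm, struct mydrv_ttm_tt, dma_ttm.ttm);

	ttm_unmap_and_unpopulate_pages(mydrv_dev(bdev), &gtt->dma_ttm);
}
#endif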