/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_cache.h>
#include <drm/drm_vma_manager.h>
#include <linux/dma-buf-map.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/dma-resv.h>
struct ttm_transfer_obj {
	struct ttm_buffer_object base;
	struct ttm_buffer_object *bo;
};
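/*
 * Ask the driver to set up the I/O mapping information (mem->bus) needed to
 * access @mem with the CPU. Returns 0 right away if the information is
 * already set up or if the driver has no io_mem_reserve callback.
 */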
int ttm_mem_io_reserve(struct ttm_device *bdev,
		       struct ttm_resource *mem)
{
	if (mem->bus.offset || mem->bus.addr)
		return 0;

	mem->bus.is_iomem = false;
	if (!bdev->funcs->io_mem_reserve)
		return 0;

	return bdev->funcs->io_mem_reserve(bdev, mem);
}
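/*
 * Undo ttm_mem_io_reserve(): let the driver tear down its I/O mapping and
 * clear the cached bus information.
 */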
void ttm_mem_io_free(struct ttm_device *bdev,
		     struct ttm_resource *mem)
{
	if (!mem->bus.offset && !mem->bus.addr)
		return;

	if (bdev->funcs->io_mem_free)
		bdev->funcs->io_mem_free(bdev, mem);

	mem->bus.offset = 0;
	mem->bus.addr = NULL;
}
/**
 * ttm_move_memcpy - Helper to perform a memcpy ttm move operation.
 * @bo: The struct ttm_buffer_object.
 * @num_pages: The number of pages to copy.
 * @dst_iter: A struct ttm_kmap_iter representing the destination resource.
 * @src_iter: A struct ttm_kmap_iter representing the source resource.
 *
 * This function is intended to be able to move out async under a
 * dma-fence if desired.
 */
void ttm_move_memcpy(struct ttm_buffer_object *bo,
		     u32 num_pages,
		     struct ttm_kmap_iter *dst_iter,
		     struct ttm_kmap_iter *src_iter)
{
	const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops;
	const struct ttm_kmap_iter_ops *src_ops = src_iter->ops;
	struct ttm_tt *ttm = bo->ttm;
	struct dma_buf_map src_map, dst_map;
	pgoff_t i;

	/* Single TTM move. NOP */
	if (dst_ops->maps_tt && src_ops->maps_tt)
		return;

	/* Don't move nonexistent data. Clear destination instead. */
	if (src_ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm))) {
		if (ttm && !(ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC))
			return;

		for (i = 0; i < num_pages; ++i) {
			dst_ops->map_local(dst_iter, &dst_map, i);
			if (dst_map.is_iomem)
				memset_io(dst_map.vaddr_iomem, 0, PAGE_SIZE);
			else
				memset(dst_map.vaddr, 0, PAGE_SIZE);
			if (dst_ops->unmap_local)
				dst_ops->unmap_local(dst_iter, &dst_map);
		}
		return;
	}

	for (i = 0; i < num_pages; ++i) {
		dst_ops->map_local(dst_iter, &dst_map, i);
		src_ops->map_local(src_iter, &src_map, i);

		drm_memcpy_from_wc(&dst_map, &src_map, PAGE_SIZE);

		if (src_ops->unmap_local)
			src_ops->unmap_local(src_iter, &src_map);
		if (dst_ops->unmap_local)
			dst_ops->unmap_local(dst_iter, &dst_map);
	}
}
EXPORT_SYMBOL(ttm_move_memcpy);
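/*
 * Fallback move path using the CPU: populate the TT if the destination (or a
 * swapped-out source) needs it, set up kmap iterators for the source and
 * destination resources, copy page by page with ttm_move_memcpy() and
 * finally complete the move with ttm_bo_move_sync_cleanup().
 */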
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_resource *dst_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *dst_man =
		ttm_manager_type(bo->bdev, dst_mem->mem_type);
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_resource *src_mem = bo->resource;
	struct ttm_resource_manager *src_man =
		ttm_manager_type(bdev, src_mem->mem_type);
	struct ttm_resource src_copy = *src_mem;
	union {
		struct ttm_kmap_iter_tt tt;
		struct ttm_kmap_iter_linear_io io;
	} _dst_iter, _src_iter;
	struct ttm_kmap_iter *dst_iter, *src_iter;
	int ret = 0;

	if (ttm && ((ttm->page_flags & TTM_PAGE_FLAG_SWAPPED) ||
		    dst_man->use_tt)) {
		ret = ttm_tt_populate(bdev, ttm, ctx);
		if (ret)
			return ret;
	}

	dst_iter = ttm_kmap_iter_linear_io_init(&_dst_iter.io, bdev, dst_mem);
	if (PTR_ERR(dst_iter) == -EINVAL && dst_man->use_tt)
		dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm);
	if (IS_ERR(dst_iter))
		return PTR_ERR(dst_iter);

	src_iter = ttm_kmap_iter_linear_io_init(&_src_iter.io, bdev, src_mem);
	if (PTR_ERR(src_iter) == -EINVAL && src_man->use_tt)
		src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm);
	if (IS_ERR(src_iter)) {
		ret = PTR_ERR(src_iter);
		goto out_src_iter;
	}

	ttm_move_memcpy(bo, dst_mem->num_pages, dst_iter, src_iter);

	ttm_bo_move_sync_cleanup(bo, dst_mem);

	if (!src_iter->ops->maps_tt)
		ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, &src_copy);
out_src_iter:
	if (!dst_iter->ops->maps_tt)
		ttm_kmap_iter_linear_io_fini(&_dst_iter.io, bdev, dst_mem);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
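/*
 * Destroy callback for the "ghost" object created by
 * ttm_buffer_object_transfer(): drop the reference it holds on the original
 * buffer object and free the wrapper.
 */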
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	struct ttm_transfer_obj *fbo;

	fbo = container_of(bo, struct ttm_transfer_obj, base);
	ttm_bo_put(fbo->bo);
	kfree(fbo);
}
/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_transfer_obj *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	fbo->base = *bo;

	ttm_bo_get(bo);
	fbo->bo = bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	atomic_inc(&ttm_glob.bo_count);
	INIT_LIST_HEAD(&fbo->base.ddestroy);
	INIT_LIST_HEAD(&fbo->base.lru);
	fbo->base.moving = NULL;
	drm_vma_node_reset(&fbo->base.base.vma_node);

	kref_init(&fbo->base.kref);
	fbo->base.destroy = &ttm_transfered_destroy;
	fbo->base.pin_count = 0;
	if (bo->type != ttm_bo_type_sg)
		fbo->base.base.resv = &fbo->base.base._resv;

	dma_resv_init(&fbo->base.base._resv);
	fbo->base.base.dev = NULL;
	ret = dma_resv_trylock(&fbo->base.base._resv);
	WARN_ON(!ret);

	ttm_bo_move_to_lru_tail_unlocked(&fbo->base);

	*new_obj = &fbo->base;
	return 0;
}
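/*
 * Return the page protection to use when mapping @res, derived from the
 * caching mode of the TT pages or of the bus space backing the resource.
 */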
pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
		     pgprot_t tmp)
{
	struct ttm_resource_manager *man;
	enum ttm_caching caching;

	man = ttm_manager_type(bo->bdev, res->mem_type);
	caching = man->use_tt ? bo->ttm->caching : res->bus.caching;

	return ttm_prot_from_caching(caching, tmp);
}
EXPORT_SYMBOL(ttm_io_prot);
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = bo->resource;

	if (bo->resource->bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = ((u8 *)bo->resource->bus.addr) + offset;
	} else {
		resource_size_t res = bo->resource->bus.offset + offset;

		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->bus.caching == ttm_write_combined)
			map->virtual = ioremap_wc(res, size);
#ifdef CONFIG_X86
		else if (mem->bus.caching == ttm_cached)
			map->virtual = ioremap_cache(res, size);
#endif
		else
			map->virtual = ioremap(res, size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = bo->resource;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_tt *ttm = bo->ttm;
	pgprot_t prot;
	int ret;

	BUG_ON(!ttm);

	ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
	if (ret)
		return ret;

	if (num_pages == 1 && ttm->caching == ttm_cached) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */
		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}
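/*
 * ttm_bo_kmap() maps a range of a buffer object's pages into kernel address
 * space, using kmap(), vmap() or ioremap() depending on where the buffer
 * currently lives. Every successful mapping must be released again with
 * ttm_bo_kunmap(). A rough driver-side sketch (error handling trimmed,
 * names illustrative only):
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *
 *	ret = ttm_bo_kmap(bo, 0, 1, &map);
 *	if (!ret) {
 *		void *virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 *
 *		... access the page through virtual, honouring is_iomem ...
 *		ttm_bo_kunmap(&map);
 *	}
 */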
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	unsigned long offset, size;
	int ret;

	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->resource->num_pages)
		return -EINVAL;
	if ((start_page + num_pages) > bo->resource->num_pages)
		return -EINVAL;

	ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
	if (ret)
		return ret;
	if (!bo->resource->bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	ttm_mem_io_free(map->bo->bdev, map->bo->resource);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
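/*
 * Map the whole buffer object into kernel address space and describe the
 * mapping in a struct dma_buf_map, so it can be handed out through the
 * dma-buf vmap interface. Undone by ttm_bo_vunmap().
 */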
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
{
	struct ttm_resource *mem = bo->resource;
	int ret;

	ret = ttm_mem_io_reserve(bo->bdev, mem);
	if (ret)
		return ret;

	if (mem->bus.is_iomem) {
		void __iomem *vaddr_iomem;

		if (mem->bus.addr)
			vaddr_iomem = (void __iomem *)mem->bus.addr;
		else if (mem->bus.caching == ttm_write_combined)
			vaddr_iomem = ioremap_wc(mem->bus.offset,
						 bo->base.size);
#ifdef CONFIG_X86
		else if (mem->bus.caching == ttm_cached)
			vaddr_iomem = ioremap_cache(mem->bus.offset,
						    bo->base.size);
#endif
		else
			vaddr_iomem = ioremap(mem->bus.offset, bo->base.size);

		if (!vaddr_iomem)
			return -ENOMEM;

		dma_buf_map_set_vaddr_iomem(map, vaddr_iomem);

	} else {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false
		};
		struct ttm_tt *ttm = bo->ttm;
		pgprot_t prot;
		void *vaddr;

		ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
		if (ret)
			return ret;

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
		vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
		if (!vaddr)
			return -ENOMEM;

		dma_buf_map_set_vaddr(map, vaddr);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_vmap);
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
{
	struct ttm_resource *mem = bo->resource;

	if (dma_buf_map_is_null(map))
		return;

	if (!map->is_iomem)
		vunmap(map->vaddr);
	else if (!mem->bus.addr)
		iounmap(map->vaddr_iomem);
	dma_buf_map_clear(map);

	ttm_mem_io_free(bo->bdev, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_vunmap);
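/*
 * Synchronous variant of the post-move cleanup: wait for the buffer to be
 * idle, destroy its TT if the destination doesn't use one, and free the old
 * resource.
 */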
static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
				 bool dst_use_tt)
{
	int ret;
	ret = ttm_bo_wait(bo, false, false);
	if (ret)
		return ret;

	if (!dst_use_tt)
		ttm_bo_tt_destroy(bo);
	ttm_resource_free(bo, &bo->resource);
	return 0;
}
static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
				struct dma_fence *fence,
				bool dst_use_tt)
{
	struct ttm_buffer_object *ghost_obj;
	int ret;

	/**
	 * This should help pipeline ordinary buffer moves.
	 *
	 * Hang old buffer memory on a new buffer object,
	 * and leave it to be released when the GPU
	 * operation has completed.
	 */

	dma_fence_put(bo->moving);
	bo->moving = dma_fence_get(fence);

	ret = ttm_buffer_object_transfer(bo, &ghost_obj);
	if (ret)
		return ret;

	dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);

	/**
	 * If we're not moving to fixed memory, the TTM object
	 * needs to stay alive. Otherwise hang it on the ghost
	 * bo to be unbound and destroyed.
	 */

	if (dst_use_tt)
		ghost_obj->ttm = NULL;
	else
		bo->ttm = NULL;
	bo->resource = NULL;

	dma_resv_unlock(&ghost_obj->base._resv);
	ttm_bo_put(ghost_obj);
	return 0;
}
static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
				       struct dma_fence *fence)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *from;

	from = ttm_manager_type(bdev, bo->resource->mem_type);

	/**
	 * BO doesn't have a TTM we need to bind/unbind. Just remember
	 * this eviction and free up the allocation
	 */
	spin_lock(&from->move_lock);
	if (!from->move || dma_fence_is_later(fence, from->move)) {
		dma_fence_put(from->move);
		from->move = dma_fence_get(fence);
	}
	spin_unlock(&from->move_lock);

	ttm_resource_free(bo, &bo->resource);

	dma_fence_put(bo->moving);
	bo->moving = dma_fence_get(fence);
}
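/*
 * Called by drivers after scheduling an accelerated (GPU) move that signals
 * @fence on completion: attach the fence to the buffer, then either hand the
 * old memory to a ghost object, record a pipelined eviction, or wait for
 * idle, before assigning @new_mem to the buffer.
 */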
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      bool pipeline,
			      struct ttm_resource *new_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->resource->mem_type);
	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
	int ret = 0;

	dma_resv_add_excl_fence(bo->base.resv, fence);
	if (!evict)
		ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
	else if (!from->use_tt && pipeline)
		ttm_bo_move_pipeline_evict(bo, fence);
	else
		ret = ttm_bo_wait_free_node(bo, man->use_tt);

	if (ret)
		return ret;

	ttm_bo_assign_mem(bo, new_mem);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
/**
 * ttm_bo_pipeline_gutting - purge the contents of a bo
 * @bo: The buffer object
 *
 * Purge the contents of a bo, async if the bo is not idle.
 * After a successful call, the bo is left unpopulated in
 * system placement. The function may wait uninterruptible
 * for idle on OOM.
 *
 * Return: 0 if successful, negative error code on failure.
 */
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
	static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
	struct ttm_buffer_object *ghost;
	struct ttm_resource *sys_res;
	struct ttm_tt *ttm;
	int ret;

	ret = ttm_resource_alloc(bo, &sys_mem, &sys_res);
	if (ret)
		return ret;

	/* If already idle, no need for ghost object dance. */
	ret = ttm_bo_wait(bo, false, true);
	if (ret != -EBUSY) {
		if (!bo->ttm) {
			/* See comment below about clearing. */
			ret = ttm_tt_create(bo, true);
			if (ret)
				goto error_free_sys_mem;
		} else {
			ttm_tt_unpopulate(bo->bdev, bo->ttm);
			if (bo->type == ttm_bo_type_device)
				ttm_tt_mark_for_clear(bo->ttm);
		}
		ttm_resource_free(bo, &bo->resource);
		ttm_bo_assign_mem(bo, sys_res);
		return 0;
	}

	/*
	 * We need an unpopulated ttm_tt after giving our current one,
	 * if any, to the ghost object. And we can't afford to fail
	 * creating one *after* the operation. If the bo subsequently gets
	 * resurrected, make sure it's cleared (if ttm_bo_type_device)
	 * to avoid leaking sensitive information to user-space.
	 */

	ttm = bo->ttm;
	bo->ttm = NULL;
	ret = ttm_tt_create(bo, true);
	swap(bo->ttm, ttm);
	if (ret)
		goto error_free_sys_mem;

	ret = ttm_buffer_object_transfer(bo, &ghost);
	if (ret)
		goto error_destroy_tt;

	ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
	/* Last resort, wait for the BO to be idle when we are OOM */
	if (ret)
		ttm_bo_wait(bo, false, false);

	dma_resv_unlock(&ghost->base._resv);
	ttm_bo_put(ghost);
	bo->ttm = ttm;
	bo->resource = NULL;
	ttm_bo_assign_mem(bo, sys_res);
	return 0;

error_destroy_tt:
	ttm_tt_destroy(bo->bdev, ttm);

error_free_sys_mem:
	ttm_resource_free(bo, &sys_res);
	return ret;
}