/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/dma-resv.h>

struct ttm_transfer_obj {
	struct ttm_buffer_object base;
	struct ttm_buffer_object *bo;
};

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);

		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				pr_err("Failed to expire sync object before unbinding TTM\n");
			return ret;
		}

		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem, ctx);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
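
/*
 * Example (illustrative only, not part of the original file): a driver's
 * ttm_bo_driver::move callback typically falls back to this helper for
 * moves that only touch system pages and a GART binding, and to
 * ttm_bo_move_memcpy() otherwise. The callback name "my_bo_move" and the
 * placement checks below are assumptions for the sketch.
 *
 *	static int my_bo_move(struct ttm_buffer_object *bo, bool evict,
 *			      struct ttm_operation_ctx *ctx,
 *			      struct ttm_mem_reg *new_mem)
 *	{
 *		struct ttm_mem_reg *old_mem = &bo->mem;
 *
 *		if ((old_mem->mem_type == TTM_PL_TT &&
 *		     new_mem->mem_type == TTM_PL_SYSTEM) ||
 *		    (old_mem->mem_type == TTM_PL_SYSTEM &&
 *		     new_mem->mem_type == TTM_PL_TT))
 *			return ttm_bo_move_ttm(bo, ctx, new_mem);
 *
 *		return ttm_bo_move_memcpy(bo, ctx, new_mem);
 *	}
 */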

int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}

void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}
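
/*
 * Note (added commentary, not from the original file): the _vm variants pair
 * the per-BO io_reserved_vm flag with the manager's io_reserve_lru so that
 * CPU mappings set up at fault time can later be torn down again by
 * ttm_mem_io_evict() above. Callers are expected to hold the manager's
 * io_reserve lock (see ttm_mem_io_lock()) around these helpers, as the fault
 * and unmap_virtual paths do.
 */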

static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			       void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
				void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
	int i;

	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

#ifdef CONFIG_X86
#define __ttm_kmap_atomic_prot(__page, __prot) kmap_atomic_prot(__page, __prot)
#define __ttm_kunmap_atomic(__addr) kunmap_atomic(__addr)
#else
#define __ttm_kmap_atomic_prot(__page, __prot) vmap(&__page, 1, 0, __prot)
#define __ttm_kunmap_atomic(__addr) vunmap(__addr)
#endif

/**
 * ttm_kmap_atomic_prot - Efficient kernel map of a single page with
 * specified page protection.
 *
 * @page: The page to map.
 * @prot: The page protection.
 *
 * This function maps a TTM page using the kmap_atomic api if available,
 * otherwise falls back to vmap. The user must make sure that the
 * specified page does not have an aliased mapping with a different caching
 * policy unless the architecture explicitly allows it. Also mapping and
 * unmapping using this api must be correctly nested. Unmapping should
 * occur in the reverse order of mapping.
 */
void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
		return kmap_atomic(page);
	else
		return __ttm_kmap_atomic_prot(page, prot);
}
EXPORT_SYMBOL(ttm_kmap_atomic_prot);

/**
 * ttm_kunmap_atomic_prot - Unmap a page that was mapped using
 * ttm_kmap_atomic_prot.
 *
 * @addr: The virtual address from the map.
 * @prot: The page protection.
 */
void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot)
{
	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
		kunmap_atomic(addr);
	else
		__ttm_kunmap_atomic(addr);
}
EXPORT_SYMBOL(ttm_kunmap_atomic_prot);
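
/*
 * Example (illustrative only, not from the original file): the map/unmap pair
 * must nest correctly and use the same protection value, for instance when
 * filling a write-combined TTM page. "page" and "buf" are assumed to be
 * provided by the caller.
 *
 *	pgprot_t prot = ttm_io_prot(TTM_PL_FLAG_WC, PAGE_KERNEL);
 *	void *va = ttm_kmap_atomic_prot(page, prot);
 *
 *	if (va) {
 *		memcpy(va, buf, PAGE_SIZE);
 *		ttm_kunmap_atomic_prot(va, prot);
 *	}
 */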

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
	dst = ttm_kmap_atomic_prot(d, prot);
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

	ttm_kunmap_atomic_prot(dst, prot);

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
	src = ttm_kmap_atomic_prot(s, prot);
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

	ttm_kunmap_atomic_prot(src, prot);

	return 0;
}

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm) {
		ret = ttm_tt_populate(ttm, ctx);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else {
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		}
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
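
/*
 * Note (added commentary, not from the original file): when the source and
 * destination are in the same memory type and the ranges may overlap, the
 * copy loop above runs backwards (dir == -1, starting from the last page) so
 * that no page is overwritten before it has been read, the same idea as
 * memmove() versus memcpy().
 */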

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	struct ttm_transfer_obj *fbo;

	fbo = container_of(bo, struct ttm_transfer_obj, base);
	ttm_bo_put(fbo->bo);
	kfree(fbo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 *
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_transfer_obj *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	fbo->base = *bo;
	fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;

	ttm_bo_get(bo);
	fbo->bo = bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	atomic_inc(&ttm_bo_glob.bo_count);
	INIT_LIST_HEAD(&fbo->base.ddestroy);
	INIT_LIST_HEAD(&fbo->base.lru);
	INIT_LIST_HEAD(&fbo->base.swap);
	INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
	fbo->base.moving = NULL;
	drm_vma_node_reset(&fbo->base.base.vma_node);

	kref_init(&fbo->base.kref);
	fbo->base.destroy = &ttm_transfered_destroy;
	fbo->base.acc_size = 0;
	if (bo->type != ttm_bo_type_sg)
		fbo->base.base.resv = &fbo->base.base._resv;

	dma_resv_init(&fbo->base.base._resv);
	ret = dma_resv_trylock(&fbo->base.base._resv);
	WARN_ON(!ret);

	*new_obj = &fbo->base;
	return 0;
}

pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
	/* Cached mappings need no adjustment */
	if (caching_flags & TTM_PL_FLAG_CACHED)
		return tmp;

#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__) || defined(__mips__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);
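
/*
 * Example (illustrative only, not from the original file): a caller derives
 * the kernel mapping protection for a buffer's placement before vmap()ing
 * its pages, which is essentially what ttm_bo_kmap_ttm() below does.
 * "ttm" and "num_pages" are assumed to come from the caller.
 *
 *	pgprot_t prot = ttm_io_prot(bo->mem.placement, PAGE_KERNEL);
 *	void *vaddr = vmap(ttm->pages, num_pages, 0, prot);
 */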

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap(bo->mem.bus.base + bo->mem.bus.offset + offset,
					       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_tt *ttm = bo->ttm;
	pgprot_t prot;
	int ret;

	BUG_ON(!ttm);

	ret = ttm_tt_populate(ttm, &ctx);
	if (ret)
		return ret;

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;

	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
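
/*
 * Example (illustrative only, not from the original file): typical driver-side
 * CPU access through the kmap API, for an already reserved buffer object.
 * ttm_kmap_obj_virtual() comes from ttm_bo_api.h; a real caller would use
 * memset_io() when is_iomem turns out to be true.
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	int ret;
 *
 *	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *	if (!ret) {
 *		void *ptr = ttm_kmap_obj_virtual(&map, &is_iomem);
 *
 *		memset(ptr, 0, bo->num_pages << PAGE_SHIFT);
 *		ttm_bo_kunmap(&map);
 *	}
 */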

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;

	dma_resv_add_excl_fence(bo->base.resv, fence);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		dma_resv_unlock(&ghost_obj->base._resv);
		ttm_bo_put(ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
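
/*
 * Example (illustrative only, not from the original file): the usual
 * accelerated-move pattern in a driver's move callback. "my_copy_buffer"
 * stands in for a hypothetical driver function that queues a GPU blit and
 * returns the fence for it.
 *
 *	struct dma_fence *fence = my_copy_buffer(bo, old_mem, new_mem);
 *
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	ret = ttm_bo_move_accel_cleanup(bo, fence, evict, new_mem);
 *	dma_fence_put(fence);
 *	return ret;
 */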

int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
			 struct dma_fence *fence, bool evict,
			 struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg *old_mem = &bo->mem;

	struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
	struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];

	int ret;

	dma_resv_add_excl_fence(bo->base.resv, fence);

	if (!evict) {
		struct ttm_buffer_object *ghost_obj;

		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		dma_resv_unlock(&ghost_obj->base._resv);
		ttm_bo_put(ghost_obj);

	} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
		/**
		 * BO doesn't have a TTM we need to bind/unbind. Just remember
		 * this eviction and free up the allocation
		 */

		spin_lock(&from->move_lock);
		if (!from->move || dma_fence_is_later(fence, from->move)) {
			dma_fence_put(from->move);
			from->move = dma_fence_get(fence);
		}
		spin_unlock(&from->move_lock);

		ttm_bo_free_old_node(bo);

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

	} else {
		/**
		 * Last resort, wait for the move to be completed.
		 *
		 * Should never happen in practice.
		 */

		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_pipeline_move);
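
/*
 * Note (added commentary, not from the original file): ttm_bo_pipeline_move()
 * is the fully pipelined variant of ttm_bo_move_accel_cleanup(). Even on
 * eviction it avoids waiting for the copy fence where possible and only
 * records it (on a ghost object, on the source manager's "move" fence, or on
 * bo->moving) so that later users synchronize against it.
 */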

int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
	struct ttm_buffer_object *ghost;
	int ret;

	ret = ttm_buffer_object_transfer(bo, &ghost);
	if (ret)
		return ret;

	ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
	/* Last resort, wait for the BO to be idle when we are OOM */
	if (ret)
		ttm_bo_wait(bo, false, false);

	memset(&bo->mem, 0, sizeof(bo->mem));
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->ttm = NULL;

	dma_resv_unlock(&ghost->base._resv);
	ttm_bo_put(ghost);

	return 0;
}