/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/dma-resv.h>

#include "ttm_module.h"

/* default destructor */
static void ttm_bo_default_destroy(struct ttm_buffer_object *bo)
{
        kfree(bo);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
                                   struct ttm_placement *placement)
{
        struct drm_printer p = drm_debug_printer(TTM_PFX);
        struct ttm_resource_manager *man;
        int i, mem_type;

        drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n",
                   bo, bo->mem.num_pages, bo->base.size >> 10,
                   bo->base.size >> 20);
        for (i = 0; i < placement->num_placement; i++) {
                mem_type = placement->placement[i].mem_type;
                drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
                           i, placement->placement[i].flags, mem_type);
                man = ttm_manager_type(bo->bdev, mem_type);
                ttm_resource_manager_debug(man, &p);
        }
}

static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
        struct ttm_device *bdev = bo->bdev;

        list_del_init(&bo->lru);

        if (bdev->funcs->del_from_lru_notify)
                bdev->funcs->del_from_lru_notify(bo);
}

static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
                                     struct ttm_buffer_object *bo)
{
        if (!pos->first)
                pos->first = bo;
        pos->last = bo;
}

void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
                             struct ttm_resource *mem,
                             struct ttm_lru_bulk_move *bulk)
{
        struct ttm_device *bdev = bo->bdev;
        struct ttm_resource_manager *man;

        if (!bo->deleted)
                dma_resv_assert_held(bo->base.resv);

        if (bo->pin_count) {
                ttm_bo_del_from_lru(bo);
                return;
        }

        man = ttm_manager_type(bdev, mem->mem_type);
        list_move_tail(&bo->lru, &man->lru[bo->priority]);

        if (bdev->funcs->del_from_lru_notify)
                bdev->funcs->del_from_lru_notify(bo);

        if (bulk && !bo->pin_count) {
                switch (bo->mem.mem_type) {
                case TTM_PL_TT:
                        ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
                        break;

                case TTM_PL_VRAM:
                        ttm_bo_bulk_move_set_pos(&bulk->vram[bo->priority], bo);
                        break;
                }
        }
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
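
/*
 * Usage sketch (illustrative only, not part of this file): a driver that
 * wants to refresh a BO's LRU position after using it calls this with the
 * LRU lock held and the BO reserved; "my_bo" is a hypothetical, already
 * reserved buffer object:
 *
 *      spin_lock(&ttm_glob.lru_lock);
 *      ttm_bo_move_to_lru_tail(my_bo, &my_bo->mem, NULL);
 *      spin_unlock(&ttm_glob.lru_lock);
 */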

void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
{
        unsigned i;

        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
                struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i];
                struct ttm_resource_manager *man;

                if (!pos->first)
                        continue;

                dma_resv_assert_held(pos->first->base.resv);
                dma_resv_assert_held(pos->last->base.resv);

                man = ttm_manager_type(pos->first->bdev, TTM_PL_TT);
                list_bulk_move_tail(&man->lru[i], &pos->first->lru,
                                    &pos->last->lru);
        }

        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
                struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i];
                struct ttm_resource_manager *man;

                if (!pos->first)
                        continue;

                dma_resv_assert_held(pos->first->base.resv);
                dma_resv_assert_held(pos->last->base.resv);

                man = ttm_manager_type(pos->first->bdev, TTM_PL_VRAM);
                list_bulk_move_tail(&man->lru[i], &pos->first->lru,
                                    &pos->last->lru);
        }
}
EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail);
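
/*
 * Usage sketch (illustrative, hypothetical names): after validating a batch
 * of reserved BOs tracked by driver-side "struct my_entry" nodes, a driver
 * records each one in a ttm_lru_bulk_move and then applies a single bulk
 * LRU update so the batch keeps its submission order:
 *
 *      struct my_entry *e;
 *      struct ttm_lru_bulk_move bulk;
 *
 *      memset(&bulk, 0, sizeof(bulk));
 *      spin_lock(&ttm_glob.lru_lock);
 *      list_for_each_entry(e, &my_validated_list, head)
 *              ttm_bo_move_to_lru_tail(e->bo, &e->bo->mem, &bulk);
 *      ttm_bo_bulk_move_lru_tail(&bulk);
 *      spin_unlock(&ttm_glob.lru_lock);
 */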

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                                  struct ttm_resource *mem, bool evict,
                                  struct ttm_operation_ctx *ctx,
                                  struct ttm_place *hop)
{
        struct ttm_device *bdev = bo->bdev;
        struct ttm_resource_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type);
        struct ttm_resource_manager *new_man = ttm_manager_type(bdev, mem->mem_type);
        int ret;

        ttm_bo_unmap_virtual(bo);

        /*
         * Create and bind a ttm if required.
         */

        if (new_man->use_tt) {
                /* Zero init the new TTM structure if the old location should
                 * have used one as well.
                 */
                ret = ttm_tt_create(bo, old_man->use_tt);
                if (ret)
                        goto out_err;

                if (mem->mem_type != TTM_PL_SYSTEM) {
                        ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
                        if (ret)
                                goto out_err;
                }
        }

        ret = bdev->funcs->move(bo, evict, ctx, mem, hop);
        if (ret) {
                if (ret == -EMULTIHOP)
                        return ret;
                goto out_err;
        }

        ctx->bytes_moved += bo->base.size;
        return 0;

out_err:
        new_man = ttm_manager_type(bdev, bo->mem.mem_type);
        if (!new_man->use_tt)
                ttm_bo_tt_destroy(bo);

        return ret;
}

/*
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * The caller is expected to release the bo::reserved lock afterwards.
 */
static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
        if (bo->bdev->funcs->delete_mem_notify)
                bo->bdev->funcs->delete_mem_notify(bo);

        ttm_bo_tt_destroy(bo);
        ttm_resource_free(bo, &bo->mem);
}

static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
{
        int r;

        if (bo->base.resv == &bo->base._resv)
                return 0;

        BUG_ON(!dma_resv_trylock(&bo->base._resv));

        r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
        dma_resv_unlock(&bo->base._resv);
        if (r)
                return r;

        if (bo->type != ttm_bo_type_sg) {
                /* This works because the BO is about to be destroyed and nobody
                 * references it any more. The only tricky case is the trylock on
                 * the resv object while holding the lru_lock.
                 */
                spin_lock(&ttm_glob.lru_lock);
                bo->base.resv = &bo->base._resv;
                spin_unlock(&ttm_glob.lru_lock);
        }

        return r;
}

static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
        struct dma_resv *resv = &bo->base._resv;
        struct dma_resv_list *fobj;
        struct dma_fence *fence;
        int i;

        rcu_read_lock();
        fobj = rcu_dereference(resv->fence);
        fence = rcu_dereference(resv->fence_excl);
        if (fence && !fence->ops->signaled)
                dma_fence_enable_sw_signaling(fence);

        for (i = 0; fobj && i < fobj->shared_count; ++i) {
                fence = rcu_dereference(fobj->shared[i]);

                if (!fence->ops->signaled)
                        dma_fence_enable_sw_signaling(fence);
        }
        rcu_read_unlock();
}

/**
 * ttm_bo_cleanup_refs
 * If bo idle, remove from lru lists, and unref.
 * If not idle, block if possible.
 *
 * Must be called with lru_lock and reservation held, this function
 * will drop the lru lock and optionally the reservation lock before returning.
 *
 * @bo:            The buffer object to clean-up
 * @interruptible: Any sleeps should occur interruptibly.
 * @no_wait_gpu:   Never wait for gpu. Return -EBUSY instead.
 * @unlock_resv:   Unlock the reservation lock as well.
 */

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
                               bool interruptible, bool no_wait_gpu,
                               bool unlock_resv)
{
        struct dma_resv *resv = &bo->base._resv;
        int ret;

        if (dma_resv_test_signaled_rcu(resv, true))
                ret = 0;
        else
                ret = -EBUSY;

        if (ret && !no_wait_gpu) {
                long lret;

                if (unlock_resv)
                        dma_resv_unlock(bo->base.resv);
                spin_unlock(&ttm_glob.lru_lock);

                lret = dma_resv_wait_timeout_rcu(resv, true, interruptible,
                                                 30 * HZ);

                if (lret < 0)
                        return lret;
                else if (lret == 0)
                        return -EBUSY;

                spin_lock(&ttm_glob.lru_lock);
                if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
                        /*
                         * We raced, and lost, someone else holds the reservation now,
                         * and is probably busy in ttm_bo_cleanup_memtype_use.
                         *
                         * Even if it's not the case, because we finished waiting any
                         * delayed destruction would succeed, so just return success
                         * here.
                         */
                        spin_unlock(&ttm_glob.lru_lock);
                        return 0;
                }
                ret = 0;
        }

        if (ret || unlikely(list_empty(&bo->ddestroy))) {
                if (unlock_resv)
                        dma_resv_unlock(bo->base.resv);
                spin_unlock(&ttm_glob.lru_lock);
                return ret;
        }

        ttm_bo_del_from_lru(bo);
        list_del_init(&bo->ddestroy);
        spin_unlock(&ttm_glob.lru_lock);
        ttm_bo_cleanup_memtype_use(bo);

        if (unlock_resv)
                dma_resv_unlock(bo->base.resv);

        ttm_bo_put(bo);

        return 0;
}

/*
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */
bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all)
{
        struct ttm_global *glob = &ttm_glob;
        struct list_head removed;
        bool empty;

        INIT_LIST_HEAD(&removed);

        spin_lock(&glob->lru_lock);
        while (!list_empty(&bdev->ddestroy)) {
                struct ttm_buffer_object *bo;

                bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
                                      ddestroy);
                list_move_tail(&bo->ddestroy, &removed);
                if (!ttm_bo_get_unless_zero(bo))
                        continue;

                if (remove_all || bo->base.resv != &bo->base._resv) {
                        spin_unlock(&glob->lru_lock);
                        dma_resv_lock(bo->base.resv, NULL);

                        spin_lock(&glob->lru_lock);
                        ttm_bo_cleanup_refs(bo, false, !remove_all, true);

                } else if (dma_resv_trylock(bo->base.resv)) {
                        ttm_bo_cleanup_refs(bo, false, !remove_all, true);
                } else {
                        spin_unlock(&glob->lru_lock);
                }

                ttm_bo_put(bo);
                spin_lock(&glob->lru_lock);
        }
        list_splice_tail(&removed, &bdev->ddestroy);
        empty = list_empty(&bdev->ddestroy);
        spin_unlock(&glob->lru_lock);

        return empty;
}

static void ttm_bo_release(struct kref *kref)
{
        struct ttm_buffer_object *bo =
            container_of(kref, struct ttm_buffer_object, kref);
        struct ttm_device *bdev = bo->bdev;
        int ret;

        if (!bo->deleted) {
                ret = ttm_bo_individualize_resv(bo);
                if (ret) {
                        /* Last resort, if we fail to allocate memory for the
                         * fences block for the BO to become idle
                         */
                        dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
                                                  30 * HZ);
                }

                if (bo->bdev->funcs->release_notify)
                        bo->bdev->funcs->release_notify(bo);

                drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
                ttm_mem_io_free(bdev, &bo->mem);
        }

        if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
            !dma_resv_trylock(bo->base.resv)) {
                /* The BO is not idle, resurrect it for delayed destroy */
                ttm_bo_flush_all_fences(bo);
                bo->deleted = true;

                spin_lock(&ttm_glob.lru_lock);

                /*
                 * Make pinned bos immediately available to
                 * shrinkers, now that they are queued for
                 * destruction.
                 *
                 * FIXME: QXL is triggering this. Can be removed when the
                 * driver is fixed.
                 */
                if (WARN_ON_ONCE(bo->pin_count)) {
                        bo->pin_count = 0;
                        ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
                }

                kref_init(&bo->kref);
                list_add_tail(&bo->ddestroy, &bdev->ddestroy);
                spin_unlock(&ttm_glob.lru_lock);

                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
                return;
        }

        spin_lock(&ttm_glob.lru_lock);
        ttm_bo_del_from_lru(bo);
        list_del(&bo->ddestroy);
        spin_unlock(&ttm_glob.lru_lock);

        ttm_bo_cleanup_memtype_use(bo);
        dma_resv_unlock(bo->base.resv);

        atomic_dec(&ttm_glob.bo_count);
        dma_fence_put(bo->moving);
        if (!ttm_bo_uses_embedded_gem_object(bo))
                dma_resv_fini(&bo->base._resv);
        bo->destroy(bo);
}

void ttm_bo_put(struct ttm_buffer_object *bo)
{
        kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_put);

int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev)
{
        return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched)
{
        if (resched)
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
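
/*
 * Illustrative pairing (hypothetical driver code): drivers typically stop
 * the delayed-destroy worker around manager teardown or device reset and
 * resume it with the value returned by the lock call:
 *
 *      int resched = ttm_bo_lock_delayed_workqueue(bdev);
 *
 *      ... tear down / recover without delayed-destroy work running ...
 *
 *      ttm_bo_unlock_delayed_workqueue(bdev, resched);
 */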

static int ttm_bo_evict(struct ttm_buffer_object *bo,
                        struct ttm_operation_ctx *ctx)
{
        struct ttm_device *bdev = bo->bdev;
        struct ttm_resource evict_mem;
        struct ttm_placement placement;
        struct ttm_place hop;
        int ret = 0;

        memset(&hop, 0, sizeof(hop));

        dma_resv_assert_held(bo->base.resv);

        placement.num_placement = 0;
        placement.num_busy_placement = 0;
        bdev->funcs->evict_flags(bo, &placement);

        if (!placement.num_placement && !placement.num_busy_placement) {
                ttm_bo_wait(bo, false, false);

                ttm_bo_cleanup_memtype_use(bo);
                return ttm_tt_create(bo, false);
        }

        evict_mem = bo->mem;
        evict_mem.mm_node = NULL;
        evict_mem.bus.offset = 0;
        evict_mem.bus.addr = NULL;

        ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
        if (ret) {
                if (ret != -ERESTARTSYS) {
                        pr_err("Failed to find memory space for buffer 0x%p eviction\n",
                               bo);
                        ttm_bo_mem_space_debug(bo, &placement);
                }
                goto out;
        }

        ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx, &hop);
        if (unlikely(ret)) {
                WARN(ret == -EMULTIHOP, "Unexpected multihop in eviction - likely driver bug\n");
                if (ret != -ERESTARTSYS)
                        pr_err("Buffer eviction failed\n");
                ttm_resource_free(bo, &evict_mem);
        }
out:
        return ret;
}

bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
                              const struct ttm_place *place)
{
        /* Don't evict this BO if it's outside of the
         * requested placement range
         */
        if (place->fpfn >= (bo->mem.start + bo->mem.num_pages) ||
            (place->lpfn && place->lpfn <= bo->mem.start))
                return false;

        return true;
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);
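
/*
 * Minimal sketch of a driver override (hypothetical names): drivers hook
 * this as bdev->funcs->eviction_valuable to veto evictions, e.g. for
 * buffers pinned for scanout, and fall back to the default check:
 *
 *      static bool my_eviction_valuable(struct ttm_buffer_object *bo,
 *                                       const struct ttm_place *place)
 *      {
 *              if (my_bo_is_scanout(bo))
 *                      return false;
 *
 *              return ttm_bo_eviction_valuable(bo, place);
 *      }
 */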

/*
 * Check whether the target bo is allowed to be evicted or swapped out,
 * including the following cases:
 *
 * a. if it shares the same reservation object with ctx->resv, the
 * reservation objects are assumed to be already locked, so don't lock
 * again and return true directly when either the operation allows
 * reserved eviction (allow_res_evict) or the target bo is already on
 * the delayed free list;
 *
 * b. Otherwise, trylock it.
 */
static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
                        struct ttm_operation_ctx *ctx, bool *locked, bool *busy)
{
        bool ret = false;

        if (bo->base.resv == ctx->resv) {
                dma_resv_assert_held(bo->base.resv);
                if (ctx->allow_res_evict)
                        ret = true;
                *locked = false;
                if (busy)
                        *busy = false;
        } else {
                ret = dma_resv_trylock(bo->base.resv);
                *locked = ret;
                if (busy)
                        *busy = !ret;
        }

        return ret;
}

/**
 * ttm_mem_evict_wait_busy - wait for a busy BO to become available
 *
 * @busy_bo: BO which couldn't be locked with trylock
 * @ctx: operation context
 * @ticket: acquire ticket
 *
 * Try to lock a busy buffer object to avoid failing eviction.
 */
static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
                                   struct ttm_operation_ctx *ctx,
                                   struct ww_acquire_ctx *ticket)
{
        int r;

        if (!busy_bo || !ticket)
                return -EBUSY;

        if (ctx->interruptible)
                r = dma_resv_lock_interruptible(busy_bo->base.resv,
                                                ticket);
        else
                r = dma_resv_lock(busy_bo->base.resv, ticket);

        /*
         * TODO: It would be better to keep the BO locked until allocation is at
         * least tried one more time, but that would mean a much larger rework
         * of TTM.
         */
        if (!r)
                dma_resv_unlock(busy_bo->base.resv);

        return r == -EDEADLK ? -EBUSY : r;
}

int ttm_mem_evict_first(struct ttm_device *bdev,
                        struct ttm_resource_manager *man,
                        const struct ttm_place *place,
                        struct ttm_operation_ctx *ctx,
                        struct ww_acquire_ctx *ticket)
{
        struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
        bool locked = false;
        unsigned i;
        int ret;

        spin_lock(&ttm_glob.lru_lock);
        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
                list_for_each_entry(bo, &man->lru[i], lru) {
                        bool busy;

                        if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
                                                            &busy)) {
                                if (busy && !busy_bo && ticket !=
                                    dma_resv_locking_ctx(bo->base.resv))
                                        busy_bo = bo;
                                continue;
                        }

                        if (place && !bdev->funcs->eviction_valuable(bo,
                                                                     place)) {
                                if (locked)
                                        dma_resv_unlock(bo->base.resv);
                                continue;
                        }
                        if (!ttm_bo_get_unless_zero(bo)) {
                                if (locked)
                                        dma_resv_unlock(bo->base.resv);
                                continue;
                        }
                        break;
                }

                /* If the inner loop terminated early, we have our candidate */
                if (&bo->lru != &man->lru[i])
                        break;

                bo = NULL;
        }

        if (!bo) {
                if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
                        busy_bo = NULL;
                spin_unlock(&ttm_glob.lru_lock);
                ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
                if (busy_bo)
                        ttm_bo_put(busy_bo);
                return ret;
        }

        if (bo->deleted) {
                ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
                                          ctx->no_wait_gpu, locked);
                ttm_bo_put(bo);
                return ret;
        }

        spin_unlock(&ttm_glob.lru_lock);

        ret = ttm_bo_evict(bo, ctx);
        if (locked)
                ttm_bo_unreserve(bo);

        ttm_bo_put(bo);
        return ret;
}

/*
 * Add the last move fence to the BO and reserve a new shared slot.
 */
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
                                 struct ttm_resource_manager *man,
                                 struct ttm_resource *mem,
                                 bool no_wait_gpu)
{
        struct dma_fence *fence;
        int ret;

        spin_lock(&man->move_lock);
        fence = dma_fence_get(man->move);
        spin_unlock(&man->move_lock);

        if (!fence)
                return 0;

        if (no_wait_gpu) {
                dma_fence_put(fence);
                return -EBUSY;
        }

        dma_resv_add_shared_fence(bo->base.resv, fence);

        ret = dma_resv_reserve_shared(bo->base.resv, 1);
        if (unlikely(ret)) {
                dma_fence_put(fence);
                return ret;
        }

        dma_fence_put(bo->moving);
        bo->moving = fence;
        return 0;
}

/*
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                                  const struct ttm_place *place,
                                  struct ttm_resource *mem,
                                  struct ttm_operation_ctx *ctx)
{
        struct ttm_device *bdev = bo->bdev;
        struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type);
        struct ww_acquire_ctx *ticket;
        int ret;

        ticket = dma_resv_locking_ctx(bo->base.resv);
        do {
                ret = ttm_resource_alloc(bo, place, mem);
                if (likely(!ret))
                        break;
                if (unlikely(ret != -ENOSPC))
                        return ret;
                ret = ttm_mem_evict_first(bdev, man, place, ctx,
                                          ticket);
                if (unlikely(ret != 0))
                        return ret;
        } while (1);

        return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
}

/**
 * ttm_bo_mem_placement - check if placement is compatible
 * @bo: BO to find memory for
 * @place: where to search
 * @mem: the memory object to fill in
 *
 * Check if the placement is compatible and fill in the mem structure.
 * Returns -EBUSY if the placement is not usable, 0 when it can be used.
 */
static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
                                const struct ttm_place *place,
                                struct ttm_resource *mem)
{
        struct ttm_device *bdev = bo->bdev;
        struct ttm_resource_manager *man;

        man = ttm_manager_type(bdev, place->mem_type);
        if (!man || !ttm_resource_manager_used(man))
                return -EBUSY;

        mem->mem_type = place->mem_type;
        mem->placement = place->flags;

        spin_lock(&ttm_glob.lru_lock);
        ttm_bo_move_to_lru_tail(bo, mem, NULL);
        spin_unlock(&ttm_glob.lru_lock);

        return 0;
}

/*
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                     struct ttm_placement *placement,
                     struct ttm_resource *mem,
                     struct ttm_operation_ctx *ctx)
{
        struct ttm_device *bdev = bo->bdev;
        bool type_found = false;
        int i, ret;

        ret = dma_resv_reserve_shared(bo->base.resv, 1);
        if (unlikely(ret))
                return ret;

        for (i = 0; i < placement->num_placement; ++i) {
                const struct ttm_place *place = &placement->placement[i];
                struct ttm_resource_manager *man;

                ret = ttm_bo_mem_placement(bo, place, mem);
                if (ret)
                        continue;

                type_found = true;
                ret = ttm_resource_alloc(bo, place, mem);
                if (ret == -ENOSPC)
                        continue;
                if (unlikely(ret))
                        goto error;

                man = ttm_manager_type(bdev, mem->mem_type);
                ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
                if (unlikely(ret)) {
                        ttm_resource_free(bo, mem);
                        if (ret == -EBUSY)
                                continue;

                        goto error;
                }
                return 0;
        }

        for (i = 0; i < placement->num_busy_placement; ++i) {
                const struct ttm_place *place = &placement->busy_placement[i];

                ret = ttm_bo_mem_placement(bo, place, mem);
                if (ret)
                        continue;

                type_found = true;
                ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
                if (likely(!ret))
                        return 0;

                if (ret && ret != -EBUSY)
                        goto error;
        }

        ret = -ENOMEM;
        if (!type_found) {
                pr_err(TTM_PFX "No compatible memory type found\n");
                ret = -EINVAL;
        }

error:
        if (bo->mem.mem_type == TTM_PL_SYSTEM && !bo->pin_count)
                ttm_bo_move_to_lru_tail_unlocked(bo);

        return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
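
/*
 * Example placement (illustrative, hypothetical names): a caller preferring
 * VRAM with a GTT fallback would pass something like the following; the
 * busy list drives the eviction path above when VRAM is full:
 *
 *      static const struct ttm_place vram_place = { .mem_type = TTM_PL_VRAM };
 *      static const struct ttm_place tt_place = { .mem_type = TTM_PL_TT };
 *      static struct ttm_placement vram_gtt = {
 *              .num_placement = 1,
 *              .placement = &vram_place,
 *              .num_busy_placement = 1,
 *              .busy_placement = &tt_place,
 *      };
 */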

static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
                                     struct ttm_resource *mem,
                                     struct ttm_operation_ctx *ctx,
                                     struct ttm_place *hop)
{
        struct ttm_placement hop_placement;
        int ret;
        struct ttm_resource hop_mem = *mem;

        hop_mem.mm_node = NULL;
        hop_mem.mem_type = TTM_PL_SYSTEM;
        hop_mem.placement = 0;

        hop_placement.num_placement = hop_placement.num_busy_placement = 1;
        hop_placement.placement = hop_placement.busy_placement = hop;

        /* find space in the bounce domain */
        ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
        if (ret)
                return ret;
        /* move to the bounce domain */
        ret = ttm_bo_handle_move_mem(bo, &hop_mem, false, ctx, NULL);
        if (ret) {
                ttm_resource_free(bo, &hop_mem);
                return ret;
        }

        return 0;
}

static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
                              struct ttm_placement *placement,
                              struct ttm_operation_ctx *ctx)
{
        int ret = 0;
        struct ttm_place hop;
        struct ttm_resource mem;

        dma_resv_assert_held(bo->base.resv);

        memset(&hop, 0, sizeof(hop));

        mem.num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
        mem.page_alignment = bo->mem.page_alignment;
        mem.bus.offset = 0;
        mem.bus.addr = NULL;
        mem.mm_node = NULL;

        /*
         * Determine where to move the buffer.
         *
         * If the driver determines that the move is going to need
         * an extra step, it will return -EMULTIHOP and the buffer
         * will be moved to the temporary stop; the driver is then
         * called again to make the second hop.
         */
        ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
        if (ret)
                return ret;
bounce:
        ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx, &hop);
        if (ret == -EMULTIHOP) {
                ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
                if (ret)
                        goto out;
                /* try and move to final place now. */
                goto bounce;
        }
out:
        if (ret)
                ttm_resource_free(bo, &mem);
        return ret;
}

static bool ttm_bo_places_compat(const struct ttm_place *places,
                                 unsigned num_placement,
                                 struct ttm_resource *mem,
                                 uint32_t *new_flags)
{
        unsigned i;

        for (i = 0; i < num_placement; i++) {
                const struct ttm_place *heap = &places[i];

                if ((mem->start < heap->fpfn ||
                     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
                        continue;

                *new_flags = heap->flags;
                if ((mem->mem_type == heap->mem_type) &&
                    (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
                     (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
                        return true;
        }
        return false;
}

bool ttm_bo_mem_compat(struct ttm_placement *placement,
                       struct ttm_resource *mem,
                       uint32_t *new_flags)
{
        if (ttm_bo_places_compat(placement->placement, placement->num_placement,
                                 mem, new_flags))
                return true;

        if ((placement->busy_placement != placement->placement ||
             placement->num_busy_placement > placement->num_placement) &&
            ttm_bo_places_compat(placement->busy_placement,
                                 placement->num_busy_placement,
                                 mem, new_flags))
                return true;

        return false;
}
EXPORT_SYMBOL(ttm_bo_mem_compat);

int ttm_bo_validate(struct ttm_buffer_object *bo,
                    struct ttm_placement *placement,
                    struct ttm_operation_ctx *ctx)
{
        int ret;
        uint32_t new_flags;

        dma_resv_assert_held(bo->base.resv);

        /*
         * Remove the backing store if no placement is given.
         */
        if (!placement->num_placement && !placement->num_busy_placement) {
                ret = ttm_bo_pipeline_gutting(bo);
                if (ret)
                        return ret;

                return ttm_tt_create(bo, false);
        }

        /*
         * Check whether we need to move the buffer.
         */
        if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
                ret = ttm_bo_move_buffer(bo, placement, ctx);
                if (ret)
                        return ret;
        }
        /*
         * We might need to add a TTM.
         */
        if (bo->mem.mem_type == TTM_PL_SYSTEM) {
                ret = ttm_tt_create(bo, true);
                if (ret)
                        return ret;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);
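
/*
 * Usage sketch (hypothetical caller, reusing the "vram_gtt" placement
 * sketched above): with the BO reserved, validation moves the buffer into
 * one of the requested placements, creating or populating a TTM as needed:
 *
 *      struct ttm_operation_ctx ctx = { .interruptible = true };
 *      int ret;
 *
 *      dma_resv_lock(bo->base.resv, NULL);
 *      ret = ttm_bo_validate(bo, &vram_gtt, &ctx);
 *      dma_resv_unlock(bo->base.resv);
 */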

int ttm_bo_init_reserved(struct ttm_device *bdev,
                         struct ttm_buffer_object *bo,
                         size_t size,
                         enum ttm_bo_type type,
                         struct ttm_placement *placement,
                         uint32_t page_alignment,
                         struct ttm_operation_ctx *ctx,
                         struct sg_table *sg,
                         struct dma_resv *resv,
                         void (*destroy) (struct ttm_buffer_object *))
{
        bool locked;
        int ret = 0;

        bo->destroy = destroy ? destroy : ttm_bo_default_destroy;

        kref_init(&bo->kref);
        INIT_LIST_HEAD(&bo->lru);
        INIT_LIST_HEAD(&bo->ddestroy);
        bo->bdev = bdev;
        bo->type = type;
        bo->mem.mem_type = TTM_PL_SYSTEM;
        bo->mem.num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        bo->mem.mm_node = NULL;
        bo->mem.page_alignment = page_alignment;
        bo->mem.bus.offset = 0;
        bo->mem.bus.addr = NULL;
        bo->moving = NULL;
        bo->mem.placement = 0;
        bo->pin_count = 0;
        bo->sg = sg;
        if (resv) {
                bo->base.resv = resv;
                dma_resv_assert_held(bo->base.resv);
        } else {
                bo->base.resv = &bo->base._resv;
        }
        if (!ttm_bo_uses_embedded_gem_object(bo)) {
                /*
                 * bo.base is not initialized, so we have to setup the
                 * struct elements we want to use regardless.
                 */
                bo->base.size = size;
                dma_resv_init(&bo->base._resv);
                drm_vma_node_reset(&bo->base.vma_node);
        }
        atomic_inc(&ttm_glob.bo_count);

        /*
         * For ttm_bo_type_device buffers, allocate
         * address space from the device.
         */
        if (bo->type == ttm_bo_type_device ||
            bo->type == ttm_bo_type_sg)
                ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
                                         bo->mem.num_pages);

        /* passed reservation objects should already be locked,
         * since otherwise lockdep will be angered in radeon.
         */
        if (!resv) {
                locked = dma_resv_trylock(bo->base.resv);
                WARN_ON(!locked);
        }

        if (likely(!ret))
                ret = ttm_bo_validate(bo, placement, ctx);

        if (unlikely(ret)) {
                if (!resv)
                        ttm_bo_unreserve(bo);

                ttm_bo_put(bo);
                return ret;
        }

        ttm_bo_move_to_lru_tail_unlocked(bo);

        return ret;
}
EXPORT_SYMBOL(ttm_bo_init_reserved);

int ttm_bo_init(struct ttm_device *bdev,
                struct ttm_buffer_object *bo,
                size_t size,
                enum ttm_bo_type type,
                struct ttm_placement *placement,
                uint32_t page_alignment,
                bool interruptible,
                struct sg_table *sg,
                struct dma_resv *resv,
                void (*destroy) (struct ttm_buffer_object *))
{
        struct ttm_operation_ctx ctx = { interruptible, false };
        int ret;

        ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
                                   page_alignment, &ctx, sg, resv, destroy);
        if (ret)
                return ret;

        if (!resv)
                ttm_bo_unreserve(bo);

        return 0;
}
EXPORT_SYMBOL(ttm_bo_init);
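
/*
 * Creation sketch (hypothetical driver wrapper "struct my_bo" embedding a
 * ttm_buffer_object "tbo", with "my_bo_destroy" freeing the wrapper, and
 * reusing the "vram_gtt" placement sketched earlier):
 *
 *      bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *      if (!bo)
 *              return -ENOMEM;
 *
 *      ret = ttm_bo_init(bdev, &bo->tbo, size, ttm_bo_type_device,
 *                        &vram_gtt, 0, true, NULL, NULL, my_bo_destroy);
 *
 * On error ttm_bo_init() has already dropped the initial reference and
 * invoked the destroy callback, so the caller must not free "bo" again.
 */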

/*
 * buffer object vm functions.
 */

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
        struct ttm_device *bdev = bo->bdev;

        drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
        ttm_mem_io_free(bdev, &bo->mem);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

int ttm_bo_wait(struct ttm_buffer_object *bo,
                bool interruptible, bool no_wait)
{
        long timeout = 15 * HZ;

        if (no_wait) {
                if (dma_resv_test_signaled_rcu(bo->base.resv, true))
                        return 0;
                else
                        return -EBUSY;
        }

        timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
                                            interruptible, timeout);
        if (timeout < 0)
                return timeout;

        if (timeout == 0)
                return -EBUSY;

        dma_resv_add_excl_fence(bo->base.resv, NULL);
        return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);
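
/*
 * Usage sketch: callers typically block on all fences before CPU access,
 * with the reservation held:
 *
 *      ret = ttm_bo_wait(bo, true, false);
 *      if (ret)
 *              return ret;
 */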

int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
                   gfp_t gfp_flags)
{
        struct ttm_global *glob = &ttm_glob;
        bool locked;
        int ret;

        if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked, NULL))
                return -EBUSY;

        if (!ttm_bo_get_unless_zero(bo)) {
                if (locked)
                        dma_resv_unlock(bo->base.resv);
                return -EBUSY;
        }

        if (bo->deleted) {
                ttm_bo_cleanup_refs(bo, false, false, locked);
                ttm_bo_put(bo);
                return 0;
        }

        ttm_bo_del_from_lru(bo);
        /* TODO: Cleanup the locking */
        spin_unlock(&glob->lru_lock);

        /*
         * Move to system cached
         */
        if (bo->mem.mem_type != TTM_PL_SYSTEM) {
                struct ttm_operation_ctx ctx = { false, false };
                struct ttm_resource evict_mem;
                struct ttm_place hop;

                memset(&hop, 0, sizeof(hop));

                evict_mem = bo->mem;
                evict_mem.mm_node = NULL;
                evict_mem.placement = 0;
                evict_mem.mem_type = TTM_PL_SYSTEM;

                ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx, &hop);
                if (unlikely(ret != 0)) {
                        WARN(ret == -EMULTIHOP, "Unexpected multihop in swapout - likely driver bug.\n");
                        goto out;
                }
        }

        /*
         * Make sure BO is idle.
         */
        ret = ttm_bo_wait(bo, false, false);
        if (unlikely(ret != 0))
                goto out;

        ttm_bo_unmap_virtual(bo);

        /*
         * Swap out. Buffer will be swapped in again as soon as
         * anyone tries to access a ttm page.
         */
        if (bo->bdev->funcs->swap_notify)
                bo->bdev->funcs->swap_notify(bo);

        ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags);
out:

        /*
         * Unreserve without putting on LRU to avoid swapping out an
         * already swapped buffer.
         */
        if (locked)
                dma_resv_unlock(bo->base.resv);
        ttm_bo_put(bo);
        return ret;
}

void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
{
        if (bo->ttm == NULL)
                return;

        ttm_tt_destroy(bo->bdev, bo->ttm);
        bo->ttm = NULL;
}