diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index ebcffe7..bb9e02c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -58,7 +58,7 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
        int i, mem_type;
 
        drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n",
-                  bo, bo->mem.num_pages, bo->base.size >> 10,
+                  bo, bo->resource->num_pages, bo->base.size >> 10,
                   bo->base.size >> 20);
        for (i = 0; i < placement->num_placement; i++) {
                mem_type = placement->placement[i].mem_type;
@@ -102,6 +102,9 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
                return;
        }
 
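+       /*
+        * Callers may pass a NULL resource (e.g. while the BO is still being
+        * set up or already torn down); there is nothing to put on a LRU then.
+        */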
+       if (!mem)
+               return;
+
        man = ttm_manager_type(bdev, mem->mem_type);
        list_move_tail(&bo->lru, &man->lru[bo->priority]);
 
@@ -109,7 +112,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
                bdev->funcs->del_from_lru_notify(bo);
 
        if (bulk && !bo->pin_count) {
-               switch (bo->mem.mem_type) {
+               switch (bo->resource->mem_type) {
                case TTM_PL_TT:
                        ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
                        break;
@@ -163,11 +166,13 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                                  struct ttm_operation_ctx *ctx,
                                  struct ttm_place *hop)
 {
+       struct ttm_resource_manager *old_man, *new_man;
        struct ttm_device *bdev = bo->bdev;
-       struct ttm_resource_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type);
-       struct ttm_resource_manager *new_man = ttm_manager_type(bdev, mem->mem_type);
        int ret;
 
+       old_man = ttm_manager_type(bdev, bo->resource->mem_type);
+       new_man = ttm_manager_type(bdev, mem->mem_type);
+
        ttm_bo_unmap_virtual(bo);
 
        /*
@@ -200,7 +205,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
        return 0;
 
 out_err:
-       new_man = ttm_manager_type(bdev, bo->mem.mem_type);
+       new_man = ttm_manager_type(bdev, bo->resource->mem_type);
        if (!new_man->use_tt)
                ttm_bo_tt_destroy(bo);
 
@@ -221,7 +226,7 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
                bo->bdev->funcs->delete_mem_notify(bo);
 
        ttm_bo_tt_destroy(bo);
-       ttm_resource_free(bo, &bo->mem);
+       ttm_resource_free(bo, &bo->resource);
 }
 
 static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
@@ -259,8 +264,8 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
        int i;
 
        rcu_read_lock();
-       fobj = rcu_dereference(resv->fence);
-       fence = rcu_dereference(resv->fence_excl);
+       fobj = dma_resv_shared_list(resv);
+       fence = dma_resv_excl_fence(resv);
        if (fence && !fence->ops->signaled)
                dma_fence_enable_sw_signaling(fence);
 
@@ -274,7 +279,7 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
 }
 
 /**
- * function ttm_bo_cleanup_refs
+ * ttm_bo_cleanup_refs
  * If bo idle, remove from lru lists, and unref.
  * If not idle, block if possible.
  *
@@ -294,7 +299,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
        struct dma_resv *resv = &bo->base._resv;
        int ret;
 
-       if (dma_resv_test_signaled_rcu(resv, true))
+       if (dma_resv_test_signaled(resv, true))
                ret = 0;
        else
                ret = -EBUSY;
@@ -306,8 +311,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
                        dma_resv_unlock(bo->base.resv);
                spin_unlock(&bo->bdev->lru_lock);
 
-               lret = dma_resv_wait_timeout_rcu(resv, true, interruptible,
-                                                30 * HZ);
+               lret = dma_resv_wait_timeout(resv, true, interruptible,
+                                            30 * HZ);
 
                if (lret < 0)
                        return lret;
@@ -401,24 +406,26 @@ static void ttm_bo_release(struct kref *kref)
        struct ttm_device *bdev = bo->bdev;
        int ret;
 
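+       /*
+        * A BO that is still pinned at final release points at a driver bug;
+        * warn once but carry on with the teardown.
+        */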
+       WARN_ON_ONCE(bo->pin_count);
+
        if (!bo->deleted) {
                ret = ttm_bo_individualize_resv(bo);
                if (ret) {
                        /* Last resort, if we fail to allocate memory for the
                         * fences block for the BO to become idle
                         */
-                       dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
-                                                 30 * HZ);
+                       dma_resv_wait_timeout(bo->base.resv, true, false,
+                                             30 * HZ);
                }
 
                if (bo->bdev->funcs->release_notify)
                        bo->bdev->funcs->release_notify(bo);
 
                drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
-               ttm_mem_io_free(bdev, &bo->mem);
+               ttm_mem_io_free(bdev, bo->resource);
        }
 
-       if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
+       if (!dma_resv_test_signaled(bo->base.resv, true) ||
            !dma_resv_trylock(bo->base.resv)) {
                /* The BO is not idle, resurrect it for delayed destroy */
                ttm_bo_flush_all_fences(bo);
@@ -434,9 +441,9 @@ static void ttm_bo_release(struct kref *kref)
                 * FIXME: QXL is triggering this. Can be removed when the
                 * driver is fixed.
                 */
-               if (WARN_ON_ONCE(bo->pin_count)) {
+               if (bo->pin_count) {
                        bo->pin_count = 0;
-                       ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
+                       ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
                }
 
                kref_init(&bo->kref);
@@ -458,8 +465,6 @@ static void ttm_bo_release(struct kref *kref)
 
        atomic_dec(&ttm_glob.bo_count);
        dma_fence_put(bo->moving);
-       if (!ttm_bo_uses_embedded_gem_object(bo))
-               dma_resv_fini(&bo->base._resv);
        bo->destroy(bo);
 }
 
@@ -483,11 +488,36 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched)
 }
 EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
 
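+/*
+ * Move the BO through the driver-suggested temporary placement @hop when a
+ * direct move returns -EMULTIHOP; the caller then retries the final move.
+ */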
+static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
+                                    struct ttm_resource **mem,
+                                    struct ttm_operation_ctx *ctx,
+                                    struct ttm_place *hop)
+{
+       struct ttm_placement hop_placement;
+       struct ttm_resource *hop_mem;
+       int ret;
+
+       hop_placement.num_placement = hop_placement.num_busy_placement = 1;
+       hop_placement.placement = hop_placement.busy_placement = hop;
+
+       /* find space in the bounce domain */
+       ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
+       if (ret)
+               return ret;
+       /* move to the bounce domain */
+       ret = ttm_bo_handle_move_mem(bo, hop_mem, false, ctx, NULL);
+       if (ret) {
+               ttm_resource_free(bo, &hop_mem);
+               return ret;
+       }
+       return 0;
+}
+
 static int ttm_bo_evict(struct ttm_buffer_object *bo,
                        struct ttm_operation_ctx *ctx)
 {
        struct ttm_device *bdev = bo->bdev;
-       struct ttm_resource evict_mem;
+       struct ttm_resource *evict_mem;
        struct ttm_placement placement;
        struct ttm_place hop;
        int ret = 0;
@@ -501,17 +531,17 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
        bdev->funcs->evict_flags(bo, &placement);
 
        if (!placement.num_placement && !placement.num_busy_placement) {
-               ttm_bo_wait(bo, false, false);
+               ret = ttm_bo_wait(bo, true, false);
+               if (ret)
+                       return ret;
 
-               ttm_bo_cleanup_memtype_use(bo);
-               return ttm_tt_create(bo, false);
+               /*
+                * Since we've already synced, this frees backing store
+                * immediately.
+                */
+               return ttm_bo_pipeline_gutting(bo);
        }
 
-       evict_mem = bo->mem;
-       evict_mem.mm_node = NULL;
-       evict_mem.bus.offset = 0;
-       evict_mem.bus.addr = NULL;
-
        ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
        if (ret) {
                if (ret != -ERESTARTSYS) {
@@ -522,12 +552,17 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
                goto out;
        }
 
-       ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx, &hop);
-       if (unlikely(ret)) {
-               WARN(ret == -EMULTIHOP, "Unexpected multihop in eviction - likely driver bug\n");
-               if (ret != -ERESTARTSYS)
+bounce:
+       ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
+       if (ret == -EMULTIHOP) {
+               ret = ttm_bo_bounce_temp_buffer(bo, &evict_mem, ctx, &hop);
+               if (ret) {
                        pr_err("Buffer eviction failed\n");
-               ttm_resource_free(bo, &evict_mem);
+                       ttm_resource_free(bo, &evict_mem);
+                       goto out;
+               }
+               /* try and move to final place now. */
+               goto bounce;
        }
 out:
        return ret;
@@ -536,11 +571,15 @@ out:
 bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
                              const struct ttm_place *place)
 {
+       dma_resv_assert_held(bo->base.resv);
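+       /*
+        * System-domain BOs are always considered evictable; the placement
+        * range check below only makes sense for other memory types.
+        */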
+       if (bo->resource->mem_type == TTM_PL_SYSTEM)
+               return true;
+
        /* Don't evict this BO if it's outside of the
         * requested placement range
         */
-       if (place->fpfn >= (bo->mem.start + bo->mem.num_pages) ||
-           (place->lpfn && place->lpfn <= bo->mem.start))
+       if (place->fpfn >= (bo->resource->start + bo->resource->num_pages) ||
+           (place->lpfn && place->lpfn <= bo->resource->start))
                return false;
 
        return true;
@@ -558,7 +597,9 @@ EXPORT_SYMBOL(ttm_bo_eviction_valuable);
  * b. Otherwise, trylock it.
  */
 static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
-                       struct ttm_operation_ctx *ctx, bool *locked, bool *busy)
+                                          struct ttm_operation_ctx *ctx,
+                                          const struct ttm_place *place,
+                                          bool *locked, bool *busy)
 {
        bool ret = false;
 
@@ -576,6 +617,14 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
                        *busy = !ret;
        }
 
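+       /*
+        * Also give the driver a chance to veto the eviction for this
+        * placement, dropping the lock again if it does.
+        */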
+       if (ret && place && !bo->bdev->funcs->eviction_valuable(bo, place)) {
+               ret = false;
+               if (*locked) {
+                       dma_resv_unlock(bo->base.resv);
+                       *locked = false;
+               }
+       }
+
        return ret;
 }
 
@@ -630,20 +679,14 @@ int ttm_mem_evict_first(struct ttm_device *bdev,
                list_for_each_entry(bo, &man->lru[i], lru) {
                        bool busy;
 
-                       if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
-                                                           &busy)) {
+                       if (!ttm_bo_evict_swapout_allowable(bo, ctx, place,
+                                                           &locked, &busy)) {
                                if (busy && !busy_bo && ticket !=
                                    dma_resv_locking_ctx(bo->base.resv))
                                        busy_bo = bo;
                                continue;
                        }
 
-                       if (place && !bdev->funcs->eviction_valuable(bo,
-                                                                     place)) {
-                               if (locked)
-                                       dma_resv_unlock(bo->base.resv);
-                               continue;
-                       }
                        if (!ttm_bo_get_unless_zero(bo)) {
                                if (locked)
                                        dma_resv_unlock(bo->base.resv);
@@ -687,7 +730,9 @@ int ttm_mem_evict_first(struct ttm_device *bdev,
 }
 
 /*
- * Add the last move fence to the BO and reserve a new shared slot.
+ * Add the last move fence to the BO and reserve a new shared slot. We only use
+ * a shared slot to avoid unnecessary sync and rely on the subsequent bo move
+ * to either stall or use an exclusive fence and set bo->moving accordingly.
  */
 static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
                                 struct ttm_resource_manager *man,
@@ -729,14 +774,15 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
  */
 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                                  const struct ttm_place *place,
-                                 struct ttm_resource *mem,
+                                 struct ttm_resource **mem,
                                  struct ttm_operation_ctx *ctx)
 {
        struct ttm_device *bdev = bo->bdev;
-       struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type);
+       struct ttm_resource_manager *man;
        struct ww_acquire_ctx *ticket;
        int ret;
 
+       man = ttm_manager_type(bdev, place->mem_type);
        ticket = dma_resv_locking_ctx(bo->base.resv);
        do {
                ret = ttm_resource_alloc(bo, place, mem);
@@ -750,37 +796,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                        return ret;
        } while (1);
 
-       return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
-}
-
-/**
- * ttm_bo_mem_placement - check if placement is compatible
- * @bo: BO to find memory for
- * @place: where to search
- * @mem: the memory object to fill in
- *
- * Check if placement is compatible and fill in mem structure.
- * Returns -EBUSY if placement won't work or negative error code.
- * 0 when placement can be used.
- */
-static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
-                               const struct ttm_place *place,
-                               struct ttm_resource *mem)
-{
-       struct ttm_device *bdev = bo->bdev;
-       struct ttm_resource_manager *man;
-
-       man = ttm_manager_type(bdev, place->mem_type);
-       if (!man || !ttm_resource_manager_used(man))
-               return -EBUSY;
-
-       mem->mem_type = place->mem_type;
-       mem->placement = place->flags;
-
-       spin_lock(&bo->bdev->lru_lock);
-       ttm_bo_move_to_lru_tail(bo, mem, NULL);
-       spin_unlock(&bo->bdev->lru_lock);
-       return 0;
+       return ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
 }
 
 /*
@@ -793,7 +809,7 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
  */
 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
-                       struct ttm_resource *mem,
+                       struct ttm_resource **mem,
                        struct ttm_operation_ctx *ctx)
 {
        struct ttm_device *bdev = bo->bdev;
@@ -808,8 +824,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                const struct ttm_place *place = &placement->placement[i];
                struct ttm_resource_manager *man;
 
-               ret = ttm_bo_mem_placement(bo, place, mem);
-               if (ret)
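+               /* Skip placements whose manager is missing or not in use. */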
+               man = ttm_manager_type(bdev, place->mem_type);
+               if (!man || !ttm_resource_manager_used(man))
                        continue;
 
                type_found = true;
@@ -819,8 +835,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                if (unlikely(ret))
                        goto error;
 
-               man = ttm_manager_type(bdev, mem->mem_type);
-               ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
+               ret = ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
                if (unlikely(ret)) {
                        ttm_resource_free(bo, mem);
                        if (ret == -EBUSY)
@@ -833,9 +848,10 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 
        for (i = 0; i < placement->num_busy_placement; ++i) {
                const struct ttm_place *place = &placement->busy_placement[i];
+               struct ttm_resource_manager *man;
 
-               ret = ttm_bo_mem_placement(bo, place, mem);
-               if (ret)
+               man = ttm_manager_type(bdev, place->mem_type);
+               if (!man || !ttm_resource_manager_used(man))
                        continue;
 
                type_found = true;
@@ -854,60 +870,23 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
        }
 
 error:
-       if (bo->mem.mem_type == TTM_PL_SYSTEM && !bo->pin_count)
+       if (bo->resource->mem_type == TTM_PL_SYSTEM && !bo->pin_count)
                ttm_bo_move_to_lru_tail_unlocked(bo);
 
        return ret;
 }
 EXPORT_SYMBOL(ttm_bo_mem_space);
 
-static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
-                                    struct ttm_resource *mem,
-                                    struct ttm_operation_ctx *ctx,
-                                    struct ttm_place *hop)
-{
-       struct ttm_placement hop_placement;
-       int ret;
-       struct ttm_resource hop_mem = *mem;
-
-       hop_mem.mm_node = NULL;
-       hop_mem.mem_type = TTM_PL_SYSTEM;
-       hop_mem.placement = 0;
-
-       hop_placement.num_placement = hop_placement.num_busy_placement = 1;
-       hop_placement.placement = hop_placement.busy_placement = hop;
-
-       /* find space in the bounce domain */
-       ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
-       if (ret)
-               return ret;
-       /* move to the bounce domain */
-       ret = ttm_bo_handle_move_mem(bo, &hop_mem, false, ctx, NULL);
-       if (ret) {
-               ttm_resource_free(bo, &hop_mem);
-               return ret;
-       }
-       return 0;
-}
-
 static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
                              struct ttm_placement *placement,
                              struct ttm_operation_ctx *ctx)
 {
-       int ret = 0;
+       struct ttm_resource *mem;
        struct ttm_place hop;
-       struct ttm_resource mem;
+       int ret;
 
        dma_resv_assert_held(bo->base.resv);
 
-       memset(&hop, 0, sizeof(hop));
-
-       mem.num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
-       mem.page_alignment = bo->mem.page_alignment;
-       mem.bus.offset = 0;
-       mem.bus.addr = NULL;
-       mem.mm_node = NULL;
-
        /*
         * Determine where to move the buffer.
         *
@@ -921,7 +900,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
        if (ret)
                return ret;
 bounce:
-       ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx, &hop);
+       ret = ttm_bo_handle_move_mem(bo, mem, false, ctx, &hop);
        if (ret == -EMULTIHOP) {
                ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
                if (ret)
@@ -942,6 +921,9 @@ static bool ttm_bo_places_compat(const struct ttm_place *places,
 {
        unsigned i;
 
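+       /* Resources in a temporary placement are never considered compatible. */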
+       if (mem->placement & TTM_PL_FLAG_TEMPORARY)
+               return false;
+
        for (i = 0; i < num_placement; i++) {
                const struct ttm_place *heap = &places[i];
 
@@ -989,18 +971,13 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
        /*
         * Remove the backing store if no placement is given.
         */
-       if (!placement->num_placement && !placement->num_busy_placement) {
-               ret = ttm_bo_pipeline_gutting(bo);
-               if (ret)
-                       return ret;
-
-               return ttm_tt_create(bo, false);
-       }
+       if (!placement->num_placement && !placement->num_busy_placement)
+               return ttm_bo_pipeline_gutting(bo);
 
        /*
         * Check whether we need to move buffer.
         */
-       if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
+       if (!ttm_bo_mem_compat(placement, bo->resource, &new_flags)) {
                ret = ttm_bo_move_buffer(bo, placement, ctx);
                if (ret)
                        return ret;
@@ -1008,7 +985,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
        /*
         * We might need to add a TTM.
         */
-       if (bo->mem.mem_type == TTM_PL_SYSTEM) {
+       if (bo->resource->mem_type == TTM_PL_SYSTEM) {
                ret = ttm_tt_create(bo, true);
                if (ret)
                        return ret;
@@ -1028,8 +1005,9 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
                         struct dma_resv *resv,
                         void (*destroy) (struct ttm_buffer_object *))
 {
+       static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
        bool locked;
-       int ret = 0;
+       int ret;
 
        bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
 
@@ -1038,14 +1016,8 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
        INIT_LIST_HEAD(&bo->ddestroy);
        bo->bdev = bdev;
        bo->type = type;
-       bo->mem.mem_type = TTM_PL_SYSTEM;
-       bo->mem.num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
-       bo->mem.mm_node = NULL;
-       bo->mem.page_alignment = page_alignment;
-       bo->mem.bus.offset = 0;
-       bo->mem.bus.addr = NULL;
+       bo->page_alignment = page_alignment;
        bo->moving = NULL;
-       bo->mem.placement = 0;
        bo->pin_count = 0;
        bo->sg = sg;
        if (resv) {
@@ -1054,17 +1026,14 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
        } else {
                bo->base.resv = &bo->base._resv;
        }
-       if (!ttm_bo_uses_embedded_gem_object(bo)) {
-               /*
-                * bo.base is not initialized, so we have to setup the
-                * struct elements we want use regardless.
-                */
-               bo->base.size = size;
-               dma_resv_init(&bo->base._resv);
-               drm_vma_node_reset(&bo->base.vma_node);
-       }
        atomic_inc(&ttm_glob.bo_count);
 
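+       /*
+        * Instead of the previously embedded bo->mem, start the BO out with
+        * a freshly allocated system-domain resource.
+        */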
+       ret = ttm_resource_alloc(bo, &sys_mem, &bo->resource);
+       if (unlikely(ret)) {
+               ttm_bo_put(bo);
+               return ret;
+       }
+
        /*
         * For ttm_bo_type_device buffers, allocate
         * address space from the device.
@@ -1072,7 +1041,7 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
        if (bo->type == ttm_bo_type_device ||
            bo->type == ttm_bo_type_sg)
                ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
-                                        bo->mem.num_pages);
+                                        bo->resource->num_pages);
 
        /* passed reservation objects should already be locked,
         * since otherwise lockdep will be angered in radeon.
@@ -1134,7 +1103,7 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
        struct ttm_device *bdev = bo->bdev;
 
        drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
-       ttm_mem_io_free(bdev, &bo->mem);
+       ttm_mem_io_free(bdev, bo->resource);
 }
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
@@ -1144,14 +1113,14 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
        long timeout = 15 * HZ;
 
        if (no_wait) {
-               if (dma_resv_test_signaled_rcu(bo->base.resv, true))
+               if (dma_resv_test_signaled(bo->base.resv, true))
                        return 0;
                else
                        return -EBUSY;
        }
 
-       timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
-                                                     interruptible, timeout);
+       timeout = dma_resv_wait_timeout(bo->base.resv, true, interruptible,
+                                       timeout);
        if (timeout < 0)
                return timeout;
 
@@ -1166,10 +1135,19 @@ EXPORT_SYMBOL(ttm_bo_wait);
 int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
                   gfp_t gfp_flags)
 {
+       struct ttm_place place;
        bool locked;
        int ret;
 
-       if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked, NULL))
+       /*
+        * While the bo may already reside in SYSTEM placement, set
+        * SYSTEM as the new placement to also cover the move further below.
+        * The driver may use the fact that we're moving from SYSTEM
+        * as an indication that we're about to swap out.
+        */
+       memset(&place, 0, sizeof(place));
+       place.mem_type = TTM_PL_SYSTEM;
+       if (!ttm_bo_evict_swapout_allowable(bo, ctx, &place, &locked, NULL))
                return -EBUSY;
 
        if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
@@ -1182,9 +1160,9 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
        }
 
        if (bo->deleted) {
-               ttm_bo_cleanup_refs(bo, false, false, locked);
+               ret = ttm_bo_cleanup_refs(bo, false, false, locked);
                ttm_bo_put(bo);
-               return 0;
+               return ret == -EBUSY ? -ENOSPC : ret;
        }
 
        ttm_bo_del_from_lru(bo);
@@ -1194,19 +1172,17 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
        /*
         * Move to system cached
         */
-       if (bo->mem.mem_type != TTM_PL_SYSTEM) {
+       if (bo->resource->mem_type != TTM_PL_SYSTEM) {
                struct ttm_operation_ctx ctx = { false, false };
-               struct ttm_resource evict_mem;
+               struct ttm_resource *evict_mem;
                struct ttm_place hop;
 
                memset(&hop, 0, sizeof(hop));
+               ret = ttm_resource_alloc(bo, &place, &evict_mem);
+               if (unlikely(ret))
+                       goto out;
 
-               evict_mem = bo->mem;
-               evict_mem.mm_node = NULL;
-               evict_mem.placement = 0;
-               evict_mem.mem_type = TTM_PL_SYSTEM;
-
-               ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx, &hop);
+               ret = ttm_bo_handle_move_mem(bo, evict_mem, true, &ctx, &hop);
                if (unlikely(ret != 0)) {
                        WARN(ret == -EMULTIHOP, "Unexpected multihop in swapout - likely driver bug.\n");
                        goto out;
@@ -1229,7 +1205,8 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
        if (bo->bdev->funcs->swap_notify)
                bo->bdev->funcs->swap_notify(bo);
 
-       ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags);
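+       /* Only swap the backing pages out if the TT is actually populated. */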
+       if (ttm_tt_is_populated(bo->ttm))
+               ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags);
 out:
 
        /*
@@ -1239,7 +1216,7 @@ out:
        if (locked)
                dma_resv_unlock(bo->base.resv);
        ttm_bo_put(bo);
-       return ret;
+       return ret == -EBUSY ? -ENOSPC : ret;
 }
 
 void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)