}
if (bo->ttm && !ttm_tt_is_populated(bo->ttm)) {
- ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
+ ret = ttm_bo_populate(bo, &ctx);
if (ret)
return ret;
/* Populate ttm with pages if needed. Typically system memory. */
if (ttm && (dst_man->use_tt || (ttm->page_flags & TTM_TT_FLAG_SWAPPED))) {
- ret = ttm_tt_populate(bo->bdev, ttm, ctx);
+ ret = ttm_bo_populate(bo, ctx);
if (ret)
return ret;
}
goto out_no_lock;
backup_bo = i915_gem_to_ttm(backup);
- err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
+ err = ttm_bo_populate(backup_bo, &ctx);
if (err)
goto out_no_populate;
if (!backup_bo->resource)
err = ttm_bo_validate(backup_bo, i915_ttm_sys_placement(), &ctx);
if (!err)
- err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
+ err = ttm_bo_populate(backup_bo, &ctx);
if (!err) {
err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu,
false);
err = ttm_resource_alloc(bo, place, &res2);
KUNIT_ASSERT_EQ(test, err, 0);
KUNIT_ASSERT_EQ(test,
- list_is_last(&res2->lru.link, &priv->ttm_dev->pinned), 1);
+ list_is_last(&res2->lru.link, &priv->ttm_dev->unevictable), 1);
ttm_bo_unreserve(bo);
KUNIT_ASSERT_EQ(test,
- list_is_last(&res1->lru.link, &priv->ttm_dev->pinned), 1);
+ list_is_last(&res1->lru.link, &priv->ttm_dev->unevictable), 1);
ttm_resource_free(bo, &res1);
ttm_resource_free(bo, &res2);
res = kunit_kzalloc(test, sizeof(*res), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, res);
- KUNIT_ASSERT_TRUE(test, list_empty(&bo->bdev->pinned));
+ KUNIT_ASSERT_TRUE(test, list_empty(&bo->bdev->unevictable));
dma_resv_lock(bo->base.resv, NULL);
ttm_bo_pin(bo);
ttm_resource_init(bo, place, res);
- KUNIT_ASSERT_TRUE(test, list_is_singular(&bo->bdev->pinned));
+ KUNIT_ASSERT_TRUE(test, list_is_singular(&bo->bdev->unevictable));
ttm_bo_unpin(bo);
ttm_resource_fini(man, res);
dma_resv_unlock(bo->base.resv);
- KUNIT_ASSERT_TRUE(test, list_empty(&bo->bdev->pinned));
+ KUNIT_ASSERT_TRUE(test, list_empty(&bo->bdev->unevictable));
}
static void ttm_resource_fini_basic(struct kunit *test)
goto out_err;
if (mem->mem_type != TTM_PL_SYSTEM) {
- ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
+ ret = ttm_bo_populate(bo, ctx);
if (ret)
goto out_err;
}
if (bo->bdev->funcs->swap_notify)
bo->bdev->funcs->swap_notify(bo);
- if (ttm_tt_is_populated(bo->ttm))
+ if (ttm_tt_is_populated(bo->ttm)) {
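+ /*
+  * Take the resource off its bulk move before swapping out: once the
+  * tt is marked swapped, ttm_resource_del_bulk_move() skips it and a
+  * stale bulk-move entry would be left behind.
+  */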
+ spin_lock(&bo->bdev->lru_lock);
+ ttm_resource_del_bulk_move(bo->resource, bo);
+ spin_unlock(&bo->bdev->lru_lock);
+
ret = ttm_tt_swapout(bo->bdev, bo->ttm, swapout_walk->gfp_flags);
+ spin_lock(&bo->bdev->lru_lock);
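+ /*
+  * On success, ttm_resource_move_to_lru_tail() now parks the resource
+  * on bdev->unevictable; on failure, re-add it to the bulk move and
+  * return it to its normal LRU position.
+  */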
+ if (ret)
+ ttm_resource_add_bulk_move(bo->resource, bo);
+ ttm_resource_move_to_lru_tail(bo->resource);
+ spin_unlock(&bo->bdev->lru_lock);
+ }
+
out:
/* Consider -ENOMEM and -ENOSPC non-fatal. */
if (ret == -ENOMEM || ret == -ENOSPC)
ttm_tt_destroy(bo->bdev, bo->ttm);
bo->ttm = NULL;
}
+
+/**
+ * ttm_bo_populate() - Ensure that a buffer object has backing pages
+ * @bo: The buffer object
+ * @ctx: The ttm_operation_ctx governing the operation.
+ *
+ * For buffer objects in a memory type whose manager uses
+ * struct ttm_tt for backing pages, ensure those backing pages
+ * are present and hold valid content. The bo's resource is also
+ * placed on the correct LRU list if it was previously swapped
+ * out.
+ *
+ * Return: 0 if successful, negative error code on failure.
+ * Note: May return -EINTR or -ERESTARTSYS if @ctx::interruptible
+ * is set to true.
+ */
+int ttm_bo_populate(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx)
+{
+ struct ttm_tt *tt = bo->ttm;
+ bool swapped;
+ int ret;
+
+ dma_resv_assert_held(bo->base.resv);
+
+ if (!tt)
+ return 0;
+
+ swapped = ttm_tt_is_swapped(tt);
+ ret = ttm_tt_populate(bo->bdev, tt, ctx);
+ if (ret)
+ return ret;
+
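+ /*
+  * If populate() just read swapped-out pages back in, re-add the
+  * resource to its bulk move and move it back onto its LRU list.
+  */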
+ if (swapped && !ttm_tt_is_swapped(tt) && !bo->pin_count &&
+ bo->resource) {
+ spin_lock(&bo->bdev->lru_lock);
+ ttm_resource_add_bulk_move(bo->resource, bo);
+ ttm_resource_move_to_lru_tail(bo->resource);
+ spin_unlock(&bo->bdev->lru_lock);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ttm_bo_populate);
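A minimal usage sketch (not part of the patch; the driver helper name is invented): callers hold the bo's reservation lock and pass a ttm_operation_ctx, exactly as the converted call sites above do:

	static int my_driver_cpu_prepare(struct ttm_buffer_object *bo)
	{
		struct ttm_operation_ctx ctx = {
			.interruptible = true,	/* may return -EINTR/-ERESTARTSYS */
			.no_wait_gpu = false,
		};
		int ret;

		ret = dma_resv_lock(bo->base.resv, NULL);
		if (ret)
			return ret;

		/* Populates bo->ttm and, if it was swapped out, restores its LRU placement. */
		ret = ttm_bo_populate(bo, &ctx);
		dma_resv_unlock(bo->base.resv);
		return ret;
	}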
src_man = ttm_manager_type(bdev, src_mem->mem_type);
if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) ||
dst_man->use_tt)) {
- ret = ttm_tt_populate(bdev, ttm, ctx);
+ ret = ttm_bo_populate(bo, ctx);
if (ret)
return ret;
}
BUG_ON(!ttm);
- ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
+ ret = ttm_bo_populate(bo, &ctx);
if (ret)
return ret;
pgprot_t prot;
void *vaddr;
- ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
+ ret = ttm_bo_populate(bo, &ctx);
if (ret)
return ret;
};
ttm = bo->ttm;
- err = ttm_tt_populate(bdev, bo->ttm, &ctx);
+ err = ttm_bo_populate(bo, &ctx);
if (err) {
if (err == -EINTR || err == -ERESTARTSYS ||
err == -EAGAIN)
bdev->vma_manager = vma_manager;
spin_lock_init(&bdev->lru_lock);
- INIT_LIST_HEAD(&bdev->pinned);
+ INIT_LIST_HEAD(&bdev->unevictable);
bdev->dev_mapping = mapping;
mutex_lock(&ttm_global_mutex);
list_add_tail(&bdev->device_list, &glob->device_list);
struct ttm_resource_manager *man;
unsigned int i, j;
- ttm_device_clear_lru_dma_mappings(bdev, &bdev->pinned);
+ ttm_device_clear_lru_dma_mappings(bdev, &bdev->unevictable);
for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
man = ttm_manager_type(bdev, i);
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_tt.h>
#include <drm/drm_util.h>
}
}
+static bool ttm_resource_is_swapped(struct ttm_resource *res, struct ttm_buffer_object *bo)
+{
+ /*
+  * When creating a new resource for a bo, take care that it is not
+  * considered swapped if it is not the bo's current resource: only the
+  * current resource is logically associated with the ttm_tt. Think of a
+  * VRAM resource created to move a swapped-out bo to VRAM.
+  */
+ if (bo->resource != res || !bo->ttm)
+ return false;
+
+ dma_resv_assert_held(bo->base.resv);
+ return ttm_tt_is_swapped(bo->ttm);
+}
+
/* Add the resource to a bulk move if the BO is configured for it */
void ttm_resource_add_bulk_move(struct ttm_resource *res,
struct ttm_buffer_object *bo)
{
- if (bo->bulk_move && !bo->pin_count)
+ if (bo->bulk_move && !bo->pin_count && !ttm_resource_is_swapped(res, bo))
ttm_lru_bulk_move_add(bo->bulk_move, res);
}
void ttm_resource_del_bulk_move(struct ttm_resource *res,
struct ttm_buffer_object *bo)
{
- if (bo->bulk_move && !bo->pin_count)
+ if (bo->bulk_move && !bo->pin_count && !ttm_resource_is_swapped(res, bo))
ttm_lru_bulk_move_del(bo->bulk_move, res);
}
lockdep_assert_held(&bo->bdev->lru_lock);
- if (bo->pin_count) {
- list_move_tail(&res->lru.link, &bdev->pinned);
+ if (bo->pin_count || ttm_resource_is_swapped(res, bo)) {
+ list_move_tail(&res->lru.link, &bdev->unevictable);
} else if (bo->bulk_move) {
struct ttm_lru_bulk_move_pos *pos =
man = ttm_manager_type(bo->bdev, place->mem_type);
spin_lock(&bo->bdev->lru_lock);
- if (bo->pin_count)
- list_add_tail(&res->lru.link, &bo->bdev->pinned);
+ if (bo->pin_count || ttm_resource_is_swapped(res, bo))
+ list_add_tail(&res->lru.link, &bo->bdev->unevictable);
else
list_add_tail(&res->lru.link, &man->lru[bo->priority]);
man->usage += res->size;
}
return ret;
}
+
+#if IS_ENABLED(CONFIG_DRM_TTM_KUNIT_TEST)
EXPORT_SYMBOL(ttm_tt_populate);
+#endif
void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
}
}
- ret = ttm_tt_populate(bo->ttm.bdev, bo->ttm.ttm, &ctx);
+ ret = ttm_bo_populate(&bo->ttm, &ctx);
if (ret)
goto err_res_free;
if (ret)
return ret;
- ret = ttm_tt_populate(bo->ttm.bdev, bo->ttm.ttm, &ctx);
+ ret = ttm_bo_populate(&bo->ttm, &ctx);
if (ret)
goto err_res_free;
pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
pgprot_t tmp);
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);
+int ttm_bo_populate(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx);
#endif
spinlock_t lru_lock;
/**
- * @pinned: Buffer objects which are pinned and so not on any LRU list.
+ * @unevictable: Buffer objects which are pinned or swapped out and, as
+ * such, not on an LRU list.
*/
- struct list_head pinned;
+ struct list_head unevictable;
/**
* @dev_mapping: A pointer to the struct address_space for invalidating
return tt->page_flags & TTM_TT_FLAG_PRIV_POPULATED;
}
+static inline bool ttm_tt_is_swapped(const struct ttm_tt *tt)
+{
+ return tt->page_flags & TTM_TT_FLAG_SWAPPED;
+}
+
/**
* ttm_tt_create
*