if (cpu_addr)
amdgpu_bo_kunmap(*bo_ptr);
- ttm_resource_free(&(*bo_ptr)->tbo, (*bo_ptr)->tbo.resource);
+ ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.resource);
for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
}
r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
- (*bo_ptr)->tbo.resource, &ctx);
+ &(*bo_ptr)->tbo.resource, &ctx);
if (r)
goto error;
return r;
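
The recurring shape in this series: ttm_bo_mem_space() and ttm_resource_alloc() now take a struct ttm_resource ** and allocate the resource object themselves, so a caller that re-places a BO (as amdgpu_bo_create_kernel_at does above) first frees through its pointer slot and then lets the callee refill that same slot. A minimal user-space model of the caller side, using hypothetical stand-in types and helpers (struct res, res_free(), mem_space()) rather than the kernel API:

#include <stdlib.h>

struct res { unsigned long fpfn, lpfn; };

static void res_free(struct res **slot)
{
        free(*slot);
        *slot = NULL;           /* slot is empty, never dangling */
}

static int mem_space(struct res **slot)
{
        struct res *r = calloc(1, sizeof(*r));

        if (!r)
                return -1;
        *slot = r;              /* callee allocates into the caller's slot */
        return 0;
}

static int replace(struct res **slot, unsigned long fpfn, unsigned long lpfn)
{
        int r;

        res_free(slot);         /* drop the old resource first */
        r = mem_space(slot);    /* then allocate the replacement in place */
        if (r)
                return r;
        (*slot)->fpfn = fpfn;
        (*slot)->lpfn = lpfn;
        return 0;
}
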
amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
- ttm_resource_free(bo, bo->resource);
+ ttm_resource_free(bo, &bo->resource);
ttm_bo_assign_mem(bo, new_mem);
goto out;
}
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
- struct ttm_resource tmp;
struct ttm_placement placement;
struct ttm_place placements;
+ struct ttm_resource *tmp;
uint64_t addr, flags;
int r;
addr = amdgpu_gmc_agp_addr(bo);
if (addr != AMDGPU_BO_INVALID_OFFSET) {
bo->resource->start = addr >> PAGE_SHIFT;
- } else {
+ return 0;
+ }
- /* allocate GART space */
- placement.num_placement = 1;
- placement.placement = &placements;
- placement.num_busy_placement = 1;
- placement.busy_placement = &placements;
- placements.fpfn = 0;
- placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
- placements.mem_type = TTM_PL_TT;
- placements.flags = bo->resource->placement;
-
- r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
- if (unlikely(r))
- return r;
+ /* allocate GART space */
+ placement.num_placement = 1;
+ placement.placement = &placements;
+ placement.num_busy_placement = 1;
+ placement.busy_placement = &placements;
+ placements.fpfn = 0;
+ placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
+ placements.mem_type = TTM_PL_TT;
+ placements.flags = bo->resource->placement;
- /* compute PTE flags for this buffer object */
- flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
+ r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
+ if (unlikely(r))
+ return r;
- /* Bind pages */
- gtt->offset = (u64)tmp.start << PAGE_SHIFT;
- r = amdgpu_ttm_gart_bind(adev, bo, flags);
- if (unlikely(r)) {
- ttm_resource_free(bo, &tmp);
- return r;
- }
+ /* compute PTE flags for this buffer object */
+ flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);
- ttm_resource_free(bo, bo->resource);
- ttm_bo_assign_mem(bo, &tmp);
- }
+ /* Bind pages */
+ gtt->offset = (u64)tmp->start << PAGE_SHIFT;
+ r = amdgpu_ttm_gart_bind(adev, bo, flags);
+ if (unlikely(r)) {
+ ttm_resource_free(bo, &tmp);
+ return r;
+ }
+ ttm_resource_free(bo, &bo->resource);
+ ttm_bo_assign_mem(bo, tmp);
+
return 0;
}
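
One detail worth calling out in the rewritten amdgpu_ttm_alloc_gart(): the replacement GART resource is allocated and bound before the BO's current resource is touched, so a failed bind discards only tmp and leaves the object as it was. A sketch of that prepare-then-commit ordering, reusing the hypothetical model above (bind() is a stand-in for the GART bind step):

static int bind(struct res *r)
{
        (void)r;                /* stand-in: pretend the bind succeeded */
        return 0;
}

static int swap_in(struct res **cur)
{
        struct res *tmp;
        int r;

        r = mem_space(&tmp);    /* prepare the replacement */
        if (r)
                return r;       /* *cur is still valid */

        r = bind(tmp);
        if (r) {
                res_free(&tmp); /* failure: discard only the new resource */
                return r;
        }

        res_free(cur);          /* success: retire the old resource... */
        *cur = tmp;             /* ...and commit the replacement */
        return 0;
}
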
if (old_reg->mem_type == TTM_PL_TT &&
new_reg->mem_type == TTM_PL_SYSTEM) {
nouveau_ttm_tt_unbind(bo->bdev, bo->ttm);
- ttm_resource_free(bo, bo->resource);
+ ttm_resource_free(bo, &bo->resource);
ttm_bo_assign_mem(bo, new_reg);
goto out;
}
if (old_mem->mem_type == TTM_PL_TT &&
new_mem->mem_type == TTM_PL_SYSTEM) {
radeon_ttm_tt_unbind(bo->bdev, bo->ttm);
- ttm_resource_free(bo, bo->resource);
+ ttm_resource_free(bo, &bo->resource);
ttm_bo_assign_mem(bo, new_mem);
goto out;
}
bo->bdev->funcs->delete_mem_notify(bo);
ttm_bo_tt_destroy(bo);
- ttm_resource_free(bo, bo->resource);
+ ttm_resource_free(bo, &bo->resource);
}
static int ttm_bo_evict(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx)
{
struct ttm_device *bdev = bo->bdev;
- struct ttm_resource evict_mem;
+ struct ttm_resource *evict_mem;
struct ttm_placement placement;
struct ttm_place hop;
int ret = 0;
goto out;
}
- ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx, &hop);
+ ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
if (unlikely(ret)) {
WARN(ret == -EMULTIHOP, "Unexpected multihop in eviction - likely driver bug\n");
if (ret != -ERESTARTSYS)
*/
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *mem,
+ struct ttm_resource **mem,
struct ttm_operation_ctx *ctx)
{
struct ttm_device *bdev = bo->bdev;
- struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type);
+ struct ttm_resource_manager *man;
struct ww_acquire_ctx *ticket;
int ret;
+ man = ttm_manager_type(bdev, place->mem_type);
ticket = dma_resv_locking_ctx(bo->base.resv);
do {
ret = ttm_resource_alloc(bo, place, mem);
return ret;
} while (1);
- return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
-}
-
-/**
- * ttm_bo_mem_placement - check if placement is compatible
- * @bo: BO to find memory for
- * @place: where to search
- * @mem: the memory object to fill in
- *
- * Check if placement is compatible and fill in mem structure.
- * Returns -EBUSY if placement won't work or negative error code.
- * 0 when placement can be used.
- */
-static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
- const struct ttm_place *place,
- struct ttm_resource *mem)
-{
- struct ttm_device *bdev = bo->bdev;
- struct ttm_resource_manager *man;
-
- man = ttm_manager_type(bdev, place->mem_type);
- if (!man || !ttm_resource_manager_used(man))
- return -EBUSY;
-
- mem->mem_type = place->mem_type;
- mem->placement = place->flags;
-
- spin_lock(&bo->bdev->lru_lock);
- ttm_bo_move_to_lru_tail(bo, mem, NULL);
- spin_unlock(&bo->bdev->lru_lock);
- return 0;
+ return ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
}
/*
*/
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
- struct ttm_resource *mem,
+ struct ttm_resource **mem,
struct ttm_operation_ctx *ctx)
{
struct ttm_device *bdev = bo->bdev;
const struct ttm_place *place = &placement->placement[i];
struct ttm_resource_manager *man;
- ret = ttm_bo_mem_placement(bo, place, mem);
- if (ret)
+ man = ttm_manager_type(bdev, place->mem_type);
+ if (!man || !ttm_resource_manager_used(man))
continue;
type_found = true;
if (unlikely(ret))
goto error;
- man = ttm_manager_type(bdev, mem->mem_type);
- ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
+ ret = ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
if (unlikely(ret)) {
ttm_resource_free(bo, mem);
if (ret == -EBUSY)
for (i = 0; i < placement->num_busy_placement; ++i) {
const struct ttm_place *place = &placement->busy_placement[i];
+ struct ttm_resource_manager *man;
- ret = ttm_bo_mem_placement(bo, place, mem);
- if (ret)
+ man = ttm_manager_type(bdev, place->mem_type);
+ if (!man || !ttm_resource_manager_used(man))
continue;
type_found = true;
EXPORT_SYMBOL(ttm_bo_mem_space);
static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
- struct ttm_resource *mem,
+ struct ttm_resource **mem,
struct ttm_operation_ctx *ctx,
struct ttm_place *hop)
{
struct ttm_placement hop_placement;
- struct ttm_resource hop_mem;
+ struct ttm_resource *hop_mem;
int ret;
hop_placement.num_placement = hop_placement.num_busy_placement = 1;
if (ret)
return ret;
/* move to the bounce domain */
- ret = ttm_bo_handle_move_mem(bo, &hop_mem, false, ctx, NULL);
+ ret = ttm_bo_handle_move_mem(bo, hop_mem, false, ctx, NULL);
if (ret) {
ttm_resource_free(bo, &hop_mem);
return ret;
struct ttm_placement *placement,
struct ttm_operation_ctx *ctx)
{
+ struct ttm_resource *mem;
struct ttm_place hop;
- struct ttm_resource mem;
int ret;
dma_resv_assert_held(bo->base.resv);
- memset(&hop, 0, sizeof(hop));
-
/*
* Determine where to move the buffer.
*
if (ret)
return ret;
bounce:
- ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx, &hop);
+ ret = ttm_bo_handle_move_mem(bo, mem, false, ctx, &hop);
if (ret == -EMULTIHOP) {
ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
if (ret)
{
static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
bool locked;
- int ret = 0;
+ int ret;
bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
bo->bdev = bdev;
bo->type = type;
bo->page_alignment = page_alignment;
- bo->resource = &bo->_mem;
- ttm_resource_alloc(bo, &sys_mem, bo->resource);
bo->moving = NULL;
bo->pin_count = 0;
bo->sg = sg;
}
atomic_inc(&ttm_glob.bo_count);
+ ret = ttm_resource_alloc(bo, &sys_mem, &bo->resource);
+ if (unlikely(ret)) {
+ ttm_bo_put(bo);
+ return ret;
+ }
+
/*
* For ttm_bo_type_device buffers, allocate
* address space from the device.
*/
if (bo->resource->mem_type != TTM_PL_SYSTEM) {
struct ttm_operation_ctx ctx = { false, false };
- struct ttm_resource evict_mem;
+ struct ttm_resource *evict_mem;
struct ttm_place place, hop;
memset(&place, 0, sizeof(place));
if (unlikely(ret))
goto out;
- ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx, &hop);
+ ret = ttm_bo_handle_move_mem(bo, evict_mem, true, &ctx, &hop);
if (unlikely(ret != 0)) {
WARN(ret == -EMULTIHOP, "Unexpected multihop in swaput - likely driver bug.\n");
goto out;
struct ttm_operation_ctx *ctx,
struct ttm_resource *new_mem)
{
+ struct ttm_resource *old_mem = bo->resource;
struct ttm_device *bdev = bo->bdev;
- struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
+ struct ttm_resource_manager *man;
struct ttm_tt *ttm = bo->ttm;
- struct ttm_resource *old_mem = bo->resource;
- struct ttm_resource old_copy = *old_mem;
void *old_iomap;
void *new_iomap;
int ret;
unsigned long i;
+ man = ttm_manager_type(bdev, new_mem->mem_type);
+
ret = ttm_bo_wait_ctx(bo, ctx);
if (ret)
return ret;
* Single TTM move. NOP.
*/
if (old_iomap == NULL && new_iomap == NULL)
- goto out2;
+ goto out1;
/*
* Don't move nonexistent data. Clear destination instead.
(ttm == NULL || (!ttm_tt_is_populated(ttm) &&
!(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
- goto out2;
+ goto out1;
}
/*
ret = ttm_copy_io_page(new_iomap, old_iomap, i);
}
if (ret)
- goto out1;
+ break;
}
mb();
-out2:
- old_copy = *old_mem;
+out1:
+ ttm_resource_iounmap(bdev, new_mem, new_iomap);
+out:
+ ttm_resource_iounmap(bdev, old_mem, old_iomap);
+
+ if (ret) {
+ ttm_resource_free(bo, &new_mem);
+ return ret;
+ }
+ ttm_resource_free(bo, &bo->resource);
ttm_bo_assign_mem(bo, new_mem);
if (!man->use_tt)
ttm_bo_tt_destroy(bo);
-out1:
- ttm_resource_iounmap(bdev, old_mem, new_iomap);
-out:
- ttm_resource_iounmap(bdev, &old_copy, old_iomap);
-
- /*
- * On error, keep the mm node!
- */
- if (!ret)
- ttm_resource_free(bo, &old_copy);
return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
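
The memcpy rewrite retires the old_copy staging copy altogether: both mappings are unmapped unconditionally, an error frees the not-yet-committed new resource, and only the success path frees bo->resource and assigns the new one. Condensed into the hypothetical model above (the mapping and copy steps are elided):

static int move_memcpy(struct res **cur, struct res *new_res)
{
        int ret = 0;

        /* ... map both resources, ret = copy pages, unmap both ... */

        if (ret) {
                res_free(&new_res);     /* error: new resource never committed */
                return ret;
        }

        res_free(cur);                  /* success: old resource retired */
        *cur = new_res;
        return 0;
}
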
if (!dst_use_tt)
ttm_bo_tt_destroy(bo);
- ttm_resource_free(bo, bo->resource);
+ ttm_resource_free(bo, &bo->resource);
return 0;
}
}
spin_unlock(&from->move_lock);
- ttm_resource_free(bo, bo->resource);
+ ttm_resource_free(bo, &bo->resource);
dma_fence_put(bo->moving);
bo->moving = dma_fence_get(fence);
if (ret)
ttm_bo_wait(bo, false, false);
- ttm_resource_alloc(bo, &sys_mem, bo->resource);
+ ret = ttm_resource_alloc(bo, &sys_mem, &bo->resource);
bo->ttm = NULL;
dma_resv_unlock(&ghost->base._resv);
ttm_bo_put(ghost);
- return 0;
+ return ret;
}
int ttm_resource_alloc(struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *res)
+ struct ttm_resource **res_ptr)
{
struct ttm_resource_manager *man =
ttm_manager_type(bo->bdev, place->mem_type);
+ struct ttm_resource *res;
+ int r;
+
+ res = kmalloc(sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
res->mm_node = NULL;
res->start = 0;
res->bus.offset = 0;
res->bus.is_iomem = false;
res->bus.caching = ttm_cached;
+ r = man->func->alloc(man, bo, place, res);
+ if (r) {
+ kfree(res);
+ return r;
+ }
- return man->func->alloc(man, bo, place, res);
+ *res_ptr = res;
+ return 0;
}
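
With allocation centralized here, every resource is a kmalloc'd object that is initialized before the manager's alloc callback runs, and a callback failure unwinds the kmalloc. A sketch of the resulting calling convention, assuming bo and place are already set up:

        struct ttm_resource *res;
        int ret;

        ret = ttm_resource_alloc(bo, place, &res);      /* owns res on success */
        if (ret)
                return ret;                             /* nothing to unwind */

        /* ... use res ... */

        ttm_resource_free(bo, &res);                    /* frees and NULLs res */
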
-void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource *res)
+void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
{
- struct ttm_resource_manager *man =
- ttm_manager_type(bo->bdev, res->mem_type);
+ struct ttm_resource_manager *man;
- man->func->free(man, res);
- res->mm_node = NULL;
- res->mem_type = TTM_PL_SYSTEM;
+ if (!*res)
+ return;
+
+ man = ttm_manager_type(bo->bdev, (*res)->mem_type);
+ man->func->free(man, *res);
+ kfree(*res);
+ *res = NULL;
}
EXPORT_SYMBOL(ttm_resource_free);
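
Because the function now checks for NULL and writes NULL back through the double pointer, freeing is idempotent: a second call on the same slot is a no-op instead of a double-free. For example:

        ttm_resource_free(bo, &bo->resource);   /* frees, sets bo->resource = NULL */
        ttm_resource_free(bo, &bo->resource);   /* now a harmless no-op */
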
goto fail;
vmw_ttm_unbind(bo->bdev, bo->ttm);
- ttm_resource_free(bo, bo->resource);
+ ttm_resource_free(bo, &bo->resource);
ttm_bo_assign_mem(bo, new_mem);
return 0;
} else {
*/
struct ttm_resource *resource;
- struct ttm_resource _mem;
struct ttm_tt *ttm;
bool deleted;
*/
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
- struct ttm_resource *mem,
+ struct ttm_resource **mem,
struct ttm_operation_ctx *ctx);
/**
static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
struct ttm_resource *new_mem)
{
- bo->_mem = *new_mem;
- new_mem->mm_node = NULL;
+ WARN_ON(bo->resource);
+ bo->resource = new_mem;
}
/**
static inline void ttm_bo_move_null(struct ttm_buffer_object *bo,
struct ttm_resource *new_mem)
{
- struct ttm_resource *old_mem = bo->resource;
-
- WARN_ON(old_mem->mm_node != NULL);
+ ttm_resource_free(bo, &bo->resource);
ttm_bo_assign_mem(bo, new_mem);
}
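
Together these two helpers encode the new ownership rule: a BO holds at most one heap-allocated resource and assignment is only legal into an empty slot, which is why ttm_bo_move_null() frees before it assigns. Seen from a caller (bo->resource is NULL after the free, per ttm_resource_free() above):

        ttm_resource_free(bo, &bo->resource);   /* empty the slot first */
        ttm_bo_assign_mem(bo, new_mem);         /* else the WARN_ON fires */
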
int ttm_resource_alloc(struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *res);
-void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource *res);
+ struct ttm_resource **res);
+void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res);
void ttm_resource_manager_init(struct ttm_resource_manager *man,
unsigned long p_size);