struct list_head bucket[AMDGPU_CS_NUM_BUCKETS];
};
-static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser,
- int error, bool backoff);
-static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff);
-static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser);
-
static void amdgpu_cs_buckets_init(struct amdgpu_cs_buckets *b)
{
unsigned i;
	struct amdgpu_cs_parser *sched_job =
		container_of(work, struct amdgpu_cs_parser,
			     job_work);
mutex_lock(&sched_job->job_lock);
- sched_job->free_job(sched_job);
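+	/* the free_job callback may be unset, skip it in that case */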
+ if (sched_job->free_job)
+ sched_job->free_job(sched_job);
mutex_unlock(&sched_job->job_lock);
/* after processing job, free memory */
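+	/* drop the job's reference on its scheduler fence before the job itself is freed */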
+ fence_put(&sched_job->s_fence->base);
kfree(sched_job);
}
struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
}
bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
if (bo_list && !bo_list->has_userptr) {
- p->bo_list = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
+ p->bo_list = amdgpu_bo_list_clone(bo_list);
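+		/* the clone is an independent copy, so drop the reference taken by the lookup */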
+ amdgpu_bo_list_put(bo_list);
if (!p->bo_list)
return -ENOMEM;
- amdgpu_bo_list_copy(p->adev, p->bo_list, bo_list);
- amdgpu_bo_list_put(bo_list);
} else if (bo_list && bo_list->has_userptr)
p->bo_list = bo_list;
else
return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
}
-/**
- * cs_parser_fini() - clean parser states
- * @parser: parser structure holding parsing context.
- * @error: error number
- *
- * If error is set than unvalidate buffer, otherwise just free memory
- * used by parsing context.
- **/
-static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
-{
- amdgpu_cs_parser_fini_early(parser, error, backoff);
- amdgpu_cs_parser_fini_late(parser);
-}
-
-static int amdgpu_cs_parser_run_job(
- struct amdgpu_cs_parser *sched_job)
-{
- amdgpu_cs_parser_fini_early(sched_job, 0, true);
- return 0;
-}
-
-static int amdgpu_cs_parser_free_job(
- struct amdgpu_cs_parser *sched_job)
-{
- amdgpu_cs_parser_fini_late(sched_job);
- return 0;
-}
-
static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff)
{
if (!error) {
kfree(parser);
}
+/**
+ * amdgpu_cs_parser_fini() - clean parser states
+ * @parser: parser structure holding parsing context.
+ * @error: error number
+ * @backoff: indicator to backoff the reservation
+ *
+ * If error is set, then unvalidate the buffer; otherwise just free the
+ * memory used by the parsing context.
+ **/
+static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
+{
+ amdgpu_cs_parser_fini_early(parser, error, backoff);
+ amdgpu_cs_parser_fini_late(parser);
+}
+
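+/* scheduler callbacks: run_job performs the early (buffer) cleanup, free_job the late (memory) cleanup */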
+static int amdgpu_cs_parser_run_job(struct amdgpu_cs_parser *sched_job)
+{
+ amdgpu_cs_parser_fini_early(sched_job, 0, true);
+ return 0;
+}
+
+static int amdgpu_cs_parser_free_job(struct amdgpu_cs_parser *sched_job)
+{
+ amdgpu_cs_parser_fini_late(sched_job);
+ return 0;
+}
+
static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
struct amdgpu_vm *vm)
{
if (r)
return r;
- f = &bo_va->last_pt_update->base;
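+	/* last_pt_update is a plain struct fence pointer now, so no container dereference is needed */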
+ f = bo_va->last_pt_update;
r = amdgpu_sync_fence(adev, &p->ibs[0].sync, f);
if (r)
return r;
ib->oa_size = amdgpu_bo_size(oa);
}
}
-
/* wrap the last IB with user fence */
if (parser->uf.bo) {
struct amdgpu_ib *ib = &parser->ibs[parser->num_ibs - 1];
static int amdgpu_cs_parser_prepare_job(struct amdgpu_cs_parser *sched_job)
{
	int r, i;
struct amdgpu_cs_parser *parser = sched_job;
struct amdgpu_device *adev = sched_job->adev;
	bool reserved_buffers = false;

	r = amdgpu_cs_parser_relocs(parser);
	if (r) {
		if (r != -ERESTARTSYS) {
if (r == -ENOMEM)
DRM_ERROR("Not enough memory for command submission!\n");
else
if (r)
DRM_ERROR("Failed in the dependencies handling %d!\n", r);
}
	if (r) {
		amdgpu_cs_parser_fini(parser, r, reserved_buffers);
		return r;
	}

	for (i = 0; i < parser->num_ibs; i++)
		trace_amdgpu_cs(parser, i);

	r = amdgpu_cs_ib_vm_chunk(adev, parser);
	return r;
}
static struct amdgpu_ring *amdgpu_cs_parser_get_ring(
if (amdgpu_enable_scheduler && parser->num_ibs) {
struct amdgpu_ring * ring =
amdgpu_cs_parser_get_ring(adev, parser);
- parser->uf.sequence = atomic64_inc_return(
- &parser->ctx->rings[ring->idx].c_entity.last_queued_v_seq);
- if ((parser->bo_list && parser->bo_list->has_userptr)) {
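+	/* prepare the job up front for PTE rings and userptr BOs, defer it for everything else */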
+ if (ring->is_pte_ring || (parser->bo_list && parser->bo_list->has_userptr)) {
r = amdgpu_cs_parser_prepare_job(parser);
if (r)
goto out;
} else
parser->prepare_job = amdgpu_cs_parser_prepare_job;
-
+ parser->ring = ring;
parser->run_job = amdgpu_cs_parser_run_job;
parser->free_job = amdgpu_cs_parser_free_job;
- amd_sched_push_job(ring->scheduler,
- &parser->ctx->rings[ring->idx].c_entity,
- parser);
- cs->out.handle = parser->uf.sequence;
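+	/* push the job and record its fence atomically under job_lock */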
+ mutex_lock(&parser->job_lock);
+ r = amd_sched_push_job(ring->scheduler,
+ &parser->ctx->rings[ring->idx].entity,
+ parser,
+ &parser->s_fence);
+ if (r) {
+ mutex_unlock(&parser->job_lock);
+ goto out;
+ }
+ parser->ibs[parser->num_ibs - 1].sequence =
+ amdgpu_ctx_add_fence(parser->ctx, ring,
+ &parser->s_fence->base,
+ parser->s_fence->v_seq);
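+	/* the fence sequence number is what userspace later passes to the wait ioctl */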
+ cs->out.handle = parser->s_fence->v_seq;
+ mutex_unlock(&parser->job_lock);
up_read(&adev->exclusive_lock);
return 0;
}
ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
if (ctx == NULL)
return -EINVAL;
- if (amdgpu_enable_scheduler) {
- r = amd_sched_wait_ts(&ctx->rings[ring->idx].c_entity,
- wait->in.handle, true, timeout);
- if (r)
- return r;
- r = 1;
- } else {
- fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
- if (IS_ERR(fence))
- r = PTR_ERR(fence);
- else if (fence) {
- r = fence_wait_timeout(fence, true, timeout);
- fence_put(fence);
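+	/* scheduler fences are added to the context as well, so a single wait path covers both cases */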
+ fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
+ if (IS_ERR(fence))
+ r = PTR_ERR(fence);
+ else if (fence) {
+ r = fence_wait_timeout(fence, true, timeout);
+ fence_put(fence);
+ } else
+ r = 1;
- } else
- r = 1;
- }
amdgpu_ctx_put(ctx);
if (r < 0)
return r;
if (!reloc->bo_va)
continue;
- list_for_each_entry(mapping, &reloc->bo_va->mappings, list) {
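+	/* the mappings are now split into valid and invalid lists, search the valid ones first */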
+ list_for_each_entry(mapping, &reloc->bo_va->valids, list) {
+ if (mapping->it.start > addr ||
+ addr > mapping->it.last)
+ continue;
+
+ *bo = reloc->bo_va->bo;
+ return mapping;
+ }
+
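+	/* nothing found among the valid mappings, search the invalid ones as well */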
+ list_for_each_entry(mapping, &reloc->bo_va->invalids, list) {
if (mapping->it.start > addr ||
addr > mapping->it.last)
continue;