drm/amdgpu: add reference for **fence

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index f9d4fe9..8796938 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -41,11 +41,6 @@ struct amdgpu_cs_buckets {
        struct list_head bucket[AMDGPU_CS_NUM_BUCKETS];
 };
 
-static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser,
-                                 int error, bool backoff);
-static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff);
-static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser);
-
 static void amdgpu_cs_buckets_init(struct amdgpu_cs_buckets *b)
 {
        unsigned i;
@@ -137,9 +132,11 @@ static void amdgpu_job_work_func(struct work_struct *work)
                container_of(work, struct amdgpu_cs_parser,
                             job_work);
        mutex_lock(&sched_job->job_lock);
-       sched_job->free_job(sched_job);
+       if (sched_job->free_job)
+               sched_job->free_job(sched_job);
        mutex_unlock(&sched_job->job_lock);
        /* after processing job, free memory */
+       fence_put(&sched_job->s_fence->base);
        kfree(sched_job);
 }
 struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
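
The hunk above is the heart of the patch: the deferred free worker now drops the submission's reference on the scheduler fence with fence_put() before freeing the job, and tolerates a job without a free_job callback. A minimal sketch of the take/put pairing this implies on the side that hands the fence out through a **fence argument; fence_get()/fence_put() are the pre-rename dma-fence primitives used throughout this file, while the helper name is illustrative only.

static void sketch_hand_out_fence(struct amdgpu_cs_parser *sched_job,
				  struct fence **fence)
{
	if (fence) {
		/* +1 for the caller; pairs with the caller's fence_put() */
		fence_get(&sched_job->s_fence->base);
		*fence = &sched_job->s_fence->base;
	}
	/* the job's own reference is dropped later, in
	 * amdgpu_job_work_func() via fence_put() */
}
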
@@ -190,11 +187,10 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
        }
        bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
        if (bo_list && !bo_list->has_userptr) {
-               p->bo_list = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
+               p->bo_list = amdgpu_bo_list_clone(bo_list);
+               amdgpu_bo_list_put(bo_list);
                if (!p->bo_list)
                        return -ENOMEM;
-               amdgpu_bo_list_copy(p->adev, p->bo_list, bo_list);
-               amdgpu_bo_list_put(bo_list);
        } else if (bo_list && bo_list->has_userptr)
                p->bo_list = bo_list;
        else
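
Replacing the kzalloc()/amdgpu_bo_list_copy() pair with a single clone call keeps allocation and copy in one helper and lets the caller drop the original list reference exactly once. The sketch below shows what such a clone helper plausibly looks like; the field names (lock, num_entries, array) match this era's struct amdgpu_bo_list, but the body is an assumption, not code from this patch (the real helper is added elsewhere in the series).

struct amdgpu_bo_list *sketch_bo_list_clone(struct amdgpu_bo_list *src)
{
	struct amdgpu_bo_list *dst = kzalloc(sizeof(*dst), GFP_KERNEL);

	if (!dst)
		return NULL;

	*dst = *src;		/* shallow copy of the header fields */
	mutex_init(&dst->lock);	/* a mutex must never be copied */
	dst->array = kmemdup(src->array,
			     src->num_entries * sizeof(*src->array),
			     GFP_KERNEL);
	if (!dst->array) {
		kfree(dst);
		return NULL;
	}
	return dst;
}
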
@@ -469,34 +465,6 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a,
        return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
 }
 
-/**
- * cs_parser_fini() - clean parser states
- * @parser:    parser structure holding parsing context.
- * @error:     error number
- *
- * If error is set than unvalidate buffer, otherwise just free memory
- * used by parsing context.
- **/
-static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
-{
-       amdgpu_cs_parser_fini_early(parser, error, backoff);
-       amdgpu_cs_parser_fini_late(parser);
-}
-
-static int amdgpu_cs_parser_run_job(
-       struct amdgpu_cs_parser *sched_job)
-{
-       amdgpu_cs_parser_fini_early(sched_job, 0, true);
-       return 0;
-}
-
-static int amdgpu_cs_parser_free_job(
-       struct amdgpu_cs_parser *sched_job)
-{
-       amdgpu_cs_parser_fini_late(sched_job);
-       return 0;
-}
-
 static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff)
 {
        if (!error) {
@@ -547,6 +515,32 @@ static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
                kfree(parser);
 }
 
+/**
+ * amdgpu_cs_parser_fini() - clean parser states
+ * @parser:    parser structure holding parsing context.
+ * @error:     error number
+ *
+ * If error is set, then unvalidate buffer, otherwise just free memory
+ * used by parsing context.
+ **/
+static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
+{
+       amdgpu_cs_parser_fini_early(parser, error, backoff);
+       amdgpu_cs_parser_fini_late(parser);
+}
+
+static int amdgpu_cs_parser_run_job(struct amdgpu_cs_parser *sched_job)
+{
+       amdgpu_cs_parser_fini_early(sched_job, 0, true);
+       return 0;
+}
+
+static int amdgpu_cs_parser_free_job(struct amdgpu_cs_parser *sched_job)
+{
+       amdgpu_cs_parser_fini_late(sched_job);
+       return 0;
+}
+
 static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
                                   struct amdgpu_vm *vm)
 {
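
Moving amdgpu_cs_parser_fini() and the two scheduler callbacks below the functions they call is what lets the forward declarations at the top of the file (first hunk) go away. The split itself matters for the scheduler path: fini_early backs off reservations on the submission side, while fini_late only frees memory from the deferred worker. A heavily hedged sketch of the assumed control flow, not code from this patch:

static void sketch_job_lifecycle(struct amdgpu_cs_parser *sched_job)
{
	sched_job->run_job(sched_job);	/* -> amdgpu_cs_parser_fini_early() */
	/* ... hardware consumes the IBs, the fence signals ... */
	schedule_work(&sched_job->job_work); /* -> free_job + fence_put + kfree */
}
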
@@ -580,7 +574,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
                        if (r)
                                return r;
 
-                       f = &bo_va->last_pt_update->base;
+                       f = bo_va->last_pt_update;
                        r = amdgpu_sync_fence(adev, &p->ibs[0].sync, f);
                        if (r)
                                return r;
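
With last_pt_update now a plain struct fence * instead of a struct amdgpu_fence *, the &...->base dereference disappears and the sync code no longer cares whether the page-table update was produced by the hardware ring or by the scheduler. A small sketch, assuming the amdgpu_sync_fence() signature used in the hunk above:

static int sketch_sync_pt_update(struct amdgpu_device *adev,
				 struct amdgpu_sync *sync,
				 struct fence *last_pt_update)
{
	if (!last_pt_update)	/* no pending PT update to wait for */
		return 0;
	return amdgpu_sync_fence(adev, sync, last_pt_update);
}
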
@@ -738,7 +732,6 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
                        ib->oa_size = amdgpu_bo_size(oa);
                }
        }
-
        /* wrap the last IB with user fence */
        if (parser->uf.bo) {
                struct amdgpu_ib *ib = &parser->ibs[parser->num_ibs - 1];
@@ -817,14 +810,14 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 
 static int amdgpu_cs_parser_prepare_job(struct amdgpu_cs_parser *sched_job)
 {
-        int r, i;
+       int r, i;
        struct amdgpu_cs_parser *parser = sched_job;
        struct amdgpu_device *adev = sched_job->adev;
-        bool reserved_buffers = false;
+       bool reserved_buffers = false;
 
-        r = amdgpu_cs_parser_relocs(parser);
-        if (r) {
-                if (r != -ERESTARTSYS) {
+       r = amdgpu_cs_parser_relocs(parser);
+       if (r) {
+               if (r != -ERESTARTSYS) {
                        if (r == -ENOMEM)
                                DRM_ERROR("Not enough memory for command submission!\n");
                        else
@@ -841,16 +834,16 @@ static int amdgpu_cs_parser_prepare_job(struct amdgpu_cs_parser *sched_job)
                if (r)
                        DRM_ERROR("Failed in the dependencies handling %d!\n", r);
        }
-        if (r) {
-                amdgpu_cs_parser_fini(parser, r, reserved_buffers);
-               return r;
-        }
+       if (r) {
+               amdgpu_cs_parser_fini(parser, r, reserved_buffers);
+               return r;
+       }
 
-        for (i = 0; i < parser->num_ibs; i++)
-                trace_amdgpu_cs(parser, i);
+       for (i = 0; i < parser->num_ibs; i++)
+               trace_amdgpu_cs(parser, i);
 
-       r = amdgpu_cs_ib_vm_chunk(adev, parser);
-       return r;
+       r = amdgpu_cs_ib_vm_chunk(adev, parser);
+       return r;
 }
 
 static struct amdgpu_ring *amdgpu_cs_parser_get_ring(
@@ -907,21 +900,30 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
        if (amdgpu_enable_scheduler && parser->num_ibs) {
                struct amdgpu_ring * ring =
                        amdgpu_cs_parser_get_ring(adev, parser);
-               parser->uf.sequence = atomic64_inc_return(
-                       &parser->ctx->rings[ring->idx].c_entity.last_queued_v_seq);
-               if ((parser->bo_list && parser->bo_list->has_userptr)) {
+               if (ring->is_pte_ring || (parser->bo_list && parser->bo_list->has_userptr)) {
                        r = amdgpu_cs_parser_prepare_job(parser);
                        if (r)
                                goto out;
                } else
                        parser->prepare_job = amdgpu_cs_parser_prepare_job;
-
+               parser->ring = ring;
                parser->run_job = amdgpu_cs_parser_run_job;
                parser->free_job = amdgpu_cs_parser_free_job;
-               amd_sched_push_job(ring->scheduler,
-                                  &parser->ctx->rings[ring->idx].c_entity,
-                                  parser);
-               cs->out.handle = parser->uf.sequence;
+               mutex_lock(&parser->job_lock);
+               r = amd_sched_push_job(ring->scheduler,
+                                      &parser->ctx->rings[ring->idx].entity,
+                                      parser,
+                                      &parser->s_fence);
+               if (r) {
+                       mutex_unlock(&parser->job_lock);
+                       goto out;
+               }
+               parser->ibs[parser->num_ibs - 1].sequence =
+                       amdgpu_ctx_add_fence(parser->ctx, ring,
+                                            &parser->s_fence->base,
+                                            parser->s_fence->v_seq);
+               cs->out.handle = parser->s_fence->v_seq;
+               mutex_unlock(&parser->job_lock);
                up_read(&adev->exclusive_lock);
                return 0;
        }
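
The rewritten submission path holds job_lock across amd_sched_push_job() and amdgpu_ctx_add_fence(), so the scheduler fence is published to the context's fence ring before the scheduler can run free_job and drop it; the handle returned to userspace is now the scheduler's virtual sequence number v_seq rather than a counter bumped up front. The condensed sketch below restates the hunk with the locking rationale spelled out; the comments on who holds which reference are inferences from the fence_put() added to amdgpu_job_work_func(), not statements about scheduler internals.

static int sketch_push_and_publish(struct amdgpu_cs_parser *parser,
				   struct amdgpu_ring *ring,
				   uint64_t *handle)
{
	int r;

	mutex_lock(&parser->job_lock);
	r = amd_sched_push_job(ring->scheduler,
			       &parser->ctx->rings[ring->idx].entity,
			       parser, &parser->s_fence);
	if (r) {
		mutex_unlock(&parser->job_lock);
		return r;
	}
	/* Publish while job_lock is still held: once it drops, the
	 * scheduler may run free_job and fence_put() the job's
	 * reference, so the ctx ring must already hold its own. */
	amdgpu_ctx_add_fence(parser->ctx, ring, &parser->s_fence->base,
			     parser->s_fence->v_seq);
	*handle = parser->s_fence->v_seq;
	mutex_unlock(&parser->job_lock);
	return 0;
}
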
@@ -965,24 +967,16 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
        ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
        if (ctx == NULL)
                return -EINVAL;
-       if (amdgpu_enable_scheduler) {
-               r = amd_sched_wait_ts(&ctx->rings[ring->idx].c_entity,
-                                     wait->in.handle, true, timeout);
-               if (r)
-                       return r;
-               r = 1;
-       } else {
-               fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
-               if (IS_ERR(fence))
-                       r = PTR_ERR(fence);
 
-               else if (fence) {
-                       r = fence_wait_timeout(fence, true, timeout);
-                       fence_put(fence);
+       fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
+       if (IS_ERR(fence))
+               r = PTR_ERR(fence);
+       else if (fence) {
+               r = fence_wait_timeout(fence, true, timeout);
+               fence_put(fence);
+       } else
+               r = 1;
 
-               } else
-                       r = 1;
-       }
        amdgpu_ctx_put(ctx);
        if (r < 0)
                return r;
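
From userspace, the handle returned in cs.out.handle feeds straight into the wait ioctl this hunk simplifies: with the scheduler fence stored in the context's fence ring at submission time, the scheduler-specific amd_sched_wait_ts() branch is gone and both paths go through amdgpu_ctx_get_fence(). A hypothetical round trip, using the public drm_amdgpu_wait_cs UAPI; field values are illustrative:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

/* Wait for a submission previously returned in cs.out.handle. */
static int sketch_wait_cs(int fd, uint32_t ctx_id, uint32_t ring,
			  uint64_t handle)
{
	union drm_amdgpu_wait_cs wait = {
		.in = {
			.handle  = handle,	/* cs.out.handle */
			.timeout = UINT64_MAX,	/* block until signaled */
			.ip_type = AMDGPU_HW_IP_GFX,
			.ring    = ring,
			.ctx_id  = ctx_id,
		},
	};

	if (ioctl(fd, DRM_IOCTL_AMDGPU_WAIT_CS, &wait))
		return -1;			/* errno is set */
	return wait.out.status ? 1 : 0;		/* 1: timed out, 0: signaled */
}
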
@@ -1017,7 +1011,16 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
                if (!reloc->bo_va)
                        continue;
 
-               list_for_each_entry(mapping, &reloc->bo_va->mappings, list) {
+               list_for_each_entry(mapping, &reloc->bo_va->valids, list) {
+                       if (mapping->it.start > addr ||
+                           addr > mapping->it.last)
+                               continue;
+
+                       *bo = reloc->bo_va->bo;
+                       return mapping;
+               }
+
+               list_for_each_entry(mapping, &reloc->bo_va->invalids, list) {
                        if (mapping->it.start > addr ||
                            addr > mapping->it.last)
                                continue;
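
The single mappings list has been split into valids and invalids elsewhere in this series; the lookup now prefers a mapping whose page tables are already up to date and falls back to one whose update is still pending, instead of missing it entirely. Both loops share the same inclusive interval test, sketched here; 'it' is the interval-tree node embedded in each mapping, and the surrounding function is assumed to have converted addr into the same units as it.start/it.last before the comparison.

static bool sketch_mapping_contains(const struct amdgpu_bo_va_mapping *m,
				    uint64_t addr)
{
	return m->it.start <= addr && addr <= m->it.last;
}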