Merge commit drm-intel-fixes into topic/ppgtt
[linux-2.6-microblaze.git] drivers/gpu/drm/i915/i915_gem_context.c
index b0f42b9..165a5c7 100644
  * I've seen in a spec to date, and that was a workaround for a non-shipping
  * part. It should be safe to decrease this, but it's more future proof as is.
  */
-#define CONTEXT_ALIGN (64<<10)
+#define GEN6_CONTEXT_ALIGN (64<<10)
+#define GEN7_CONTEXT_ALIGN 4096
 
-static struct i915_hw_context *
-i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
-static int do_switch(struct i915_hw_context *to);
+static int do_switch(struct intel_ring_buffer *ring,
+                    struct i915_hw_context *to);
+
+static size_t get_context_alignment(struct drm_device *dev)
+{
+       if (IS_GEN6(dev))
+               return GEN6_CONTEXT_ALIGN;
+
+       return GEN7_CONTEXT_ALIGN;
+}
 
 static int get_context_size(struct drm_device *dev)
 {
@@ -131,14 +139,43 @@ void i915_gem_context_free(struct kref *ctx_ref)
 {
        struct i915_hw_context *ctx = container_of(ctx_ref,
                                                   typeof(*ctx), ref);
+       struct i915_hw_ppgtt *ppgtt = NULL;
 
-       list_del(&ctx->link);
+       /* We refcount even the aliasing PPGTT to keep the code symmetric */
+       if (USES_ALIASING_PPGTT(ctx->obj->base.dev))
+               ppgtt = ctx_to_ppgtt(ctx);
+
+       /* XXX: Free up the object before tearing down the address space, in
+        * case we're bound in the PPGTT */
        drm_gem_object_unreference(&ctx->obj->base);
+
+       if (ppgtt)
+               kref_put(&ppgtt->ref, ppgtt_release);
+       list_del(&ctx->link);
        kfree(ctx);
 }
 
+static struct i915_hw_ppgtt *
+create_vm_for_ctx(struct drm_device *dev, struct i915_hw_context *ctx)
+{
+       struct i915_hw_ppgtt *ppgtt;
+       int ret;
+
+       ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+       if (!ppgtt)
+               return ERR_PTR(-ENOMEM);
+
+       ret = i915_gem_init_ppgtt(dev, ppgtt);
+       if (ret) {
+               kfree(ppgtt);
+               return ERR_PTR(ret);
+       }
+
+       return ppgtt;
+}
+
 static struct i915_hw_context *
-create_hw_context(struct drm_device *dev,
+__create_hw_context(struct drm_device *dev,
                  struct drm_i915_file_private *file_priv)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -166,18 +203,13 @@ create_hw_context(struct drm_device *dev,
                        goto err_out;
        }
 
-       /* The ring associated with the context object is handled by the normal
-        * object tracking code. We give an initial ring value simple to pass an
-        * assertion in the context switch code.
-        */
-       ctx->ring = &dev_priv->ring[RCS];
        list_add_tail(&ctx->link, &dev_priv->context_list);
 
        /* Default context will never have a file_priv */
        if (file_priv == NULL)
                return ctx;
 
-       ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID + 1, 0,
+       ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID, 0,
                        GFP_KERNEL);
        if (ret < 0)
                goto err_out;
@@ -198,7 +230,7 @@ err_out:
 
 static inline bool is_default_context(struct i915_hw_context *ctx)
 {
-       return (ctx == ctx->ring->default_context);
+       return (ctx->id == DEFAULT_CONTEXT_ID);
 }
 
 /**
@@ -206,85 +238,170 @@ static inline bool is_default_context(struct i915_hw_context *ctx)
  * context state of the GPU for applications that don't utilize HW contexts, as
  * well as an idle case.
  */
-static int create_default_context(struct drm_i915_private *dev_priv)
+static struct i915_hw_context *
+i915_gem_create_context(struct drm_device *dev,
+                       struct drm_i915_file_private *file_priv,
+                       bool create_vm)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_hw_context *ctx;
-       int ret;
+       int ret = 0;
 
-       BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+       BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 
-       ctx = create_hw_context(dev_priv->dev, NULL);
+       ctx = __create_hw_context(dev, file_priv);
        if (IS_ERR(ctx))
-               return PTR_ERR(ctx);
-
-       /* We may need to do things with the shrinker which require us to
-        * immediately switch back to the default context. This can cause a
-        * problem as pinning the default context also requires GTT space which
-        * may not be available. To avoid this we always pin the
-        * default context.
-        */
-       ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
-       if (ret) {
-               DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
-               goto err_destroy;
-       }
+               return ctx;
 
-       ret = do_switch(ctx);
-       if (ret) {
-               DRM_DEBUG_DRIVER("Switch failed %d\n", ret);
-               goto err_unpin;
+       if (create_vm) {
+               struct i915_hw_ppgtt *ppgtt = create_vm_for_ctx(dev, ctx);
+
+               if (IS_ERR_OR_NULL(ppgtt)) {
+                       DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
+                                        PTR_ERR(ppgtt));
+                       ret = PTR_ERR(ppgtt);
+                       goto err_destroy;
+               } else
+                       ctx->vm = &ppgtt->base;
+
+               /* This case is reserved for the global default context and
+                * should only happen once. */
+               if (!file_priv) {
+                       if (WARN_ON(dev_priv->mm.aliasing_ppgtt)) {
+                               ret = -EEXIST;
+                               goto err_destroy;
+                       }
+
+                       dev_priv->mm.aliasing_ppgtt = ppgtt;
+
+                       /* We may need to do things with the shrinker which
+                        * require us to immediately switch back to the default
+                        * context. This can cause a problem as pinning the
+                        * default context also requires GTT space which may not
+                        * be available. To avoid this we always pin the default
+                        * context.
+                        */
+                       ret = i915_gem_obj_ggtt_pin(ctx->obj,
+                                                   get_context_alignment(dev),
+                                                   false, false);
+                       if (ret) {
+                               DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
+                               goto err_destroy;
+                       }
+               }
+       } else if (USES_ALIASING_PPGTT(dev)) {
+               /* For platforms which only have aliasing PPGTT, we fake the
+                * address space and refcounting. */
+               kref_get(&dev_priv->mm.aliasing_ppgtt->ref);
        }
 
-       dev_priv->ring[RCS].default_context = ctx;
+       /* TODO: Until full ppgtt... */
+       if (USES_ALIASING_PPGTT(dev))
+               ctx->vm = &dev_priv->mm.aliasing_ppgtt->base;
+       else
+               ctx->vm = &dev_priv->gtt.base;
 
-       DRM_DEBUG_DRIVER("Default HW context loaded\n");
-       return 0;
+       return ctx;
 
-err_unpin:
-       i915_gem_object_unpin(ctx->obj);
 err_destroy:
        i915_gem_context_unreference(ctx);
-       return ret;
+       return ERR_PTR(ret);
 }
 
-void i915_gem_context_init(struct drm_device *dev)
+void i915_gem_context_reset(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_ring_buffer *ring;
+       int i;
 
-       if (!HAS_HW_CONTEXTS(dev)) {
-               dev_priv->hw_contexts_disabled = true;
-               DRM_DEBUG_DRIVER("Disabling HW Contexts; old hardware\n");
+       if (!HAS_HW_CONTEXTS(dev))
                return;
+
+       /* Prevent the hardware from restoring the last context (which hung) on
+        * the next switch */
+       for (i = 0; i < I915_NUM_RINGS; i++) {
+               struct i915_hw_context *dctx;
+               if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
+                       continue;
+
+               /* Do a fake switch to the default context */
+               ring = &dev_priv->ring[i];
+               dctx = ring->default_context;
+               if (WARN_ON(!dctx))
+                       continue;
+
+               if (!ring->last_context)
+                       continue;
+
+               if (ring->last_context == dctx)
+                       continue;
+
+               if (i == RCS) {
+                       WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj,
+                                                     get_context_alignment(dev),
+                                                     false, false));
+                       /* Fake a finish/inactive */
+                       dctx->obj->base.write_domain = 0;
+                       dctx->obj->active = 0;
+               }
+
+               i915_gem_context_unreference(ring->last_context);
+               i915_gem_context_reference(dctx);
+               ring->last_context = dctx;
        }
+}
 
-       /* If called from reset, or thaw... we've been here already */
-       if (dev_priv->hw_contexts_disabled ||
-           dev_priv->ring[RCS].default_context)
-               return;
+int i915_gem_context_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_ring_buffer *ring;
+       int i;
+
+       if (!HAS_HW_CONTEXTS(dev))
+               return 0;
+
+       /* Init should only be called once per module load. Eventually the
+        * restriction on the default_context check below can be loosened. */
+       if (WARN_ON(dev_priv->ring[RCS].default_context))
+               return 0;
 
        dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
 
        if (dev_priv->hw_context_size > (1<<20)) {
-               dev_priv->hw_contexts_disabled = true;
                DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size\n");
-               return;
+               return -E2BIG;
        }
 
-       if (create_default_context(dev_priv)) {
-               dev_priv->hw_contexts_disabled = true;
-               DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed\n");
-               return;
+       dev_priv->ring[RCS].default_context =
+               i915_gem_create_context(dev, NULL, USES_ALIASING_PPGTT(dev));
+
+       if (IS_ERR_OR_NULL(dev_priv->ring[RCS].default_context)) {
+               DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed %ld\n",
+                                PTR_ERR(dev_priv->ring[RCS].default_context));
+               return PTR_ERR(dev_priv->ring[RCS].default_context);
+       }
+
+       for (i = RCS + 1; i < I915_NUM_RINGS; i++) {
+               if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
+                       continue;
+
+               ring = &dev_priv->ring[i];
+
+               /* NB: RCS will hold a ref for all rings */
+               ring->default_context = dev_priv->ring[RCS].default_context;
        }
 
        DRM_DEBUG_DRIVER("HW context support initialized\n");
+       return 0;
 }
 
 void i915_gem_context_fini(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
+       int i;
 
-       if (dev_priv->hw_contexts_disabled)
+       if (!HAS_HW_CONTEXTS(dev))
                return;
 
        /* The only known way to stop the gpu from accessing the hw context is
@@ -302,58 +419,118 @@ void i915_gem_context_fini(struct drm_device *dev)
        if (dev_priv->ring[RCS].last_context == dctx) {
                /* Fake switch to NULL context */
                WARN_ON(dctx->obj->active);
-               i915_gem_object_unpin(dctx->obj);
+               i915_gem_object_ggtt_unpin(dctx->obj);
                i915_gem_context_unreference(dctx);
+               dev_priv->ring[RCS].last_context = NULL;
        }
 
-       i915_gem_object_unpin(dctx->obj);
+       for (i = 0; i < I915_NUM_RINGS; i++) {
+               struct intel_ring_buffer *ring = &dev_priv->ring[i];
+               if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
+                       continue;
+
+               if (ring->last_context)
+                       i915_gem_context_unreference(ring->last_context);
+
+               ring->default_context = NULL;
+               ring->last_context = NULL;
+       }
+
+       i915_gem_object_ggtt_unpin(dctx->obj);
        i915_gem_context_unreference(dctx);
-       dev_priv->ring[RCS].default_context = NULL;
-       dev_priv->ring[RCS].last_context = NULL;
+       dev_priv->mm.aliasing_ppgtt = NULL;
+}
+
+int i915_gem_context_enable(struct drm_i915_private *dev_priv)
+{
+       struct intel_ring_buffer *ring;
+       int ret, i;
+
+       if (!HAS_HW_CONTEXTS(dev_priv->dev))
+               return 0;
+
+       /* This is the only place the aliasing PPGTT gets enabled, which means
+        * it has to happen before we bail on reset */
+       if (dev_priv->mm.aliasing_ppgtt) {
+               struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+               ppgtt->enable(ppgtt);
+       }
+
+       /* FIXME: We should make this work, even in reset */
+       if (i915_reset_in_progress(&dev_priv->gpu_error))
+               return 0;
+
+       BUG_ON(!dev_priv->ring[RCS].default_context);
+
+       for_each_ring(ring, dev_priv, i) {
+               ret = do_switch(ring, ring->default_context);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
 }
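
For orientation, a minimal sketch of how the two entry points above are expected to be sequenced during GEM bring-up (this is not part of the patch, and the wrapper name example_gem_bringup is hypothetical): i915_gem_context_init() allocates the default context, and with aliasing PPGTT the shared address space, while i915_gem_context_enable() runs once the rings are up to enable the PPGTT and perform the first switch onto the default context.

static int example_gem_bringup(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        /* Caller holds dev->struct_mutex; this allocates
         * dev_priv->ring[RCS].default_context and, when
         * USES_ALIASING_PPGTT(dev), dev_priv->mm.aliasing_ppgtt. */
        ret = i915_gem_context_init(dev);
        if (ret)
                return ret;

        /* ... ring and hardware initialization ... */

        /* Enables the aliasing PPGTT, then switches each ring to its
         * default context (the switch is skipped during reset). */
        return i915_gem_context_enable(dev_priv);
}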
 
 static int context_idr_cleanup(int id, void *p, void *data)
 {
        struct i915_hw_context *ctx = p;
 
-       BUG_ON(id == DEFAULT_CONTEXT_ID);
+       /* Ignore the default context because close will handle it */
+       if (is_default_context(ctx))
+               return 0;
 
        i915_gem_context_unreference(ctx);
        return 0;
 }
 
-struct i915_ctx_hang_stats *
-i915_gem_context_get_hang_stats(struct drm_device *dev,
-                               struct drm_file *file,
-                               u32 id)
+int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_file_private *file_priv = file->driver_priv;
-       struct i915_hw_context *ctx;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (!HAS_HW_CONTEXTS(dev)) {
+               /* Cheat for hang stats */
+               file_priv->private_default_ctx =
+                       kzalloc(sizeof(struct i915_hw_context), GFP_KERNEL);
+               file_priv->private_default_ctx->vm = &dev_priv->gtt.base;
+               return 0;
+       }
 
-       if (id == DEFAULT_CONTEXT_ID)
-               return &file_priv->hang_stats;
+       idr_init(&file_priv->context_idr);
 
-       ctx = NULL;
-       if (!dev_priv->hw_contexts_disabled)
-               ctx = i915_gem_context_get(file->driver_priv, id);
-       if (ctx == NULL)
-               return ERR_PTR(-ENOENT);
+       mutex_lock(&dev->struct_mutex);
+       file_priv->private_default_ctx =
+               i915_gem_create_context(dev, file_priv, false);
+       mutex_unlock(&dev->struct_mutex);
 
-       return &ctx->hang_stats;
+       if (IS_ERR(file_priv->private_default_ctx)) {
+               idr_destroy(&file_priv->context_idr);
+               return PTR_ERR(file_priv->private_default_ctx);
+       }
+
+       return 0;
 }
 
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
 {
        struct drm_i915_file_private *file_priv = file->driver_priv;
 
+       if (!HAS_HW_CONTEXTS(dev)) {
+               kfree(file_priv->private_default_ctx);
+               return;
+       }
+
        idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
+       i915_gem_context_unreference(file_priv->private_default_ctx);
        idr_destroy(&file_priv->context_idr);
 }
 
-static struct i915_hw_context *
+struct i915_hw_context *
 i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
 {
+       if (!HAS_HW_CONTEXTS(file_priv->dev_priv->dev))
+               return file_priv->private_default_ctx;
+
        return (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
 }
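
A small illustrative sketch, not from the patch (example_resolve_context is a made-up name, and DEFAULT_CONTEXT_ID is assumed to be 0): with the per-file default context created in i915_gem_context_open() taking the first idr slot, and the HAS_HW_CONTEXTS fallback above, a lookup site such as the execbuffer path can resolve any context id uniformly.

static struct i915_hw_context *
example_resolve_context(struct drm_i915_file_private *file_priv, u32 ctx_id)
{
        struct i915_hw_context *ctx;

        /* Without HW contexts this returns the fake per-file default;
         * otherwise it is a plain idr lookup, and id 0 finds the per-file
         * default context allocated at open time. */
        ctx = i915_gem_context_get(file_priv, ctx_id);
        if (ctx == NULL)
                return ERR_PTR(-ENOENT);

        return ctx;
}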
 
@@ -405,19 +582,30 @@ mi_set_context(struct intel_ring_buffer *ring,
        return ret;
 }
 
-static int do_switch(struct i915_hw_context *to)
+static int do_switch(struct intel_ring_buffer *ring,
+                    struct i915_hw_context *to)
 {
-       struct intel_ring_buffer *ring = to->ring;
+       struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct i915_hw_context *from = ring->last_context;
        u32 hw_flags = 0;
        int ret, i;
 
-       BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);
+       if (from != NULL && ring == &dev_priv->ring[RCS]) {
+               BUG_ON(from->obj == NULL);
+               BUG_ON(!i915_gem_obj_is_pinned(from->obj));
+       }
 
-       if (from == to && !to->remap_slice)
+       if (from == to && from->last_ring == ring && !to->remap_slice)
                return 0;
 
-       ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
+       if (ring != &dev_priv->ring[RCS]) {
+               if (from)
+                       i915_gem_context_unreference(from);
+               goto done;
+       }
+
+       ret = i915_gem_obj_ggtt_pin(to->obj, get_context_alignment(ring->dev),
+                                   false, false);
        if (ret)
                return ret;
 
@@ -438,19 +626,22 @@ static int do_switch(struct i915_hw_context *to)
         */
        ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
        if (ret) {
-               i915_gem_object_unpin(to->obj);
+               i915_gem_object_ggtt_unpin(to->obj);
                return ret;
        }
 
-       if (!to->obj->has_global_gtt_mapping)
-               i915_gem_gtt_bind_object(to->obj, to->obj->cache_level);
+       if (!to->obj->has_global_gtt_mapping) {
+               struct i915_vma *vma = i915_gem_obj_to_vma(to->obj,
+                                                          &dev_priv->gtt.base);
+               vma->bind_vma(vma, to->obj->cache_level, GLOBAL_BIND);
+       }
 
        if (!to->is_initialized || is_default_context(to))
                hw_flags |= MI_RESTORE_INHIBIT;
 
        ret = mi_set_context(ring, to, hw_flags);
        if (ret) {
-               i915_gem_object_unpin(to->obj);
+               i915_gem_object_ggtt_unpin(to->obj);
                return ret;
        }
 
@@ -486,13 +677,15 @@ static int do_switch(struct i915_hw_context *to)
                BUG_ON(from->obj->ring != ring);
 
                /* obj is kept alive until the next request by its active ref */
-               i915_gem_object_unpin(from->obj);
+               i915_gem_object_ggtt_unpin(from->obj);
                i915_gem_context_unreference(from);
        }
 
+done:
        i915_gem_context_reference(to);
        ring->last_context = to;
        to->is_initialized = true;
+       to->last_ring = ring;
 
        return 0;
 }
@@ -502,8 +695,6 @@ static int do_switch(struct i915_hw_context *to)
  * @ring: ring for which we'll execute the context switch
  * @file_priv: file_priv associated with the context, may be NULL
  * @id: context id number
- * @seqno: sequence number by which the new context will be switched to
- * @flags:
  *
  * The context life cycle is simple. The context refcount is incremented and
  * decremented by 1 on create and destroy. If the context is in use by the GPU,
@@ -512,37 +703,24 @@ static int do_switch(struct i915_hw_context *to)
  */
 int i915_switch_context(struct intel_ring_buffer *ring,
                        struct drm_file *file,
-                       int to_id)
+                       struct i915_hw_context *to)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
-       struct i915_hw_context *to;
-
-       if (dev_priv->hw_contexts_disabled)
-               return 0;
 
        WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
-       if (ring != &dev_priv->ring[RCS])
-               return 0;
-
-       if (to_id == DEFAULT_CONTEXT_ID) {
-               to = ring->default_context;
-       } else {
-               if (file == NULL)
-                       return -EINVAL;
+       BUG_ON(file && to == NULL);
 
-               to = i915_gem_context_get(file->driver_priv, to_id);
-               if (to == NULL)
-                       return -ENOENT;
-       }
+       /* We have the fake context, but don't support switching. */
+       if (!HAS_HW_CONTEXTS(ring->dev))
+               return 0;
 
-       return do_switch(to);
+       return do_switch(ring, to);
 }
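
A hedged sketch of the caller side with the new signature (the submission path is paraphrased and example_submit is hypothetical): callers now resolve the struct i915_hw_context themselves and pass it in, rather than an id, so the same path serves the default context and non-render rings.

static int example_submit(struct intel_ring_buffer *ring,
                          struct drm_file *file,
                          struct i915_hw_context *ctx)
{
        int ret;

        /* Per-ring now: non-render rings only update last_context and the
         * refcounts, while the render ring also emits MI_SET_CONTEXT. */
        ret = i915_switch_context(ring, file, ctx);
        if (ret)
                return ret;

        /* ... emit the batch buffer for this request ... */
        return 0;
}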
 
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_context_create *args = data;
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct i915_hw_context *ctx;
@@ -551,14 +729,14 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;
 
-       if (dev_priv->hw_contexts_disabled)
+       if (!HAS_HW_CONTEXTS(dev))
                return -ENODEV;
 
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;
 
-       ctx = create_hw_context(dev, file_priv);
+       ctx = i915_gem_create_context(dev, file_priv, false);
        mutex_unlock(&dev->struct_mutex);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
@@ -580,6 +758,9 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;
 
+       if (args->ctx_id == DEFAULT_CONTEXT_ID)
+               return -EPERM;
+
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;
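
From userspace, the visible effect of the two ioctl hunks above: contexts are still created and destroyed through the existing ioctls, but destroying the default context (id DEFAULT_CONTEXT_ID, assumed to be 0 here) is now rejected with EPERM. A minimal sketch using the libdrm drmIoctl() wrapper and the drm_i915_gem_context_create/destroy structs from i915_drm.h; file-descriptor setup is elided.

#include <errno.h>
#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Returns the new context id, or a negative errno on failure. */
static int example_create_context(int fd)
{
        struct drm_i915_gem_context_create create;

        memset(&create, 0, sizeof(create));
        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create))
                return -errno;

        /* User-created contexts get ids above the per-file default. */
        return create.ctx_id;
}

static int example_destroy_context(int fd, __u32 ctx_id)
{
        struct drm_i915_gem_context_destroy destroy;

        memset(&destroy, 0, sizeof(destroy));
        destroy.ctx_id = ctx_id;

        /* Destroying the default context now fails with EPERM. */
        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy))
                return -errno;

        return 0;
}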