Remember to keep the rings pinned as well as the context image until the
GPU is no longer active.
v2: Introduce a ring->pin_count primarily to hide the
mock_ring that doesn't fit into the normal GGTT vma picture.
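(Illustration, not driver code: a minimal user-space C sketch of what the
atomic pin_count buys. The ring_pin/ring_unpin names are simplified
stand-ins for intel_ring_pin/intel_ring_unpin, and a mock ring created
with pin_count already at 1 never reaches the paths that expect a real
GGTT vma.)

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ring {
	atomic_int pin_count;
	bool mapped;		/* stands in for ring->vaddr */
};

static int ring_pin(struct ring *ring)
{
	/* Only the 0 -> 1 transition does the real pinning work. */
	if (atomic_fetch_add(&ring->pin_count, 1))
		return 0;
	ring->mapped = true;	/* ... pin timeline, pin vma, map pages ... */
	return 0;
}

static void ring_unpin(struct ring *ring)
{
	/* Only the last unpin tears the mapping down again. */
	if (atomic_fetch_sub(&ring->pin_count, 1) != 1)
		return;
	ring->mapped = false;	/* ... unmap, unpin vma, unpin timeline ... */
}

int main(void)
{
	struct ring mock = { .mapped = true };

	/* A mock ring starts life pre-pinned, cf. the atomic_set() in
	 * the final hunk below, so pin/unpin are pure refcount traffic. */
	atomic_init(&mock.pin_count, 1);
	ring_pin(&mock);
	ring_unpin(&mock);
	printf("mock still mapped: %d\n", mock.mapped);	/* prints 1 */
	return 0;
}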
v3: Order is important in teardown: ringbuffer submission needs to drop
the pin count on the engine->kernel_context before it can gleefully free
its ring.
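(The same sketch style, showing why that order matters: the kernel
context holds one of the pins on the engine's ring, so it must be
released, here via the first ring_unpin() call, before the final unpin
and free, which is what moving intel_engine_cleanup_common() earlier in
the last hunk below achieves.)

#include <assert.h>
#include <stdatomic.h>
#include <stdlib.h>

struct ring { atomic_int pin_count; };

static void ring_unpin(struct ring *ring)
{
	assert(atomic_load(&ring->pin_count) > 0);
	atomic_fetch_sub(&ring->pin_count, 1);
}

static void ring_free(struct ring *ring)
{
	/* Freeing a still-pinned ring is the bug the ordering avoids. */
	assert(atomic_load(&ring->pin_count) == 0);
	free(ring);
}

int main(void)
{
	struct ring *ring = calloc(1, sizeof(*ring));

	if (!ring)
		return 1;
	atomic_init(&ring->pin_count, 0);
	atomic_fetch_add(&ring->pin_count, 1);	/* the engine's own pin */
	atomic_fetch_add(&ring->pin_count, 1);	/* the kernel context's pin */

	ring_unpin(ring);	/* cleanup_common: kernel context drops its pin */
	ring_unpin(ring);	/* engine teardown drops the last pin */
	ring_free(ring);	/* only now is pin_count zero: safe to free */
	return 0;
}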
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=110946
Fixes: ce476c80b8bf ("drm/i915: Keep contexts pinned until after the next kernel context switch")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190619170135.15281-1-chris@chris-wilson.co.uk
if (ce->state)
__context_unpin_state(ce->state);
+ intel_ring_unpin(ce->ring);
intel_context_put(ce);
}
intel_context_get(ce);
+ err = intel_ring_pin(ce->ring);
+ if (err)
+ goto err_put;
+
if (!ce->state)
return 0;
err = __context_pin_state(ce->state, flags);
- if (err) {
- i915_active_cancel(&ce->active);
- intel_context_put(ce);
- return err;
- }
+ if (err)
+ goto err_ring;
/* Preallocate tracking nodes */
if (!i915_gem_context_is_kernel(ce->gem_context)) {
err = i915_active_acquire_preallocate_barrier(&ce->active,
ce->engine);
- if (err) {
- i915_active_release(&ce->active);
- return err;
- }
+ if (err)
+ goto err_state;
}
return 0;
+
+err_state:
+ __context_unpin_state(ce->state);
+err_ring:
+ intel_ring_unpin(ce->ring);
+err_put:
+ intel_context_put(ce);
+ i915_active_cancel(&ce->active);
+ return err;
}
void intel_context_active_release(struct intel_context *ce)
struct list_head request_list;
struct list_head active_link;
+ /*
+ * As we have two types of rings, one global to the engine used
+ * by ringbuffer submission and those that are exclusive to a
+ * context used by execlists, we have to play safe and allow
+ * atomic updates to the pin_count. However, the actual pinning
+ * of the context is either done during initialisation for
+ * ringbuffer submission or serialised as part of the context
+ * pinning for execlists, and so we do not need a mutex ourselves
+ * to serialise intel_ring_pin/intel_ring_unpin.
+ */
+ atomic_t pin_count;
+
u32 head;
u32 tail;
u32 emit;
{
struct intel_context *ce = container_of(kref, typeof(*ce), ref);
+ GEM_BUG_ON(!i915_active_is_idle(&ce->active));
GEM_BUG_ON(intel_context_is_pinned(ce));
if (ce->state)
{
i915_gem_context_unpin_hw_id(ce->gem_context);
i915_gem_object_unpin_map(ce->state->obj);
- intel_ring_unpin(ce->ring);
}
static void
goto unpin_active;
}
- ret = intel_ring_pin(ce->ring);
- if (ret)
- goto unpin_map;
-
ret = i915_gem_context_pin_hw_id(ce->gem_context);
if (ret)
- goto unpin_ring;
+ goto unpin_map;
ce->lrc_desc = lrc_descriptor(ce, engine);
ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
return 0;
-unpin_ring:
- intel_ring_unpin(ce->ring);
unpin_map:
i915_gem_object_unpin_map(ce->state->obj);
unpin_active:
int intel_ring_pin(struct intel_ring *ring)
{
struct i915_vma *vma = ring->vma;
- enum i915_map_type map = i915_coherent_map_type(vma->vm->i915);
unsigned int flags;
void *addr;
int ret;
- GEM_BUG_ON(ring->vaddr);
+ if (atomic_fetch_inc(&ring->pin_count))
+ return 0;
ret = i915_timeline_pin(ring->timeline);
if (ret)
- return ret;
+ goto err_unpin;
flags = PIN_GLOBAL;
ret = i915_vma_pin(vma, 0, 0, flags);
if (unlikely(ret))
- goto unpin_timeline;
+ goto err_timeline;
if (i915_vma_is_map_and_fenceable(vma))
addr = (void __force *)i915_vma_pin_iomap(vma);
else
- addr = i915_gem_object_pin_map(vma->obj, map);
+ addr = i915_gem_object_pin_map(vma->obj,
+ i915_coherent_map_type(vma->vm->i915));
if (IS_ERR(addr)) {
ret = PTR_ERR(addr);
- goto unpin_ring;
+ goto err_ring;
}
vma->obj->pin_global++;
+ GEM_BUG_ON(ring->vaddr);
ring->vaddr = addr;
+
return 0;
-unpin_ring:
+err_ring:
i915_vma_unpin(vma);
-unpin_timeline:
+err_timeline:
i915_timeline_unpin(ring->timeline);
+err_unpin:
+ atomic_dec(&ring->pin_count);
return ret;
}
void intel_ring_unpin(struct intel_ring *ring)
{
- GEM_BUG_ON(!ring->vma);
- GEM_BUG_ON(!ring->vaddr);
+ if (!atomic_dec_and_test(&ring->pin_count))
+ return;
/* Discard any unused bytes beyond that submitted to hw. */
intel_ring_reset(ring, ring->tail);
+ GEM_BUG_ON(!ring->vma);
if (i915_vma_is_map_and_fenceable(ring->vma))
i915_vma_unpin_iomap(ring->vma);
else
i915_gem_object_unpin_map(ring->vma->obj);
+
+ GEM_BUG_ON(!ring->vaddr);
ring->vaddr = NULL;
ring->vma->obj->pin_global--;
WARN_ON(INTEL_GEN(dev_priv) > 2 &&
(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
+ intel_engine_cleanup_common(engine);
+
intel_ring_unpin(engine->buffer);
intel_ring_put(engine->buffer);
- intel_engine_cleanup_common(engine);
kfree(engine);
}
ring->base.effective_size = sz;
ring->base.vaddr = (void *)(ring + 1);
ring->base.timeline = &ring->timeline;
+ atomic_set(&ring->base.pin_count, 1);
INIT_LIST_HEAD(&ring->base.request_list);
intel_ring_update_space(&ring->base);
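(For reference, intel_context_active_acquire() and intel_ring_pin()
above both use the same goto-unwind idiom: each error label releases
exactly what was acquired before the failing step, in reverse order of
acquisition. A self-contained sketch with hypothetical step names:)

#include <errno.h>
#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -ENOMEM; }	/* pretend this step fails */
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int acquire(void)
{
	int err;

	err = step_a();
	if (err)
		return err;

	err = step_b();
	if (err)
		goto err_a;

	err = step_c();
	if (err)
		goto err_b;

	return 0;

err_b:
	undo_b();
err_a:
	undo_a();
	return err;
}

int main(void)
{
	printf("acquire() = %d\n", acquire());	/* unwinds b, then a */
	return 0;
}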