 	bool retire = false;
 	lockdep_assert_held(&ref->mutex);
+	GEM_BUG_ON(i915_active_is_idle(ref));
 	/* return the unused nodes to our slabcache -- flushing the allocator */
 	if (atomic_dec_and_test(&ref->count)) {
 		ref->retire(ref);
 	}
+static void
+active_work(struct work_struct *wrk)
+{
+	struct i915_active *ref = container_of(wrk, typeof(*ref), work);
+
+	GEM_BUG_ON(!atomic_read(&ref->count));
+	if (atomic_add_unless(&ref->count, -1, 1))
+		return;
+
+	mutex_lock(&ref->mutex);
+	__active_retire(ref);
+}
+
 static void
 active_retire(struct i915_active *ref)
 {
 	if (atomic_add_unless(&ref->count, -1, 1))
 		return;
-	/* One active may be flushed from inside the acquire of another */
-	mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
+	/* If we are inside interrupt context (fence signaling), defer */
+	if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS ||
+	    !mutex_trylock(&ref->mutex)) {
+		queue_work(system_unbound_wq, &ref->work);
+		return;
+	}
+
 	__active_retire(ref);
 }
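With fence signaling able to invoke active_retire() from irq context, the retirement path can no longer take ref->mutex unconditionally or call a retire hook that sleeps; the hunk above punts those cases to system_unbound_wq and lets active_work() finish the job. Below is a minimal standalone sketch of the same defer-to-worker pattern; the obj_* names are hypothetical and this is not the i915 code itself:

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct obj {
	atomic_t count;
	struct mutex lock;
	struct work_struct work;	/* INIT_WORK(&obj->work, obj_work) at init */
};

static void __obj_retire(struct obj *o)
{
	lockdep_assert_held(&o->lock);

	/* Drop the final reference under the lock; a concurrent acquire
	 * may have revived the object since the caller decided to retire.
	 */
	if (atomic_dec_and_test(&o->count)) {
		/* ... release resources; may sleep ... */
	}
	mutex_unlock(&o->lock);
}

static void obj_work(struct work_struct *wrk)
{
	struct obj *o = container_of(wrk, typeof(*o), work);

	/* A new reference may have arrived since the work was queued */
	if (atomic_add_unless(&o->count, -1, 1))
		return;

	mutex_lock(&o->lock);	/* process context: sleeping is fine */
	__obj_retire(o);
}

static void obj_put(struct obj *o)	/* callable from atomic context */
{
	/* Fast path: not the last reference, just decrement */
	if (atomic_add_unless(&o->count, -1, 1))
		return;

	/* Last reference: if we cannot block here, let the worker retire */
	if (!mutex_trylock(&o->lock)) {
		queue_work(system_unbound_wq, &o->work);
		return;
	}

	__obj_retire(o);
}

Note how the final decrement is left to __obj_retire() under the lock: the count stays pinned at 1 between queue_work() and the worker running, so a concurrent acquire simply revives the object. That is also why active_work() above re-checks the count with atomic_add_unless() before retiring.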
 		       void (*retire)(struct i915_active *ref),
 		       struct lock_class_key *key)
 {
+	unsigned long bits;
+
 	debug_active_init(ref);
 	ref->i915 = i915;
 	ref->flags = 0;
 	ref->active = active;
-	ref->retire = retire;
+	ref->retire = ptr_unpack_bits(retire, &bits, 2);
+	if (bits & I915_ACTIVE_MAY_SLEEP)
+		ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;
 	ref->excl = NULL;
 	ref->tree = RB_ROOT;
 	init_llist_head(&ref->preallocated_barriers);
 	atomic_set(&ref->count, 0);
 	__mutex_init(&ref->mutex, "i915_active", key);
+	INIT_WORK(&ref->work, active_work);
}
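The retire callback now arrives with a flag folded into the low bits of the function pointer: __i915_active_call forces at least 4-byte alignment, so the bottom two bits are known to be zero, and ptr_unpack_bits() splits them back out in the init path above. A rough sketch of that low-bit packing, with hypothetical pack_retire()/unpack_retire() helpers standing in for the real, more general ptr_pack_bits()/ptr_unpack_bits() macros in i915_utils.h:

typedef void (*retire_fn)(struct i915_active *ref);

static inline retire_fn pack_retire(retire_fn fn, unsigned long bits)
{
	/* fn is __aligned(4), so bits 0-1 of its address are clear */
	return (retire_fn)((unsigned long)fn | (bits & 3));
}

static inline retire_fn unpack_retire(retire_fn fn, unsigned long *bits)
{
	unsigned long v = (unsigned long)fn;

	*bits = v & 3;			/* recover the flag bits... */
	return (retire_fn)(v & ~3ul);	/* ...and the real entry point */
}

A caller opts in by annotating its callback with __i915_active_call and passing it through i915_active_may_sleep() at init time, which is what sets I915_ACTIVE_RETIRE_SLEEPS here.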
static bool ____active_del_barrier(struct i915_active *ref,
 	if (wait_on_bit(&ref->flags, I915_ACTIVE_GRAB_BIT, TASK_KILLABLE))
 		return -EINTR;
+	flush_work(&ref->work);
 	if (!i915_active_is_idle(ref))
 		return -EBUSY;
 void i915_active_fini(struct i915_active *ref)
 {
 	debug_active_fini(ref);
-	GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
 	GEM_BUG_ON(atomic_read(&ref->count));
+	GEM_BUG_ON(work_pending(&ref->work));
+	GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
 	mutex_destroy(&ref->mutex);
 }
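Both teardown paths must now account for retirement running asynchronously: i915_active_wait() flushes the worker before testing for idle, since the reference count can hit zero while the deferred __active_retire() is still in flight, and i915_active_fini() asserts that no work remains queued or the rbtree non-empty. Continuing the hypothetical obj sketch from earlier, the wait side has to order things roughly as follows (obj_is_idle() is an assumed helper):

int obj_wait_idle(struct obj *o)
{
	/* ... wait for the last reference to be released ... */

	/* The release may only have queued the retirement; make sure the
	 * deferred work has actually executed before judging idleness.
	 */
	flush_work(&o->work);

	if (!obj_is_idle(o))
		return -EBUSY;

	return 0;
}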
#endif
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/rcupdate.h>
+#include <linux/workqueue.h>
+
+#include "i915_utils.h"
struct drm_i915_private;
struct i915_active_request;
struct active_node;
+#define I915_ACTIVE_MAY_SLEEP BIT(0)
+
+#define __i915_active_call __aligned(4)
+#define i915_active_may_sleep(fn) ptr_pack_bits(&(fn), I915_ACTIVE_MAY_SLEEP, 2)
+
 struct i915_active {
 	struct drm_i915_private *i915;
 	struct dma_fence_cb excl_cb;
 	unsigned long flags;
-#define I915_ACTIVE_GRAB_BIT 0
+#define I915_ACTIVE_RETIRE_SLEEPS BIT(0)
+#define I915_ACTIVE_GRAB_BIT 1
 	int (*active)(struct i915_active *ref);
 	void (*retire)(struct i915_active *ref);
+	struct work_struct work;
+
 	struct llist_head preallocated_barriers;
 };
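Note the two conventions now sharing ref->flags: I915_ACTIVE_RETIRE_SLEEPS is a mask (BIT(0)) tested directly with a bitwise AND, while I915_ACTIVE_GRAB_BIT remains a bit *number* handed to the bitops/wait_on_bit() API, which indexes bits rather than masking them. That is why GRAB_BIT moves from 0 to 1: as a bit number, 1 addresses BIT(1), leaving BIT(0) free for the new mask. A sketch of the two styles side by side:

#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/wait_bit.h>

static bool retire_may_sleep(const struct i915_active *ref)
{
	return ref->flags & I915_ACTIVE_RETIRE_SLEEPS;	/* mask test: BIT(0) */
}

static int wait_for_grab(struct i915_active *ref)
{
	/* bit number 1, i.e. BIT(1) within the same flags word */
	return wait_on_bit(&ref->flags, I915_ACTIVE_GRAB_BIT, TASK_KILLABLE);
}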
 	}
 	i915_active_release(&active->base);
-	if (active->retired && count) {
+	if (READ_ONCE(active->retired) && count) {
 		pr_err("i915_active retired before submission!\n");
 		err = -EINVAL;
 	}
 }
 	i915_active_wait(&active->base);
-	if (!active->retired) {
+	if (!READ_ONCE(active->retired)) {
 		pr_err("i915_active not retired after waiting!\n");
 		err = -EINVAL;
 	}
 	if (igt_flush_test(i915, I915_WAIT_LOCKED))
 		err = -EIO;
-	if (!active->retired) {
+	if (!READ_ONCE(active->retired)) {
 		pr_err("i915_active not retired after flushing!\n");
 		err = -EINVAL;
 	}
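The selftest changes follow from the same shift: retirement now completes in a worker thread, so the retired flag is written concurrently with these checks, and READ_ONCE() forces a fresh, untorn load each time instead of letting the compiler cache the value across the waits. The writer side would pair with it along these lines (a sketch inferred from how active->base and active->retired are used above, not the selftest's actual retire hook):

struct live_active {
	struct i915_active base;
	bool retired;
};

static void __live_retire(struct i915_active *base)	/* may run in the worker */
{
	struct live_active *active = container_of(base, typeof(*active), base);

	WRITE_ONCE(active->retired, true);	/* pairs with the READ_ONCE()s */
}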