/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */
#include <linux/dma-fence-array.h>

#include "gt/intel_engine.h"

#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
static __always_inline u32 __busy_read_flag(u16 id)
{
	if (id == (u16)I915_ENGINE_CLASS_INVALID)
		return 0xffff0000u;

	GEM_BUG_ON(id >= 16);
	return 0x10000u << id;
}
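
/*
 * Illustrative example, read straight off the helper above rather than the
 * uAPI documentation: a reader on engine class 1 (I915_ENGINE_CLASS_COPY)
 * contributes 0x10000u << 1 == 0x00020000, i.e. each bit in the high word
 * of the busy flags marks one engine class with an outstanding read.
 */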
static __always_inline u32 __busy_write_id(u16 id)
{
	/*
	 * The uABI guarantees an active writer is also amongst the read
	 * engines. This would be true if we accessed the activity tracking
	 * under the lock, but as we perform the lookup of the object and
	 * its activity locklessly we can not guarantee that the last_write
	 * being active implies that we have set the same engine flag from
	 * last_read - hence we always set both read and write busy for
	 * last_write.
	 */
	if (id == (u16)I915_ENGINE_CLASS_INVALID)
		return 0xffffffffu;

	return (id + 1) | __busy_read_flag(id);
}
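
/*
 * Illustrative example of the composed value, again derived from the helpers
 * above: a writer on engine class 0 (I915_ENGINE_CLASS_RENDER) yields
 * (0 + 1) | (0x10000u << 0) == 0x00010001, i.e. the class + 1 in the low
 * word plus the matching read bit in the high word.
 */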
static __always_inline unsigned int
__busy_set_if_active(struct dma_fence *fence, u32 (*flag)(u16 id))
{
	const struct i915_request *rq;

	/*
	 * We have to check the current hw status of the fence as the uABI
	 * guarantees forward progress. We could rely on the idle worker
	 * to eventually flush us, but to minimise latency just ask the
	 * hardware.
	 *
	 * Note we only report on the status of native fences and we currently
	 * have two native fences:
	 *
	 * 1. A composite fence (dma_fence_array) constructed of i915 requests
	 * created during a parallel submission. In this case we deconstruct the
	 * composite fence into individual i915 requests and check the status of
	 * each request.
	 *
	 * 2. A single i915 request.
	 */
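
	/*
	 * The I915_FENCE_FLAG_COMPOSITE test below is how we recognise the
	 * parallel-submission case described above: each i915 request placed
	 * into such a composite fence carries that flag, so an array whose
	 * children lack it is treated as foreign and therefore not busy.
	 */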
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);
		struct dma_fence **child = array->fences;
		unsigned int nchild = array->num_fences;

		do {
			struct dma_fence *current_fence = *child++;

			/* Not an i915 fence, can't be busy per above */
			if (!dma_fence_is_i915(current_fence) ||
			    !test_bit(I915_FENCE_FLAG_COMPOSITE,
				      &current_fence->flags)) {
				return 0;
			}

			rq = to_request(current_fence);
			if (!i915_request_completed(rq))
				return flag(rq->engine->uabi_class);
		} while (--nchild);

		/* All requests in array complete, not busy */
		return 0;
	} else {
		if (!dma_fence_is_i915(fence))
			return 0;

		rq = to_request(fence);
		if (i915_request_completed(rq))
			return 0;

		/* Beware type-expansion follies! */
		BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class));
		return flag(rq->engine->uabi_class);
	}
}
static __always_inline unsigned int
busy_check_reader(struct dma_fence *fence)
{
	return __busy_set_if_active(fence, __busy_read_flag);
}
static __always_inline unsigned int
busy_check_writer(struct dma_fence *fence)
{
	if (!fence)
		return 0;

	return __busy_set_if_active(fence, __busy_write_id);
}
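
/*
 * Busy-ioctl entry point: collect the per-engine-class busy flags for the
 * i915 fences attached to the object's reservation, without taking the
 * object lock (both the handle lookup and the fence walk below are done
 * under RCU).
 */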
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_i915_gem_object *obj;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int err;

	err = -ENOENT;
	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, args->handle);
	if (!obj)
		goto out;

	/*
	 * A discrepancy here is that we do not report the status of
	 * non-i915 fences, i.e. even though we may report the object as idle,
	 * a call to set-domain may still stall waiting for foreign rendering.
	 * This also means that wait-ioctl may report an object as busy,
	 * where busy-ioctl considers it idle.
	 *
	 * We trade the ability to warn of foreign fences to report on which
	 * i915 engines are active for the object.
	 *
	 * Alternatively, we can trade that extra information on read/write
	 * activity with
	 *	args->busy =
	 *		!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ);
	 * to report the overall busyness. This is what the wait-ioctl does.
	 */
	args->busy = 0;
	dma_resv_iter_begin(&cursor, obj->base.resv, DMA_RESV_USAGE_READ);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
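		/*
		 * The fences may have been replaced while we iterated without
		 * the lock; if so the iterator restarts from the beginning
		 * and we must drop the flags accumulated so far rather than
		 * report stale engines.
		 */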
		if (dma_resv_iter_is_restarted(&cursor))
			args->busy = 0;

		if (dma_resv_iter_usage(&cursor) <= DMA_RESV_USAGE_WRITE)
			/* Translate the write fences to the READ *and* WRITE engine */
			args->busy |= busy_check_writer(fence);
		else
			/* Translate read fences to READ set of engines */
			args->busy |= busy_check_reader(fence);
	}
	dma_resv_iter_end(&cursor);

	err = 0;
out:
	rcu_read_unlock();
	return err;
}
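
/*
 * Illustrative userspace sketch (not part of this file; assumes libdrm's
 * drmIoctl() and the definitions from include/uapi/drm/i915_drm.h):
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) == 0) {
 *		bool writing = busy.busy & 0xffff;
 *		bool render_read =
 *			busy.busy & (0x10000u << I915_ENGINE_CLASS_RENDER);
 *		...
 *	}
 *
 * A zero busy.busy means the object is idle as far as i915 fences are
 * concerned; foreign fences are not reported, as noted above.
 */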