2 * SPDX-License-Identifier: MIT
4 * Copyright © 2016 Intel Corporation
7 #include <linux/dma-fence-array.h>
8 #include <linux/dma-fence-chain.h>
9 #include <linux/jiffies.h>
11 #include "gt/intel_engine.h"
13 #include "dma_resv_utils.h"
14 #include "i915_gem_ioctls.h"
15 #include "i915_gem_object.h"
/*
 * Wait for a single dma_fence to signal, honouring the I915_WAIT_* flags
 * and returning the remaining timeout (>= 0) or a negative error code.
 * NOTE(review): the return type, remaining parameters and function tail
 * are elided from this chunk — confirm against the full source.
 */
i915_gem_object_wait_fence(struct dma_fence *fence,
	/*
	 * flags is passed straight through as the "interruptible" bool of
	 * dma_fence_wait_timeout() below, so the flag must be bit 0.
	 */
	BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);

	/* Already signaled: nothing to wait for */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))

	/* Native i915 fences take the fast path through the request waiter */
	if (dma_fence_is_i915(fence))
		return i915_request_wait(to_request(fence), flags, timeout);

	/* Foreign fences fall back to the generic dma-fence wait */
	return dma_fence_wait_timeout(fence,
				      flags & I915_WAIT_INTERRUPTIBLE,
/*
 * Wait on the fences tracked by a reservation object. With I915_WAIT_ALL
 * every shared (read) fence is waited upon before the exclusive (write)
 * fence is considered; otherwise only the exclusive fence is used.
 * NOTE(review): several lines (parameter tail, error handling after the
 * RCU snapshot, closing braces and the pruning tail) are elided from this
 * chunk — confirm against the full source.
 */
i915_gem_object_wait_reservation(struct dma_resv *resv,
	struct dma_fence *excl;
	bool prune_fences = false;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;

		/* Snapshot all fences under RCU; we hold a reference on each */
		ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);

		for (i = 0; i < count; i++) {
			timeout = i915_gem_object_wait_fence(shared[i],
			dma_fence_put(shared[i]);

		/* Drop the references on any fences we did not wait upon */
		for (; i < count; i++)
			dma_fence_put(shared[i]);

		/*
		 * If both shared fences and an exclusive fence exist,
		 * then by construction the shared fences must be later
		 * than the exclusive fence. If we successfully wait for
		 * all the shared fences, we know that the exclusive fence
		 * must all be signaled. If all the shared fences are
		 * signaled, we can prune the array and recover the
		 * floating references on the fences/requests.
		 */
		prune_fences = count && timeout >= 0;

		excl = dma_resv_get_excl_unlocked(resv);

	/* Only wait on the exclusive fence while time remains (timeout >= 0) */
	if (excl && timeout >= 0)
		timeout = i915_gem_object_wait_fence(excl, flags, timeout);

	/*
	 * Opportunistically prune the fences iff we know they have *all* been
	 * signaled.
	 */
/*
 * Propagate a scheduling priority to the i915 request backing a fence.
 * Foreign (non-i915) or already-signaled fences are ignored.
 * NOTE(review): the line assigning @engine (presumably from rq->engine)
 * is elided from this chunk — confirm against the full source.
 */
static void fence_set_priority(struct dma_fence *fence,
			       const struct i915_sched_attr *attr)
	struct i915_request *rq;
	struct intel_engine_cs *engine;

	/* Only i915 fences carry a request we can reprioritise */
	if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))

	rq = to_request(fence);

	rcu_read_lock(); /* RCU serialisation for set-wedged protection */
	/* Not every engine backend implements priority adjustment */
	if (engine->schedule)
		engine->schedule(rq, attr);
/* Identify a dma_fence_chain by its ops table (dma_fence has no type field) */
static inline bool __dma_fence_is_chain(const struct dma_fence *fence)
	return fence->ops == &dma_fence_chain_ops;
/*
 * Apply @attr to every i915 request found inside @fence, unwrapping one
 * level of fence-array or fence-chain containers. Signaled fences are
 * skipped entirely.
 * NOTE(review): the declaration of the loop index, the matching
 * local_bh_disable() and several closing braces are elided from this
 * chunk — confirm against the full source.
 */
void i915_gem_fence_wait_priority(struct dma_fence *fence,
				  const struct i915_sched_attr *attr)
	/* Nothing left to boost */
	if (dma_fence_is_signaled(fence))

	/* Recurse once into a fence-array */
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);

		for (i = 0; i < array->num_fences; i++)
			fence_set_priority(array->fences[i], attr);
	} else if (__dma_fence_is_chain(fence)) {
		struct dma_fence *iter;

		/* The chain is ordered; if we boost the last, we boost all */
		dma_fence_chain_for_each(iter, fence) {
			fence_set_priority(to_dma_fence_chain(iter)->fence,

		/* Plain fence: boost it directly */
		fence_set_priority(fence, attr);

	local_bh_enable(); /* kick the tasklets if queues were reprioritised */
/*
 * Bump the scheduling priority of the requests tracked in @obj's
 * reservation object so a waiter is serviced sooner. With I915_WAIT_ALL
 * the shared (read) fences are boosted as well as the exclusive one.
 * NOTE(review): the return type, a flags parameter, error handling and
 * closing braces are elided from this chunk — confirm against the full
 * source.
 */
i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
			      const struct i915_sched_attr *attr)
	struct dma_fence *excl;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;

		/* Snapshot all fences under RCU; we hold a reference on each */
		ret = dma_resv_get_fences_rcu(obj->base.resv,
					      &excl, &count, &shared);

		for (i = 0; i < count; i++) {
			i915_gem_fence_wait_priority(shared[i], attr);
			dma_fence_put(shared[i]);

		excl = dma_resv_get_excl_unlocked(obj->base.resv);

		i915_gem_fence_wait_priority(excl, attr);
/**
 * i915_gem_object_wait - Waits for rendering to the object to be completed
 * @obj: i915 gem object
 * @flags: how to wait (under a lock, for all rendering or just for writes etc)
 * @timeout: how long to wait
 */
i915_gem_object_wait(struct drm_i915_gem_object *obj,
	/* Callers must supply a non-negative timeout */
	GEM_BUG_ON(timeout < 0);

	timeout = i915_gem_object_wait_reservation(obj->base.resv,
	/* Collapse remaining time to success (0); propagate errors unchanged */
	return timeout < 0 ? timeout : 0;
/* Convert nanoseconds to a jiffies timeout, clamping at MAX_JIFFY_OFFSET. */
static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
	/* nsecs_to_jiffies64() does not guard against overflow */
	if (NSEC_PER_SEC % HZ &&
	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
		return MAX_JIFFY_OFFSET;

	/* The +1 jiffy rounds up so we never wait less than requested */
	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
/*
 * Map the ioctl's signed nanosecond timeout onto scheduler jiffies.
 * NOTE(review): the guard conditions (presumably negative => wait
 * forever, zero => poll) are elided from this chunk — confirm against
 * the full source.
 */
static unsigned long to_wait_timeout(s64 timeout_ns)
	return MAX_SCHEDULE_TIMEOUT;

	return nsecs_to_jiffies_timeout(timeout_ns);
/**
 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * Returns 0 if successful, else an error is returned with the remaining time in
 * the timeout parameter.
 *  -ETIME: object is still busy after timeout
 *  -ERESTARTSYS: signal interrupted the wait
 *  -ENOENT: object doesn't exist
 * Also possible, but rare:
 *  -EAGAIN: incomplete, restart syscall
 *  -ENODEV: Internal IRQ fail
 *  -E?: The add request failed
 *
 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
 * non-zero timeout parameter the wait ioctl will wait for the given number of
 * nanoseconds on an object becoming unbusy. Since the wait itself does so
 * without holding struct_mutex the object may become re-busied before this
 * function completes. A similar but shorter race condition exists in the busy
 * ioctl.
 */
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
	struct drm_i915_gem_wait *args = data;
	struct drm_i915_gem_object *obj;

	/* No flags are defined for this ioctl; reject anything unknown */
	if (args->flags != 0)

	obj = i915_gem_object_lookup(file, args->bo_handle);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   to_wait_timeout(args->timeout_ns));

	if (args->timeout_ns > 0) {
		/* Report back to userspace how much of the timeout remains */
		args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
		if (args->timeout_ns < 0)
			args->timeout_ns = 0;

		/*
		 * Apparently ktime isn't accurate enough and occasionally has a
		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
		 * things up to make the test happy. We allow up to 1 jiffy.
		 *
		 * This is a regression from the timespec->ktime conversion.
		 */
		if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
			args->timeout_ns = 0;

		/* Asked to wait beyond the jiffie/scheduler precision? */
		if (ret == -ETIME && args->timeout_ns)

	i915_gem_object_put(obj);