// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2018 Intel Corporation
 */

#include "i915_drv.h"

#include "i915_active.h"
#include "i915_syncmap.h"

#include "intel_ring.h"
#include "intel_timeline.h"

#define TIMELINE_SEQNO_BYTES 8

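/*
 * Allocate a page of GGTT-mappable memory to act as the timeline's
 * hardware status page (HWSP), into which the GPU writes its breadcrumb
 * seqno values.
 */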
static struct i915_vma *hwsp_alloc(struct intel_gt *gt)
{
        struct drm_i915_private *i915 = gt->i915;
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;

        obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

        vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
        if (IS_ERR(vma))
                i915_gem_object_put(obj);

        return vma;
}

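/*
 * i915_active callbacks: while the timeline has requests in flight, the
 * active tracker holds an extra pin on the HWSP vma and a reference on
 * the timeline; both are dropped when the last request is retired.
 */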
static void __timeline_retire(struct i915_active *active)
{
        struct intel_timeline *tl =
                container_of(active, typeof(*tl), active);

        i915_vma_unpin(tl->hwsp_ggtt);
        intel_timeline_put(tl);
}

static int __timeline_active(struct i915_active *active)
{
        struct intel_timeline *tl =
                container_of(active, typeof(*tl), active);

        __i915_vma_pin(tl->hwsp_ggtt);
        intel_timeline_get(tl);
        return 0;
}

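/*
 * Map the HWSP page and point hwsp_seqno at this timeline's slot within
 * it, zeroing the slot so that a stale value is never mistaken for the
 * current seqno.
 */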
I915_SELFTEST_EXPORT int
intel_timeline_pin_map(struct intel_timeline *timeline)
{
        struct drm_i915_gem_object *obj = timeline->hwsp_ggtt->obj;
        u32 ofs = offset_in_page(timeline->hwsp_offset);
        void *vaddr;

        vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);

        timeline->hwsp_map = vaddr;
        timeline->hwsp_seqno = memset(vaddr + ofs, 0, TIMELINE_SEQNO_BYTES);
        drm_clflush_virt_range(vaddr + ofs, TIMELINE_SEQNO_BYTES);

        return 0;
}

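/*
 * If the caller supplies a hwsp vma, the timeline occupies @offset within
 * that shared page; otherwise a private page is allocated and the timeline
 * also emits an initial breadcrumb (see timeline_advance()).
 */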
static int intel_timeline_init(struct intel_timeline *timeline,
                               struct intel_gt *gt,
                               struct i915_vma *hwsp,
                               unsigned int offset)
{
        kref_init(&timeline->kref);
        atomic_set(&timeline->pin_count, 0);

        timeline->gt = gt;

        if (hwsp) {
                timeline->hwsp_offset = offset;
                timeline->hwsp_ggtt = i915_vma_get(hwsp);
        } else {
                timeline->has_initial_breadcrumb = true;
                hwsp = hwsp_alloc(gt);
                if (IS_ERR(hwsp))
                        return PTR_ERR(hwsp);
                timeline->hwsp_ggtt = hwsp;
        }

        timeline->hwsp_map = NULL;
        timeline->hwsp_seqno = (void *)(long)timeline->hwsp_offset;

        GEM_BUG_ON(timeline->hwsp_offset >= hwsp->size);

        timeline->fence_context = dma_fence_context_alloc(1);

        mutex_init(&timeline->mutex);

        INIT_ACTIVE_FENCE(&timeline->last_request);
        INIT_LIST_HEAD(&timeline->requests);

        i915_syncmap_init(&timeline->sync);
        i915_active_init(&timeline->active, __timeline_active,
                         __timeline_retire, 0);

        return 0;
}

void intel_gt_init_timelines(struct intel_gt *gt)
{
        struct intel_gt_timelines *timelines = &gt->timelines;

        spin_lock_init(&timelines->lock);
        INIT_LIST_HEAD(&timelines->active_list);
}

static void intel_timeline_fini(struct rcu_head *rcu)
{
        struct intel_timeline *timeline =
                container_of(rcu, struct intel_timeline, rcu);

        if (timeline->hwsp_map)
                i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);

        i915_vma_put(timeline->hwsp_ggtt);
        i915_active_fini(&timeline->active);

        /*
         * A small race exists between intel_gt_retire_requests_timeout and
         * intel_timeline_exit which could result in the syncmap not getting
         * freed. Rather than work too hard to seal this race, simply clean
         * up the syncmap on fini.
         */
        i915_syncmap_free(&timeline->sync);

        kfree(timeline);
}

struct intel_timeline *
__intel_timeline_create(struct intel_gt *gt,
                        struct i915_vma *global_hwsp,
                        unsigned int offset)
{
        struct intel_timeline *timeline;
        int err;

        timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
        if (!timeline)
                return ERR_PTR(-ENOMEM);

        err = intel_timeline_init(timeline, gt, global_hwsp, offset);
        if (err) {
                kfree(timeline);
                return ERR_PTR(err);
        }

        return timeline;
}

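/*
 * Engine-bound timelines are carved out of the engine's status page and
 * linked onto the status page's timeline list so they can be found again
 * from the engine.
 */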
struct intel_timeline *
intel_timeline_create_from_engine(struct intel_engine_cs *engine,
                                  unsigned int offset)
{
        struct i915_vma *hwsp = engine->status_page.vma;
        struct intel_timeline *tl;

        tl = __intel_timeline_create(engine->gt, hwsp, offset);
        if (IS_ERR(tl))
                return tl;

        /* Borrow a nearby lock; we only create these timelines during init */
        mutex_lock(&hwsp->vm->mutex);
        list_add_tail(&tl->engine_link, &engine->status_page.timelines);
        mutex_unlock(&hwsp->vm->mutex);

        return tl;
}

void __intel_timeline_pin(struct intel_timeline *tl)
{
        GEM_BUG_ON(!atomic_read(&tl->pin_count));
        atomic_inc(&tl->pin_count);
}

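/*
 * The first pin does the real work, mapping the HWSP page and pinning the
 * vma into the GGTT; later pins merely bump pin_count. A minimal usage
 * sketch (locking and error handling elided):
 *
 *	err = intel_timeline_pin(tl, ww);
 *	...
 *	err = intel_timeline_get_seqno(tl, rq, &seqno);
 *	...
 *	intel_timeline_unpin(tl);
 */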
int intel_timeline_pin(struct intel_timeline *tl, struct i915_gem_ww_ctx *ww)
{
        int err;

        if (atomic_add_unless(&tl->pin_count, 1, 0))
                return 0;

        if (!tl->hwsp_map) {
                err = intel_timeline_pin_map(tl);
                if (err)
                        return err;
        }

        err = i915_ggtt_pin(tl->hwsp_ggtt, ww, 0, PIN_HIGH);
        if (err)
                return err;

        tl->hwsp_offset =
                i915_ggtt_offset(tl->hwsp_ggtt) +
                offset_in_page(tl->hwsp_offset);
        GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
                 tl->fence_context, tl->hwsp_offset);

        i915_active_acquire(&tl->active);
        if (atomic_fetch_inc(&tl->pin_count)) {
                /* Raced with another first pin; drop our extra references. */
                i915_active_release(&tl->active);
                __i915_vma_unpin(tl->hwsp_ggtt);
        }

        return 0;
}

void intel_timeline_reset_seqno(const struct intel_timeline *tl)
{
        u32 *hwsp_seqno = (u32 *)tl->hwsp_seqno;
        /* Must be pinned to be writable, and no requests in flight. */
        GEM_BUG_ON(!atomic_read(&tl->pin_count));

        memset(hwsp_seqno + 1, 0, TIMELINE_SEQNO_BYTES - sizeof(*hwsp_seqno));
        WRITE_ONCE(*hwsp_seqno, tl->seqno);
        drm_clflush_virt_range(hwsp_seqno, TIMELINE_SEQNO_BYTES);
}

void intel_timeline_enter(struct intel_timeline *tl)
{
        struct intel_gt_timelines *timelines = &tl->gt->timelines;

        /*
         * Pretend we are serialised by the timeline->mutex.
         *
         * While generally true, there are a few exceptions to the rule
         * for the engine->kernel_context being used to manage power
         * transitions. As engine_park may be called from under any
         * timeline, it uses the power mutex as a global serialisation
         * lock to prevent any other request entering its timeline.
         *
         * The rule is generally tl->mutex, otherwise engine->wakeref.mutex.
         *
         * However, intel_gt_retire_requests() does not know which engine
         * it is retiring along and so cannot partake in the engine-pm
         * barrier, and there we use the tl->active_count as a means to
         * pin the timeline in the active_list while the locks are dropped.
         * Ergo, as that is outside of the engine-pm barrier, we need to
         * use atomics to manipulate tl->active_count.
         */
        lockdep_assert_held(&tl->mutex);

        if (atomic_add_unless(&tl->active_count, 1, 0))
                return;

        spin_lock(&timelines->lock);
        if (!atomic_fetch_inc(&tl->active_count)) {
                /*
                 * The HWSP is volatile, and may have been lost while inactive,
                 * e.g. across suspend/resume. Be paranoid, and ensure that
                 * the HWSP value matches our seqno so we don't proclaim
                 * the next request as already complete.
                 */
                intel_timeline_reset_seqno(tl);
                list_add_tail(&tl->link, &timelines->active_list);
        }
        spin_unlock(&timelines->lock);
}

void intel_timeline_exit(struct intel_timeline *tl)
{
        struct intel_gt_timelines *timelines = &tl->gt->timelines;

        /* See intel_timeline_enter() */
        lockdep_assert_held(&tl->mutex);

        GEM_BUG_ON(!atomic_read(&tl->active_count));
        if (atomic_add_unless(&tl->active_count, -1, 1))
                return;

        spin_lock(&timelines->lock);
        if (atomic_dec_and_test(&tl->active_count))
                list_del(&tl->link);
        spin_unlock(&timelines->lock);

        /*
         * Since this timeline is idle, all barriers upon which we were waiting
         * must also be complete and so we can discard the last used barriers
         * without loss of information.
         */
        i915_syncmap_free(&tl->sync);
}

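/*
 * When a timeline emits an initial breadcrumb, each request consumes two
 * seqno values and the completed seqno stays even; the intermediate odd
 * value is left for the initial breadcrumb. Hence the advance by
 * 1 + has_initial_breadcrumb below, and the evenness assertion.
 */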
static u32 timeline_advance(struct intel_timeline *tl)
{
        GEM_BUG_ON(!atomic_read(&tl->pin_count));
        GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb);

        return tl->seqno += 1 + tl->has_initial_breadcrumb;
}

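/*
 * Slow path for seqno wraparound: move this timeline's seqno slot to the
 * next TIMELINE_SEQNO_BYTES chunk of the HWSP page, so that a hardware
 * semaphore still watching the old slot does not see the counter jump
 * backwards (see intel_timeline_get_seqno()).
 */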
static noinline int
__intel_timeline_get_seqno(struct intel_timeline *tl,
                           u32 *seqno)
{
        u32 next_ofs = offset_in_page(tl->hwsp_offset + TIMELINE_SEQNO_BYTES);

        /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
        if (TIMELINE_SEQNO_BYTES <= BIT(5) && (next_ofs & BIT(5)))
                next_ofs = offset_in_page(next_ofs + BIT(5));

        tl->hwsp_offset = i915_ggtt_offset(tl->hwsp_ggtt) + next_ofs;
        tl->hwsp_seqno = tl->hwsp_map + next_ofs;
        intel_timeline_reset_seqno(tl);

        *seqno = timeline_advance(tl);
        GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno));
        return 0;
}

int intel_timeline_get_seqno(struct intel_timeline *tl,
                             struct i915_request *rq,
                             u32 *seqno)
{
        *seqno = timeline_advance(tl);

        /* Replace the HWSP on wraparound for HW semaphores */
        if (unlikely(!*seqno && tl->has_initial_breadcrumb))
                return __intel_timeline_get_seqno(tl, seqno);

        return 0;
}

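/*
 * Report the GGTT address at which @from's breadcrumb will be written, so
 * that @to can set up a semaphore wait on it; the timeline is kept alive
 * (via its i915_active) until @to is retired.
 */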
int intel_timeline_read_hwsp(struct i915_request *from,
                             struct i915_request *to,
                             u32 *hwsp)
{
        struct intel_timeline *tl;
        int err;

        rcu_read_lock();
        tl = rcu_dereference(from->timeline);
        if (i915_request_signaled(from) ||
            !i915_active_acquire_if_busy(&tl->active))
                tl = NULL;

        if (tl) {
                /* hwsp_offset may wraparound, so use from->hwsp_seqno */
                *hwsp = i915_ggtt_offset(tl->hwsp_ggtt) +
                        offset_in_page(from->hwsp_seqno);
        }

        /* ensure we wait on the right request, if not, we completed */
        if (tl && __i915_request_is_complete(from)) {
                i915_active_release(&tl->active);
                tl = NULL;
        }
        rcu_read_unlock();

        if (!tl)
                return 1;

        /* Can't do semaphore waits on kernel context */
        if (!tl->has_initial_breadcrumb) {
                err = -EINVAL;
                goto out;
        }

        err = i915_active_add_request(&tl->active, to);

out:
        i915_active_release(&tl->active);
        return err;
}

void intel_timeline_unpin(struct intel_timeline *tl)
{
        GEM_BUG_ON(!atomic_read(&tl->pin_count));
        if (!atomic_dec_and_test(&tl->pin_count))
                return;

        i915_active_release(&tl->active);
        __i915_vma_unpin(tl->hwsp_ggtt);
}

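/*
 * Final kref release: the actual teardown is deferred via RCU, as waiters
 * such as intel_timeline_read_hwsp() may still dereference the timeline
 * under rcu_read_lock().
 */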
void __intel_timeline_free(struct kref *kref)
{
        struct intel_timeline *timeline =
                container_of(kref, typeof(*timeline), kref);

        GEM_BUG_ON(atomic_read(&timeline->pin_count));
        GEM_BUG_ON(!list_empty(&timeline->requests));
        GEM_BUG_ON(timeline->retire);

        call_rcu(&timeline->rcu, intel_timeline_fini);
}

void intel_gt_fini_timelines(struct intel_gt *gt)
{
        struct intel_gt_timelines *timelines = &gt->timelines;

        GEM_BUG_ON(!list_empty(&timelines->active_list));
}

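/*
 * Debug dump of all active timelines. Each timeline is temporarily pinned
 * in the active_list (by bumping active_count) so that the list spinlock
 * can be dropped while its requests are walked under tl->mutex.
 */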
void intel_gt_show_timelines(struct intel_gt *gt,
                             struct drm_printer *m,
                             void (*show_request)(struct drm_printer *m,
                                                  const struct i915_request *rq,
                                                  const char *prefix,
                                                  int indent))
{
        struct intel_gt_timelines *timelines = &gt->timelines;
        struct intel_timeline *tl, *tn;
        LIST_HEAD(free);

        spin_lock(&timelines->lock);
        list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
                unsigned long count, ready, inflight;
                struct i915_request *rq, *rn;
                struct dma_fence *fence;

                if (!mutex_trylock(&tl->mutex)) {
                        drm_printf(m, "Timeline %llx: busy; skipping\n",
                                   tl->fence_context);
                        continue;
                }

                intel_timeline_get(tl);
                GEM_BUG_ON(!atomic_read(&tl->active_count));
                atomic_inc(&tl->active_count); /* pin the list element */
                spin_unlock(&timelines->lock);

                count = 0;
                ready = 0;
                inflight = 0;
                list_for_each_entry_safe(rq, rn, &tl->requests, link) {
                        if (i915_request_completed(rq))
                                continue;

                        count++;
                        if (i915_request_is_ready(rq))
                                ready++;
                        if (i915_request_is_active(rq))
                                inflight++;
                }

                drm_printf(m, "Timeline %llx: { ", tl->fence_context);
                drm_printf(m, "count: %lu, ready: %lu, inflight: %lu",
                           count, ready, inflight);
                drm_printf(m, ", seqno: { current: %d, last: %d }",
                           *tl->hwsp_seqno, tl->seqno);
                fence = i915_active_fence_get(&tl->last_request);
                if (fence) {
                        drm_printf(m, ", engine: %s",
                                   to_request(fence)->engine->name);
                        dma_fence_put(fence);
                }
                drm_printf(m, " }\n");

                if (show_request) {
                        list_for_each_entry_safe(rq, rn, &tl->requests, link)
                                show_request(m, rq, "", 2);
                }

                mutex_unlock(&tl->mutex);
                spin_lock(&timelines->lock);

                /* Resume list iteration after reacquiring spinlock */
                list_safe_reset_next(tl, tn, link);
                if (atomic_dec_and_test(&tl->active_count))
                        list_del(&tl->link);

                /* Defer the final release to after the spinlock */
                if (refcount_dec_and_test(&tl->kref.refcount)) {
                        GEM_BUG_ON(atomic_read(&tl->active_count));
                        list_add(&tl->link, &free);
                }
        }
        spin_unlock(&timelines->lock);

        list_for_each_entry_safe(tl, tn, &free, link)
                __intel_timeline_free(&tl->kref);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "gt/selftests/mock_timeline.c"
#include "gt/selftest_timeline.c"
#endif