/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016-2018 Intel Corporation
 */

#include "i915_drv.h"

#include "i915_active.h"
#include "i915_syncmap.h"
#include "i915_timeline.h"

#define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit)))
#define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit))
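
/*
 * Note: these helpers tag the low bits of an aligned pointer, e.g.
 * (illustrative only):
 *
 *	cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE);
 *	if (ptr_test_bit(cl->vaddr, CACHELINE_FREE))
 *		... the cacheline has been marked for release ...
 */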

struct i915_timeline_hwsp {
	struct i915_gt_timelines *gt;
	struct list_head free_link;
	struct i915_vma *vma;
	u64 free_bitmap;
};

struct i915_timeline_cacheline {
	struct i915_active active;
	struct i915_timeline_hwsp *hwsp;
	void *vaddr;
#define CACHELINE_BITS 6
#define CACHELINE_FREE CACHELINE_BITS
};

static inline struct drm_i915_private *
hwsp_to_i915(struct i915_timeline_hwsp *hwsp)
{
	return container_of(hwsp->gt, struct drm_i915_private, gt.timelines);
}

static struct i915_vma *__hwsp_alloc(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma))
		i915_gem_object_put(obj);

	return vma;
}

static struct i915_vma *
hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
{
	struct drm_i915_private *i915 = timeline->i915;
	struct i915_gt_timelines *gt = &i915->gt.timelines;
	struct i915_timeline_hwsp *hwsp;

	BUILD_BUG_ON(BITS_PER_TYPE(u64) * CACHELINE_BYTES > PAGE_SIZE);

	spin_lock_irq(&gt->hwsp_lock);

	/* hwsp_free_list only contains HWSP that have available cachelines */
	hwsp = list_first_entry_or_null(&gt->hwsp_free_list,
					typeof(*hwsp), free_link);
	if (!hwsp) {
		struct i915_vma *vma;

		spin_unlock_irq(&gt->hwsp_lock);

		hwsp = kmalloc(sizeof(*hwsp), GFP_KERNEL);
		if (!hwsp)
			return ERR_PTR(-ENOMEM);

		vma = __hwsp_alloc(i915);
		if (IS_ERR(vma)) {
			kfree(hwsp);
			return vma;
		}

		vma->private = hwsp;
		hwsp->gt = gt;
		hwsp->vma = vma;
		hwsp->free_bitmap = ~0ull;

		spin_lock_irq(&gt->hwsp_lock);
		list_add(&hwsp->free_link, &gt->hwsp_free_list);
	}

	GEM_BUG_ON(!hwsp->free_bitmap);
	*cacheline = __ffs64(hwsp->free_bitmap);
	hwsp->free_bitmap &= ~BIT_ULL(*cacheline);
	if (!hwsp->free_bitmap)
		list_del(&hwsp->free_link);

	spin_unlock_irq(&gt->hwsp_lock);

	GEM_BUG_ON(hwsp->vma->private != hwsp);
	return hwsp->vma;
}
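
/*
 * Design note: each HWSP page is carved into BITS_PER_TYPE(u64) == 64
 * cachelines, with free_bitmap holding one bit per slot so that
 * __ffs64() finds the first free cacheline in O(1); pages with no free
 * slots simply drop off hwsp_free_list until a slot is returned.
 */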

static void __idle_hwsp_free(struct i915_timeline_hwsp *hwsp, int cacheline)
{
	struct i915_gt_timelines *gt = hwsp->gt;
	unsigned long flags;

	spin_lock_irqsave(&gt->hwsp_lock, flags);

	/* As a cacheline becomes available, publish the HWSP on the freelist */
	if (!hwsp->free_bitmap)
		list_add_tail(&hwsp->free_link, &gt->hwsp_free_list);

	GEM_BUG_ON(cacheline >= BITS_PER_TYPE(hwsp->free_bitmap));
	hwsp->free_bitmap |= BIT_ULL(cacheline);

	/* And if no one is left using it, give the page back to the system */
	if (hwsp->free_bitmap == ~0ull) {
		i915_vma_put(hwsp->vma);
		list_del(&hwsp->free_link);
		kfree(hwsp);
	}

	spin_unlock_irqrestore(&gt->hwsp_lock, flags);
}

static void __idle_cacheline_free(struct i915_timeline_cacheline *cl)
{
	GEM_BUG_ON(!i915_active_is_idle(&cl->active));

	i915_gem_object_unpin_map(cl->hwsp->vma->obj);
	i915_vma_put(cl->hwsp->vma);
	__idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));

	i915_active_fini(&cl->active);
	kfree(cl);
}

static void __cacheline_retire(struct i915_active *active)
{
	struct i915_timeline_cacheline *cl =
		container_of(active, typeof(*cl), active);

	i915_vma_unpin(cl->hwsp->vma);
	if (ptr_test_bit(cl->vaddr, CACHELINE_FREE))
		__idle_cacheline_free(cl);
}

static struct i915_timeline_cacheline *
cacheline_alloc(struct i915_timeline_hwsp *hwsp, unsigned int cacheline)
{
	struct i915_timeline_cacheline *cl;
	void *vaddr;

	GEM_BUG_ON(cacheline >= BIT(CACHELINE_BITS));

	cl = kmalloc(sizeof(*cl), GFP_KERNEL);
	if (!cl)
		return ERR_PTR(-ENOMEM);

	vaddr = i915_gem_object_pin_map(hwsp->vma->obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		kfree(cl);
		return ERR_CAST(vaddr);
	}

	i915_vma_get(hwsp->vma);
	cl->hwsp = hwsp;
	cl->vaddr = page_pack_bits(vaddr, cacheline);

	i915_active_init(hwsp_to_i915(hwsp), &cl->active, __cacheline_retire);

	return cl;
}

static void cacheline_acquire(struct i915_timeline_cacheline *cl)
{
	if (cl && i915_active_acquire(&cl->active))
		__i915_vma_pin(cl->hwsp->vma);
}

static void cacheline_release(struct i915_timeline_cacheline *cl)
{
	if (cl)
		i915_active_release(&cl->active);
}

static void cacheline_free(struct i915_timeline_cacheline *cl)
{
	GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE));
	cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE);

	if (i915_active_is_idle(&cl->active))
		__idle_cacheline_free(cl);
}
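
/*
 * Lifecycle summary for the helpers above: cacheline_alloc() claims a
 * 64B slot, cacheline_acquire()/cacheline_release() bracket each
 * timeline pin, and cacheline_free() marks the slot for return; the
 * actual teardown is deferred to __cacheline_retire() once the
 * i915_active tracker reports idle.
 */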

int i915_timeline_init(struct drm_i915_private *i915,
		       struct i915_timeline *timeline,
		       struct i915_vma *hwsp)
{
	void *vaddr;

	/*
	 * Ideally we want a set of engines on a single leaf as we expect
	 * to mostly be tracking synchronisation between engines. It is not
	 * a huge issue if this is not the case, but we may want to mitigate
	 * any page crossing penalties if they become an issue.
	 *
	 * Called during early_init before we know how many engines there are.
	 */
	BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES);

	timeline->i915 = i915;
	timeline->pin_count = 0;
	timeline->has_initial_breadcrumb = !hwsp;
	timeline->hwsp_cacheline = NULL;

	if (!hwsp) {
		struct i915_timeline_cacheline *cl;
		unsigned int cacheline;

		hwsp = hwsp_alloc(timeline, &cacheline);
		if (IS_ERR(hwsp))
			return PTR_ERR(hwsp);

		cl = cacheline_alloc(hwsp->private, cacheline);
		if (IS_ERR(cl)) {
			__idle_hwsp_free(hwsp->private, cacheline);
			return PTR_ERR(cl);
		}

		timeline->hwsp_cacheline = cl;
		timeline->hwsp_offset = cacheline * CACHELINE_BYTES;

		vaddr = page_mask_bits(cl->vaddr);
	} else {
		timeline->hwsp_offset = I915_GEM_HWS_SEQNO_ADDR;

		vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB);
		if (IS_ERR(vaddr))
			return PTR_ERR(vaddr);
	}

	timeline->hwsp_seqno =
		memset(vaddr + timeline->hwsp_offset, 0, CACHELINE_BYTES);

	timeline->hwsp_ggtt = i915_vma_get(hwsp);
	GEM_BUG_ON(timeline->hwsp_offset >= hwsp->size);

	timeline->fence_context = dma_fence_context_alloc(1);

	mutex_init(&timeline->mutex);

	INIT_ACTIVE_REQUEST(&timeline->last_request);
	INIT_LIST_HEAD(&timeline->requests);

	i915_syncmap_init(&timeline->sync);

	return 0;
}
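
/*
 * Note the two modes above: a caller-supplied global HWSP places the
 * seqno at the fixed I915_GEM_HWS_SEQNO_ADDR slot, while a NULL hwsp
 * suballocates a private cacheline for the timeline (which is also what
 * enables the initial breadcrumb: has_initial_breadcrumb = !hwsp).
 */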

void i915_timelines_init(struct drm_i915_private *i915)
{
	struct i915_gt_timelines *gt = &i915->gt.timelines;

	mutex_init(&gt->mutex);
	INIT_LIST_HEAD(&gt->active_list);

	spin_lock_init(&gt->hwsp_lock);
	INIT_LIST_HEAD(&gt->hwsp_free_list);

	/* via i915_gem_wait_for_idle() */
	i915_gem_shrinker_taints_mutex(i915, &gt->mutex);
}

static void timeline_add_to_active(struct i915_timeline *tl)
{
	struct i915_gt_timelines *gt = &tl->i915->gt.timelines;

	mutex_lock(&gt->mutex);
	list_add(&tl->link, &gt->active_list);
	mutex_unlock(&gt->mutex);
}

static void timeline_remove_from_active(struct i915_timeline *tl)
{
	struct i915_gt_timelines *gt = &tl->i915->gt.timelines;

	mutex_lock(&gt->mutex);
	list_del(&tl->link);
	mutex_unlock(&gt->mutex);
}

/**
 * i915_timelines_park - called when the driver idles
 * @i915: the drm_i915_private device
 *
 * When the driver is completely idle, we know that all of our sync points
 * have been signaled and our tracking is then entirely redundant. Any request
 * to wait upon an older sync point will be completed instantly as we know
 * the fence is signaled and therefore we will not even look them up in the
 * sync point map.
 */
void i915_timelines_park(struct drm_i915_private *i915)
{
	struct i915_gt_timelines *gt = &i915->gt.timelines;
	struct i915_timeline *timeline;

	mutex_lock(&gt->mutex);
	list_for_each_entry(timeline, &gt->active_list, link) {
		/*
		 * All known fences are completed so we can scrap
		 * the current sync point tracking and start afresh,
		 * any attempt to wait upon a previous sync point
		 * will be skipped as the fence was signaled.
		 */
		i915_syncmap_free(&timeline->sync);
	}
	mutex_unlock(&gt->mutex);
}

void i915_timeline_fini(struct i915_timeline *timeline)
{
	GEM_BUG_ON(timeline->pin_count);
	GEM_BUG_ON(!list_empty(&timeline->requests));

	i915_syncmap_free(&timeline->sync);

	if (timeline->hwsp_cacheline)
		cacheline_free(timeline->hwsp_cacheline);
	else
		i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);

	i915_vma_put(timeline->hwsp_ggtt);
}

struct i915_timeline *
i915_timeline_create(struct drm_i915_private *i915,
		     struct i915_vma *global_hwsp)
{
	struct i915_timeline *timeline;
	int err;

	timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
	if (!timeline)
		return ERR_PTR(-ENOMEM);

	err = i915_timeline_init(i915, timeline, global_hwsp);
	if (err) {
		kfree(timeline);
		return ERR_PTR(err);
	}

	kref_init(&timeline->kref);

	return timeline;
}

int i915_timeline_pin(struct i915_timeline *tl)
{
	int err;

	if (tl->pin_count++)
		return 0;
	GEM_BUG_ON(!tl->pin_count);

	err = i915_vma_pin(tl->hwsp_ggtt, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (err)
		goto unpin;

	tl->hwsp_offset =
		i915_ggtt_offset(tl->hwsp_ggtt) +
		offset_in_page(tl->hwsp_offset);

	cacheline_acquire(tl->hwsp_cacheline);
	timeline_add_to_active(tl);

	return 0;

unpin:
	tl->pin_count = 0;
	return err;
}
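
/*
 * Pinning is refcounted: only the first i915_timeline_pin() binds the
 * HWSP into the GGTT and converts hwsp_offset from a page-relative
 * offset into a GGTT-absolute one; subsequent pins only bump pin_count.
 */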

static u32 timeline_advance(struct i915_timeline *tl)
{
	GEM_BUG_ON(!tl->pin_count);
	GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb);

	return tl->seqno += 1 + tl->has_initial_breadcrumb;
}
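
/*
 * For example: a timeline with an initial breadcrumb consumes two seqno
 * values per request (the stride above is 1 + has_initial_breadcrumb),
 * so its completion seqno stays even, as the GEM_BUG_ON asserts.
 */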

static void timeline_rollback(struct i915_timeline *tl)
{
	tl->seqno -= 1 + tl->has_initial_breadcrumb;
}

static noinline int
__i915_timeline_get_seqno(struct i915_timeline *tl,
			  struct i915_request *rq,
			  u32 *seqno)
{
	struct i915_timeline_cacheline *cl;
	unsigned int cacheline;
	struct i915_vma *vma;
	void *vaddr;
	int err;

	/*
	 * If there is an outstanding GPU reference to this cacheline,
	 * such as it being sampled by a HW semaphore on another timeline,
	 * we cannot wraparound our seqno value (the HW semaphore does
	 * a strict greater-than-or-equals compare, not i915_seqno_passed).
	 * So if the cacheline is still busy, we must detach ourselves
	 * from it and leave it inflight alongside its users.
	 *
	 * However, if nobody is watching and we can guarantee that nobody
	 * will, we could simply reuse the same cacheline.
	 *
	 * if (i915_active_request_is_signaled(&tl->last_request) &&
	 *     i915_active_is_signaled(&tl->hwsp_cacheline->active))
	 *	return 0;
	 *
	 * That seems unlikely for a busy timeline that needed to wrap in
	 * the first place, so just replace the cacheline.
	 */

	vma = hwsp_alloc(tl, &cacheline);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_rollback;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (err) {
		__idle_hwsp_free(vma->private, cacheline);
		goto err_rollback;
	}

	cl = cacheline_alloc(vma->private, cacheline);
	if (IS_ERR(cl)) {
		err = PTR_ERR(cl);
		__idle_hwsp_free(vma->private, cacheline);
		goto err_unpin;
	}
	GEM_BUG_ON(cl->hwsp->vma != vma);

	/*
	 * Attach the old cacheline to the current request, so that we only
	 * free it after the current request is retired, which ensures that
	 * all writes into the cacheline from previous requests are complete.
	 */
	err = i915_active_ref(&tl->hwsp_cacheline->active,
			      tl->fence_context, rq);
	if (err)
		goto err_cacheline;

	cacheline_release(tl->hwsp_cacheline); /* ownership now xfered to rq */
	cacheline_free(tl->hwsp_cacheline);

	i915_vma_unpin(tl->hwsp_ggtt); /* binding kept alive by old cacheline */
	i915_vma_put(tl->hwsp_ggtt);

	tl->hwsp_ggtt = i915_vma_get(vma);

	vaddr = page_mask_bits(cl->vaddr);
	tl->hwsp_offset = cacheline * CACHELINE_BYTES;
	tl->hwsp_seqno =
		memset(vaddr + tl->hwsp_offset, 0, CACHELINE_BYTES);

	tl->hwsp_offset += i915_ggtt_offset(vma);

	cacheline_acquire(cl);
	tl->hwsp_cacheline = cl;

	*seqno = timeline_advance(tl);
	GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno));
	return 0;

err_cacheline:
	cacheline_free(cl);
err_unpin:
	i915_vma_unpin(vma);
err_rollback:
	timeline_rollback(tl);
	return err;
}

int i915_timeline_get_seqno(struct i915_timeline *tl,
			    struct i915_request *rq,
			    u32 *seqno)
{
	*seqno = timeline_advance(tl);

	/* Replace the HWSP on wraparound for HW semaphores */
	if (unlikely(!*seqno && tl->hwsp_cacheline))
		return __i915_timeline_get_seqno(tl, rq, seqno);

	return 0;
}
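
/*
 * Typical usage when emitting a request (illustrative only):
 *
 *	err = i915_timeline_get_seqno(tl, rq, &seqno);
 *	if (err)
 *		return err;
 *
 * On the rare wrap of a HW-semaphore timeline, the slow path above
 * swaps in a fresh cacheline before handing back the new seqno.
 */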

static int cacheline_ref(struct i915_timeline_cacheline *cl,
			 struct i915_request *rq)
{
	return i915_active_ref(&cl->active, rq->fence.context, rq);
}

int i915_timeline_read_hwsp(struct i915_request *from,
			    struct i915_request *to,
			    u32 *hwsp)
{
	struct i915_timeline_cacheline *cl = from->hwsp_cacheline;
	struct i915_timeline *tl = from->timeline;
	int err;

	GEM_BUG_ON(to->timeline == tl);

	mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
	err = i915_request_completed(from);
	if (!err)
		err = cacheline_ref(cl, to);
	if (!err) {
		if (likely(cl == tl->hwsp_cacheline)) {
			*hwsp = tl->hwsp_offset;
		} else { /* across a seqno wrap, recover the original offset */
			*hwsp = i915_ggtt_offset(cl->hwsp->vma) +
				ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) *
				CACHELINE_BYTES;
		}
	}
	mutex_unlock(&tl->mutex);

	return err;
}
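
/*
 * Note: after a seqno wrap the request's cacheline no longer matches
 * the timeline's current one, so the GGTT offset is recomputed from the
 * cacheline index packed into the low CACHELINE_BITS of cl->vaddr.
 */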

void i915_timeline_unpin(struct i915_timeline *tl)
{
	GEM_BUG_ON(!tl->pin_count);
	if (--tl->pin_count)
		return;

	timeline_remove_from_active(tl);
	cacheline_release(tl->hwsp_cacheline);

	/*
	 * Since this timeline is idle, all barriers upon which we were waiting
	 * must also be complete and so we can discard the last used barriers
	 * without loss of information.
	 */
	i915_syncmap_free(&tl->sync);

	__i915_vma_unpin(tl->hwsp_ggtt);
}

void __i915_timeline_free(struct kref *kref)
{
	struct i915_timeline *timeline =
		container_of(kref, typeof(*timeline), kref);

	i915_timeline_fini(timeline);
	kfree(timeline);
}

void i915_timelines_fini(struct drm_i915_private *i915)
{
	struct i915_gt_timelines *gt = &i915->gt.timelines;

	GEM_BUG_ON(!list_empty(&gt->active_list));
	GEM_BUG_ON(!list_empty(&gt->hwsp_free_list));

	mutex_destroy(&gt->mutex);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_timeline.c"
#include "selftests/i915_timeline.c"
#endif