/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */
#ifndef __I915_TIMELINE_TYPES_H__
#define __I915_TIMELINE_TYPES_H__

#include <linux/list.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

#include "i915_active_types.h"

struct intel_timeline_hwsp;
23 struct intel_timeline {
27 struct mutex mutex; /* protects the flow of requests */
30 * pin_count and active_count track essentially the same thing:
31 * How many requests are in flight or may be under construction.
33 * We need two distinct counters so that we can assign different
34 * lifetimes to the events for different use-cases. For example,
35 * we want to permanently keep the timeline pinned for the kernel
36 * context so that we can issue requests at any time without having
37 * to acquire space in the GGTT. However, we want to keep tracking
38 * the activity (to be able to detect when we become idle) along that
39 * permanently pinned timeline and so end up requiring two counters.
41 * Note that the active_count is protected by the intel_timeline.mutex,
42 * but the pin_count is protected by a combination of serialisation
43 * from the intel_context caller plus internal atomicity.
46 atomic_t active_count;
48 const u32 *hwsp_seqno;
49 struct i915_vma *hwsp_ggtt;
52 struct intel_timeline_cacheline *hwsp_cacheline;
54 bool has_initial_breadcrumb;
57 * List of breadcrumbs associated with GPU requests currently
60 struct list_head requests;
63 * Contains an RCU guarded pointer to the last request. No reference is
64 * held to the request, users must carefully acquire a reference to
65 * the request using i915_active_fence_get(), or manage the RCU
66 * protection themselves (cf the i915_active_fence API).
68 struct i915_active_fence last_request;
70 /** A chain of completed timelines ready for early retirement. */
71 struct intel_timeline *retire;
74 * We track the most recent seqno that we wait on in every context so
75 * that we only have to emit a new await and dependency on a more
76 * recent sync point. As the contexts may be executed out-of-order, we
77 * have to track each individually and can not rely on an absolute
78 * global_seqno. When we know that all tracked fences are completed
79 * (i.e. when the driver is idle), we know that the syncmap is
80 * redundant and we can discard it without loss of generality.
82 struct i915_syncmap *sync;
84 struct list_head link;
91 struct intel_timeline_cacheline {
92 struct i915_active active;
94 struct intel_timeline_hwsp *hwsp;
#endif /* __I915_TIMELINE_TYPES_H__ */