drivers/gpu/drm/i915/selftests/i915_active.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/kref.h>

#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"

#include "i915_selftest.h"

#include "igt_flush_test.h"
#include "lib_sw_fence.h"

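/*
 * live_active couples two lifetimes: a kref for the object itself and
 * an i915_active tracking GPU activity. The active callback takes an
 * extra reference while the tracker is busy; the retire callback sets
 * "retired" and drops it, so the tests can observe whether retirement
 * actually ran.
 */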
struct live_active {
        struct i915_active base;
        struct kref ref;
        bool retired;
};

static void __live_get(struct live_active *active)
{
        kref_get(&active->ref);
}

static void __live_free(struct live_active *active)
{
        i915_active_fini(&active->base);
        kfree(active);
}

static void __live_release(struct kref *ref)
{
        struct live_active *active = container_of(ref, typeof(*active), ref);

        __live_free(active);
}

static void __live_put(struct live_active *active)
{
        kref_put(&active->ref, __live_release);
}

static int __live_active(struct i915_active *base)
{
        struct live_active *active = container_of(base, typeof(*active), base);

        __live_get(active);
        return 0;
}

static void __live_retire(struct i915_active *base)
{
        struct live_active *active = container_of(base, typeof(*active), base);

        active->retired = true;
        __live_put(active);
}

static struct live_active *__live_alloc(struct drm_i915_private *i915)
{
        struct live_active *active;

        active = kzalloc(sizeof(*active), GFP_KERNEL);
        if (!active)
                return NULL;

        kref_init(&active->ref);
        i915_active_init(&active->base, __live_active, __live_retire);

        return active;
}

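/*
 * Build a busy i915_active: one kernel request per uabi engine, each
 * gated on the same unsignaled heap fence so that no request can be
 * submitted (and hence none retired) while the tracker is still being
 * populated. The fence is committed on all exit paths so the queued
 * requests may then run and retire. On success the caller owns a
 * reference, to be dropped with __live_put().
 */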
static struct live_active *
__live_active_setup(struct drm_i915_private *i915)
{
        struct intel_engine_cs *engine;
        struct i915_sw_fence *submit;
        struct live_active *active;
        unsigned int count = 0;
        int err = 0;

        active = __live_alloc(i915);
        if (!active)
                return ERR_PTR(-ENOMEM);

        submit = heap_fence_create(GFP_KERNEL);
        if (!submit) {
                kfree(active);
                return ERR_PTR(-ENOMEM);
        }

        err = i915_active_acquire(&active->base);
        if (err)
                goto out;

        for_each_uabi_engine(engine, i915) {
                struct i915_request *rq;

                rq = intel_engine_create_kernel_request(engine);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        break;
                }

                err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
                                                       submit,
                                                       GFP_KERNEL);
                if (err >= 0)
                        err = i915_active_add_request(&active->base, rq);
                i915_request_add(rq);
                if (err) {
                        pr_err("Failed to track active ref!\n");
                        break;
                }

                count++;
        }

        i915_active_release(&active->base);
        if (READ_ONCE(active->retired) && count) {
                pr_err("i915_active retired before submission!\n");
                err = -EINVAL;
        }
        if (atomic_read(&active->base.count) != count) {
                pr_err("i915_active not tracking all requests, found %d, expected %d\n",
                       atomic_read(&active->base.count), count);
                err = -EINVAL;
        }

out:
        i915_sw_fence_commit(submit);
        heap_fence_put(submit);
        if (err) {
                __live_put(active);
                active = ERR_PTR(err);
        }

        return active;
}

static int live_active_wait(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct live_active *active;
        int err = 0;

        /* Check that we get a callback when requests retire upon waiting */

        active = __live_active_setup(i915);
        if (IS_ERR(active))
                return PTR_ERR(active);

        i915_active_wait(&active->base);
        if (!READ_ONCE(active->retired)) {
                struct drm_printer p = drm_err_printer(__func__);

                pr_err("i915_active not retired after waiting!\n");
                i915_active_print(&active->base, &p);

                err = -EINVAL;
        }

        __live_put(active);

        if (igt_flush_test(i915))
                err = -EIO;

        return err;
}

static int live_active_retire(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct live_active *active;
        int err = 0;

        /* Check that we get a callback when requests are indirectly retired */

        active = __live_active_setup(i915);
        if (IS_ERR(active))
                return PTR_ERR(active);

        /* waits for & retires all requests */
        if (igt_flush_test(i915))
                err = -EIO;

        if (!READ_ONCE(active->retired)) {
                struct drm_printer p = drm_err_printer(__func__);

                pr_err("i915_active not retired after flushing!\n");
                i915_active_print(&active->base, &p);

                err = -EINVAL;
        }

        __live_put(active);

        return err;
}

int i915_active_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(live_active_wait),
                SUBTEST(live_active_retire),
        };

        if (intel_gt_is_wedged(&i915->gt))
                return 0;

        return i915_subtests(tests, i915);
}
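
/*
 * Note: as with the other i915 selftests, this file is expected to be
 * built by being #included at the bottom of i915_active.c (guarded by
 * CONFIG_DRM_I915_SELFTEST), which is how the helpers below can reach
 * static internals such as struct active_node and is_barrier(). The
 * live selftests are typically run via the module parameter, e.g.
 * i915.live_selftests=-1.
 */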
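/*
 * A barrier node may be promoted to a real fence concurrently (see
 * add_active_barriers), so is_barrier() is checked both before and
 * after reading the engine, with a read barrier in between, to ensure
 * the returned engine really came from a barrier node.
 */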
static struct intel_engine_cs *node_to_barrier(struct active_node *it)
{
        struct intel_engine_cs *engine;

        if (!is_barrier(&it->base))
                return NULL;

        engine = __barrier_to_engine(it);
        smp_rmb(); /* serialise with add_active_barriers */
        if (!is_barrier(&it->base))
                return NULL;

        return engine;
}

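/*
 * Debug dump of an i915_active: the active/retire callbacks, the
 * current use count, whether preallocated barriers remain, and each
 * tracked node, shown either as an engine barrier or as the timeline
 * of a still-active fence.
 */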
void i915_active_print(struct i915_active *ref, struct drm_printer *m)
{
        drm_printf(m, "active %pS:%pS\n", ref->active, ref->retire);
        drm_printf(m, "\tcount: %d\n", atomic_read(&ref->count));
        drm_printf(m, "\tpreallocated barriers? %s\n",
                   yesno(!llist_empty(&ref->preallocated_barriers)));

        if (i915_active_acquire_if_busy(ref)) {
                struct active_node *it, *n;

                rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
                        struct intel_engine_cs *engine;

                        engine = node_to_barrier(it);
                        if (engine) {
                                drm_printf(m, "\tbarrier: %s\n", engine->name);
                                continue;
                        }

                        if (i915_active_fence_isset(&it->base)) {
                                drm_printf(m,
                                           "\ttimeline: %llx\n", it->timeline);
                                continue;
                        }
                }

                i915_active_release(ref);
        }
}

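/*
 * The kernel's generic spin_unlock_wait() primitive was removed, so
 * open-code the equivalent: taking and then releasing the lock
 * guarantees that any critical section which began before the call
 * has completed.
 */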
static void spin_unlock_wait(spinlock_t *lock)
{
        spin_lock_irq(lock);
        spin_unlock_irq(lock);
}

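/*
 * Flush all concurrent users of the i915_active: wait under each
 * tracked fence's lock for any callbacks still executing, then for
 * the retire callback (which serialises on ref->tree_lock), and
 * finally for the deferred retire worker.
 */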
void i915_active_unlock_wait(struct i915_active *ref)
{
        if (i915_active_acquire_if_busy(ref)) {
                struct active_node *it, *n;

                rcu_read_lock();
                rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
                        struct dma_fence *f;

                        /* Wait for all active callbacks */
                        f = rcu_dereference(it->base.fence);
                        if (f)
                                spin_unlock_wait(f->lock);
                }
                rcu_read_unlock();

                i915_active_release(ref);
        }

        /* And wait for the retire callback */
        spin_lock_irq(&ref->tree_lock);
        spin_unlock_irq(&ref->tree_lock);

        /* ... which may have been on a thread instead */
        flush_work(&ref->work);
}