drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"

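/*
 * A sync object collects the fences a submission still has to wait for.
 * The typical flow in this file is: amdgpu_sync_create() to initialize the
 * object, amdgpu_sync_fence()/amdgpu_sync_vm_fence()/amdgpu_sync_resv() to
 * collect dependencies, amdgpu_sync_peek_fence()/amdgpu_sync_get_fence() or
 * amdgpu_sync_wait() to consume them, and finally amdgpu_sync_free().
 */

/* One hash table entry per fence context, keyed by dma_fence->context. */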
struct amdgpu_sync_entry {
        struct hlist_node       node;
        struct dma_fence        *fence;
};

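/* Slab cache for amdgpu_sync_entry allocations, managed by
 * amdgpu_sync_init()/amdgpu_sync_fini().
 */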
static struct kmem_cache *amdgpu_sync_slab;

/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
        hash_init(sync->fences);
        sync->last_vm_update = NULL;
}

/**
 * amdgpu_sync_same_dev - test if a fence belongs to us
 *
 * @adev: amdgpu device to use for the test
 * @f: fence to test
 *
 * Test if the fence was issued by us.
 */
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
                                 struct dma_fence *f)
{
        struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

        if (s_fence) {
                struct amdgpu_ring *ring;

                ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
                return ring->adev == adev;
        }

        return false;
}

/**
 * amdgpu_sync_get_owner - extract the owner of a fence
 *
 * @f: fence to get the owner from
 *
 * Extract who originally created the fence.
 */
static void *amdgpu_sync_get_owner(struct dma_fence *f)
{
        struct drm_sched_fence *s_fence;
        struct amdgpu_amdkfd_fence *kfd_fence;

        if (!f)
                return AMDGPU_FENCE_OWNER_UNDEFINED;

        s_fence = to_drm_sched_fence(f);
        if (s_fence)
                return s_fence->owner;

        kfd_fence = to_amdgpu_amdkfd_fence(f);
        if (kfd_fence)
                return AMDGPU_FENCE_OWNER_KFD;

        return AMDGPU_FENCE_OWNER_UNDEFINED;
}

/**
 * amdgpu_sync_keep_later - Keep the later fence
 *
 * @keep: existing fence to test
 * @fence: new fence
 *
 * Either keep the existing fence or the new one, depending on which one is
 * later.
 */
static void amdgpu_sync_keep_later(struct dma_fence **keep,
                                   struct dma_fence *fence)
{
        if (*keep && dma_fence_is_later(*keep, fence))
                return;

        dma_fence_put(*keep);
        *keep = dma_fence_get(fence);
}

/**
 * amdgpu_sync_add_later - add the fence to the hash
 *
 * @sync: sync object to add the fence to
 * @f: fence to add
 *
 * Tries to add the fence to an existing hash entry. Returns true when an entry
 * was found, false otherwise.
 */
static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
{
        struct amdgpu_sync_entry *e;

        hash_for_each_possible(sync->fences, e, node, f->context) {
                if (unlikely(e->fence->context != f->context))
                        continue;

                amdgpu_sync_keep_later(&e->fence, f);
                return true;
        }
        return false;
}

/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @sync: sync object to add fence to
 * @f: fence to sync to
 *
 * Add the fence to the sync object. Returns 0 on success or -ENOMEM when the
 * entry allocation fails.
 */
int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f)
{
        struct amdgpu_sync_entry *e;

        if (!f)
                return 0;

        if (amdgpu_sync_add_later(sync, f))
                return 0;

        e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
        if (!e)
                return -ENOMEM;

        hash_add(sync->fences, &e->node, f->context);
        e->fence = dma_fence_get(f);
        return 0;
}

/**
 * amdgpu_sync_vm_fence - remember to sync to this VM fence
 *
 * @sync: sync object to add fence to
 * @fence: the VM fence to add
 *
 * Add the fence to the sync object and remember it as VM update.
 */
int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence)
{
        if (!fence)
                return 0;

        amdgpu_sync_keep_later(&sync->last_vm_update, fence);
        return amdgpu_sync_fence(sync, fence);
}

/**
 * amdgpu_sync_resv - sync to a reservation object
 *
 * @adev: amdgpu device
 * @sync: sync object to add fences from reservation object to
 * @resv: reservation object with embedded fence
 * @mode: how owner affects which fences we sync to
 * @owner: owner of the planned job submission
 *
 * Sync to the fences in the reservation object, filtered by @mode and @owner.
 * Returns 0 on success or a negative error code on failure.
 */
int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
                     struct dma_resv *resv, enum amdgpu_sync_mode mode,
                     void *owner)
{
        struct dma_resv_list *flist;
        struct dma_fence *f;
        unsigned i;
        int r = 0;

        if (resv == NULL)
                return -EINVAL;

        /* always sync to the exclusive fence */
        f = dma_resv_get_excl(resv);
        r = amdgpu_sync_fence(sync, f);

        flist = dma_resv_get_list(resv);
        if (!flist || r)
                return r;

        for (i = 0; i < flist->shared_count; ++i) {
                void *fence_owner;

                f = rcu_dereference_protected(flist->shared[i],
                                              dma_resv_held(resv));

                fence_owner = amdgpu_sync_get_owner(f);

                /* Always sync to moves, no matter what */
                if (fence_owner == AMDGPU_FENCE_OWNER_UNDEFINED) {
                        r = amdgpu_sync_fence(sync, f);
                        if (r)
                                break;
                }

                /* We only want to trigger KFD eviction fences on
                 * evict or move jobs. Skip KFD fences otherwise.
                 */
                if (fence_owner == AMDGPU_FENCE_OWNER_KFD &&
                    owner != AMDGPU_FENCE_OWNER_UNDEFINED)
                        continue;

                /* Never sync to VM updates either. */
                if (fence_owner == AMDGPU_FENCE_OWNER_VM &&
                    owner != AMDGPU_FENCE_OWNER_UNDEFINED)
                        continue;

                /* Ignore fences depending on the sync mode */
                switch (mode) {
                case AMDGPU_SYNC_ALWAYS:
                        break;

                case AMDGPU_SYNC_NE_OWNER:
                        if (amdgpu_sync_same_dev(adev, f) &&
                            fence_owner == owner)
                                continue;
                        break;

                case AMDGPU_SYNC_EQ_OWNER:
                        if (amdgpu_sync_same_dev(adev, f) &&
                            fence_owner != owner)
                                continue;
                        break;

                case AMDGPU_SYNC_EXPLICIT:
                        continue;
                }

                WARN(debug_evictions && fence_owner == AMDGPU_FENCE_OWNER_KFD,
                     "Adding eviction fence to sync obj");
                r = amdgpu_sync_fence(sync, f);
                if (r)
                        break;
        }
        return r;
}

/**
 * amdgpu_sync_peek_fence - get the next fence not signaled yet
 *
 * @sync: the sync object
 * @ring: optional ring to use for test
 *
 * Returns the next fence not signaled yet without removing it from the sync
 * object. Already signaled fences are dropped from the sync object while
 * scanning.
 */
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
                                         struct amdgpu_ring *ring)
{
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
        int i;

        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                struct dma_fence *f = e->fence;
                struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

                if (dma_fence_is_signaled(f)) {
                        hash_del(&e->node);
                        dma_fence_put(f);
                        kmem_cache_free(amdgpu_sync_slab, e);
                        continue;
                }
                if (ring && s_fence) {
                        /* For fences from the same ring it is sufficient
                         * when they are scheduled.
                         */
                        if (s_fence->sched == &ring->sched) {
                                if (dma_fence_is_signaled(&s_fence->scheduled))
                                        continue;

                                return &s_fence->scheduled;
                        }
                }

                return f;
        }

        return NULL;
}

/**
 * amdgpu_sync_get_fence - get the next fence from the sync object
 *
 * @sync: sync object to use
 *
 * Gets and removes the next not yet signaled fence from the sync object. The
 * caller owns the returned reference and must drop it with dma_fence_put().
 */
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
        struct dma_fence *f;
        int i;

        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                f = e->fence;

                hash_del(&e->node);
                kmem_cache_free(amdgpu_sync_slab, e);

                if (!dma_fence_is_signaled(f))
                        return f;

                dma_fence_put(f);
        }
        return NULL;
}

/**
 * amdgpu_sync_clone - clone a sync object
 *
 * @source: sync object to clone
 * @clone: pointer to destination sync object
 *
 * Adds references to all unsignaled fences in @source to @clone. Also
 * removes signaled fences from @source while at it.
 */
int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
{
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
        struct dma_fence *f;
        int i, r;

        hash_for_each_safe(source->fences, i, tmp, e, node) {
                f = e->fence;
                if (!dma_fence_is_signaled(f)) {
                        r = amdgpu_sync_fence(clone, f);
                        if (r)
                                return r;
                } else {
                        hash_del(&e->node);
                        dma_fence_put(f);
                        kmem_cache_free(amdgpu_sync_slab, e);
                }
        }

        dma_fence_put(clone->last_vm_update);
        clone->last_vm_update = dma_fence_get(source->last_vm_update);

        return 0;
}

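/**
 * amdgpu_sync_wait - wait for all fences in the sync object
 *
 * @sync: sync object to wait on
 * @intr: if true the wait is interruptible
 *
 * Waits for every fence collected in the sync object and drops it from the
 * object. Returns 0 on success or the error returned by dma_fence_wait().
 */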
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
{
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
        int i, r;

        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                r = dma_fence_wait(e->fence, intr);
                if (r)
                        return r;

                hash_del(&e->node);
                dma_fence_put(e->fence);
                kmem_cache_free(amdgpu_sync_slab, e);
        }

        return 0;
}

/**
 * amdgpu_sync_free - free the sync object
 *
 * @sync: sync object to use
 *
 * Free the sync object.
 */
void amdgpu_sync_free(struct amdgpu_sync *sync)
{
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
        unsigned i;

        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                hash_del(&e->node);
                dma_fence_put(e->fence);
                kmem_cache_free(amdgpu_sync_slab, e);
        }

        dma_fence_put(sync->last_vm_update);
}

/**
 * amdgpu_sync_init - init sync object subsystem
 *
 * Allocate the slab cache used for sync object entries.
 */
int amdgpu_sync_init(void)
{
        amdgpu_sync_slab = kmem_cache_create(
                "amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
                SLAB_HWCACHE_ALIGN, NULL);
        if (!amdgpu_sync_slab)
                return -ENOMEM;

        return 0;
}

/**
 * amdgpu_sync_fini - fini sync object subsystem
 *
 * Free the slab cache used for sync object entries.
 */
void amdgpu_sync_fini(void)
{
        kmem_cache_destroy(amdgpu_sync_slab);
}