drivers/gpu/drm/ttm/ttm_execbuf_util.c

/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

/*
 * Undo the reservations taken so far: unlock, in reverse order, every
 * buffer on @list that was locked before @entry. @entry itself is left
 * untouched.
 */
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
                                               struct ttm_validate_buffer *entry)
{
        list_for_each_entry_continue_reverse(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

                dma_resv_unlock(bo->base.resv);
        }
}

void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
                                struct list_head *list)
{
        struct ttm_validate_buffer *entry;

        if (list_empty(list))
                return;

        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

                ttm_bo_move_to_lru_tail_unlocked(bo);
                dma_resv_unlock(bo->base.resv);
        }

        if (ticket)
                ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);
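
/*
 * Typical error-path use of the function above (an illustrative sketch,
 * not part of this file; my_validate_buffers() is a hypothetical driver
 * helper):
 *
 *        ret = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
 *        if (ret)
 *                return ret;
 *
 *        ret = my_validate_buffers(&list);
 *        if (ret) {
 *                ttm_eu_backoff_reservation(&ticket, &list);
 *                return ret;
 *        }
 *
 * The backoff call unlocks every buffer on @list and, when a ticket was
 * used, finishes the ww_acquire context, so no further unlock or fini is
 * needed on the error path.
 */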

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
                           struct list_head *list, bool intr,
                           struct list_head *dups)
{
        struct ttm_validate_buffer *entry;
        int ret;

        if (list_empty(list))
                return 0;

        if (ticket)
                ww_acquire_init(ticket, &reservation_ww_class);

        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

                ret = ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
                if (ret == -EALREADY && dups) {
                        struct ttm_validate_buffer *safe = entry;

                        entry = list_prev_entry(entry, head);
                        list_del(&safe->head);
                        list_add(&safe->head, dups);
                        continue;
                }

                if (!ret) {
                        if (!entry->num_shared)
                                continue;

                        ret = dma_resv_reserve_shared(bo->base.resv,
                                                      entry->num_shared);
                        if (!ret)
                                continue;
                }

                /* uh oh, we lost out, drop every reservation and try
                 * to only reserve this buffer, then start over if
                 * this succeeds.
                 */
                ttm_eu_backoff_reservation_reverse(list, entry);

                if (ret == -EDEADLK)
                        ret = ttm_bo_reserve_slowpath(bo, intr, ticket);

                if (!ret && entry->num_shared)
                        ret = dma_resv_reserve_shared(bo->base.resv,
                                                      entry->num_shared);

                if (unlikely(ret != 0)) {
                        if (ticket) {
                                ww_acquire_done(ticket);
                                ww_acquire_fini(ticket);
                        }
                        return ret;
                }

                /* move this item to the front of the list,
                 * forces correct iteration of the loop without keeping track
                 */
                list_del(&entry->head);
                list_add(&entry->head, list);
        }

        return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
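
/*
 * Example of building a validation list for ttm_eu_reserve_buffers()
 * (an illustrative sketch; bo[], NUM_BOS and the interruptible choice
 * are hypothetical driver-side details):
 *
 *        struct ttm_validate_buffer val[NUM_BOS];
 *        struct ww_acquire_ctx ticket;
 *        LIST_HEAD(list);
 *        unsigned int i;
 *        int ret;
 *
 *        for (i = 0; i < NUM_BOS; i++) {
 *                val[i].bo = bo[i];
 *                val[i].num_shared = 1;
 *                list_add_tail(&val[i].head, &list);
 *        }
 *
 *        ret = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
 *
 * On success every buffer on the list is locked and has a shared-fence
 * slot reserved; on failure nothing is left locked and the ticket has
 * already been finished.
 */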

void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
                                 struct list_head *list,
                                 struct dma_fence *fence)
{
        struct ttm_validate_buffer *entry;

        if (list_empty(list))
                return;

        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

                if (entry->num_shared)
                        dma_resv_add_shared_fence(bo->base.resv, fence);
                else
                        dma_resv_add_excl_fence(bo->base.resv, fence);
                ttm_bo_move_to_lru_tail_unlocked(bo);
                dma_resv_unlock(bo->base.resv);
        }
        if (ticket)
                ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
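
/*
 * Putting the three helpers together (an illustrative sketch;
 * my_submit_job() and the fence it returns are hypothetical driver
 * code):
 *
 *        ret = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
 *        if (ret)
 *                return ret;
 *
 *        fence = my_submit_job(&list);
 *        if (IS_ERR(fence)) {
 *                ttm_eu_backoff_reservation(&ticket, &list);
 *                return PTR_ERR(fence);
 *        }
 *
 *        ttm_eu_fence_buffer_objects(&ticket, &list, fence);
 *
 * Buffers with num_shared != 0 get @fence added as a shared fence, the
 * others as the exclusive fence; afterwards every buffer sits at the
 * LRU tail and is unlocked, and the ticket is finished, so the caller
 * must not touch the reservations again.
 */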