/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

/*
 * Undo a (partial) list reservation under the LRU lock: put removed
 * buffers back on their LRU lists, clear the reserved flag and wake up
 * anyone waiting on the reservation.
 */
static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (entry->removed) {
			ttm_bo_add_to_lru(bo);
			entry->removed = false;
		}
		entry->reserved = false;
		atomic_set(&bo->reserved, 0);
		wake_up_all(&bo->event_queue);
	}
}

/*
 * Take all reserved buffers off their LRU lists, recording how many
 * list references each removal dropped. Caller must hold the LRU lock.
 */
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (!entry->removed) {
			entry->put_count = ttm_bo_del_from_lru(bo);
			entry->removed = true;
		}
	}
}

/*
 * Release the references accumulated by ttm_eu_del_from_lru_locked().
 * Must be called without the LRU lock held.
 */
static void ttm_eu_list_ref_sub(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (entry->put_count) {
			ttm_bo_list_ref_sub(bo, entry->put_count, true);
			entry->put_count = 0;
		}
	}
}

void ttm_eu_backoff_reservation(struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;
	spin_lock(&glob->lru_lock);
	ttm_eu_backoff_reservation_locked(list);
	spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

int ttm_eu_reserve_buffers(struct list_head *list)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;
	uint32_t val_seq;

	if (list_empty(list))
		return 0;

	list_for_each_entry(entry, list, head) {
		entry->reserved = false;
		entry->put_count = 0;
		entry->removed = false;
	}

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	spin_lock(&glob->lru_lock);
	val_seq = entry->bo->bdev->val_seq++;

retry:
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		/* already slowpath reserved? */
		if (entry->reserved)
			continue;

		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			/* Drop the LRU lock and retry the blocking path. */
			ttm_eu_del_from_lru_locked(list);
			spin_unlock(&glob->lru_lock);
			ret = ttm_bo_reserve_nolru(bo, true, false,
						   true, val_seq);
			spin_lock(&glob->lru_lock);
			if (!ret)
				break;

			if (unlikely(ret != -EAGAIN))
				goto err;

			/* fallthrough */
		case -EAGAIN:
			ttm_eu_backoff_reservation_locked(list);

			/*
			 * Temporarily increase the sequence number on every
			 * retry, to prevent us from seeing our old
			 * reservation sequence when someone else has
			 * reserved the buffer but hasn't updated the
			 * seq_valid/seqno members yet.
			 */
			val_seq = entry->bo->bdev->val_seq++;

			spin_unlock(&glob->lru_lock);
			ttm_eu_list_ref_sub(list);
			ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq);
			if (unlikely(ret != 0))
				return ret;
			spin_lock(&glob->lru_lock);
			entry->reserved = true;
			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
				ret = -EBUSY;
				goto err;
			}
			goto retry;
		default:
			goto err;
		}

		entry->reserved = true;
		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			ret = -EBUSY;
			goto err;
		}
	}

	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	ttm_eu_list_ref_sub(list);

	return 0;

err:
	ttm_eu_backoff_reservation_locked(list);
	spin_unlock(&glob->lru_lock);
	ttm_eu_list_ref_sub(list);
	return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
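
/*
 * Usage sketch (illustrative only, not compiled): building a validation
 * list and reserving it. The two buffer objects are hypothetical; only
 * struct ttm_validate_buffer and the ttm_eu_* helpers come from this
 * file.
 */
#if 0
static int example_reserve_pair(struct ttm_buffer_object *bo_a,
				struct ttm_buffer_object *bo_b)
{
	struct ttm_validate_buffer val[2];
	LIST_HEAD(list);
	int ret;

	val[0].bo = bo_a;
	val[1].bo = bo_b;
	list_add_tail(&val[0].head, &list);
	list_add_tail(&val[1].head, &list);

	/*
	 * On success all buffers are reserved and off their LRU lists;
	 * on error the helper has already backed off every reservation
	 * it managed to take.
	 */
	ret = ttm_eu_reserve_buffers(&list);
	if (unlikely(ret != 0))
		return ret;

	/* ... validate and use the buffers ... */

	ttm_eu_backoff_reservation(&list);	/* release without fencing */
	return 0;
}
#endif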

void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	spin_lock(&glob->lru_lock);
	spin_lock(&bdev->fence_lock);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		entry->old_sync_obj = bo->sync_obj;
		bo->sync_obj = driver->sync_obj_ref(sync_obj);
		ttm_bo_unreserve_locked(bo);
		entry->reserved = false;
	}
	spin_unlock(&bdev->fence_lock);
	spin_unlock(&glob->lru_lock);

	/* Release the old fences outside of the locks. */
	list_for_each_entry(entry, list, head) {
		if (entry->old_sync_obj)
			driver->sync_obj_unref(&entry->old_sync_obj);
	}
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
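
/*
 * Usage sketch (illustrative only, not compiled): the full reserve ->
 * validate -> fence flow of an execbuf ioctl. example_validate() and
 * the fence object are hypothetical driver details; the ttm_eu_*
 * helpers are the ones defined above.
 */
#if 0
static int example_submit(struct list_head *list, void *fence)
{
	struct ttm_validate_buffer *entry;
	int ret;

	ret = ttm_eu_reserve_buffers(list);
	if (unlikely(ret != 0))
		return ret;

	list_for_each_entry(entry, list, head) {
		ret = example_validate(entry->bo);	/* hypothetical */
		if (unlikely(ret != 0)) {
			ttm_eu_backoff_reservation(list);
			return ret;
		}
	}

	/* ... build and submit the command stream, obtain "fence" ... */

	/*
	 * Attach the new fence to every buffer and unreserve them; the
	 * previous fences are released by the helper outside the locks.
	 */
	ttm_eu_fence_buffer_objects(list, fence);
	return 0;
}
#endif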