/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>
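
/*
 * A minimal sketch of how callers are expected to build the validation
 * list that the helpers below consume. It is illustrative only: the bo
 * array and count are assumptions, and nothing in TTM calls this.
 * Setting num_shared to 1 asks ttm_eu_reserve_buffers() to pre-allocate
 * one shared fence slot per buffer.
 */
static void __maybe_unused ttm_eu_example_build_list(struct list_head *list,
						     struct ttm_validate_buffer *bufs,
						     struct ttm_buffer_object **bos,
						     unsigned int count)
{
	unsigned int i;

	INIT_LIST_HEAD(list);
	for (i = 0; i < count; ++i) {
		bufs[i].bo = bos[i];		/* buffer to validate */
		bufs[i].num_shared = 1;		/* one read fence slot */
		list_add_tail(&bufs[i].head, list);
	}
}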
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
					       struct ttm_validate_buffer *entry)
{
	list_for_each_entry_continue_reverse(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		reservation_object_unlock(bo->resv);
	}
}
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ttm_bo_del_from_lru(bo);
	}
}
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->bdev->glob;

	spin_lock(&glob->lru_lock);
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ttm_bo_add_to_lru(bo);
		reservation_object_unlock(bo->resv);
	}
	spin_unlock(&glob->lru_lock);

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);
/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders. A sketch of the resulting reserve/backoff
 * calling pattern follows the function body below.
 */
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list, bool intr,
			   struct list_head *dups)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->bdev->glob;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
		if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			reservation_object_unlock(bo->resv);
			ret = -EBUSY;
		} else if (ret == -EALREADY && dups) {
			struct ttm_validate_buffer *safe = entry;

			entry = list_prev_entry(entry, head);
			list_del(&safe->head);
			list_add(&safe->head, dups);
			continue;
		}

		if (!ret) {
			if (!entry->num_shared)
				continue;

			ret = reservation_object_reserve_shared(bo->resv,
								entry->num_shared);
			if (!ret)
				continue;
		}

		/* We lost the reservation: drop every reservation taken so
		 * far and try to only reserve this buffer, then start over
		 * if this succeeds.
		 */
		ttm_eu_backoff_reservation_reverse(list, entry);

		if (ret == -EDEADLK) {
			if (intr) {
				ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
								       ticket);
			} else {
				ww_mutex_lock_slow(&bo->resv->lock, ticket);
				ret = 0;
			}
		}

		if (!ret && entry->num_shared)
			ret = reservation_object_reserve_shared(bo->resv,
								entry->num_shared);

		if (unlikely(ret != 0)) {
			if (ret == -EINTR)
				ret = -ERESTARTSYS;
			if (ticket) {
				ww_acquire_done(ticket);
				ww_acquire_fini(ticket);
			}
			return ret;
		}

		/* Move this item to the front of the list so the loop
		 * continues correctly without any extra bookkeeping.
		 */
		list_del(&entry->head);
		list_add(&entry->head, list);
	}

	if (ticket)
		ww_acquire_done(ticket);
	spin_lock(&glob->lru_lock);
	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
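
/*
 * A minimal sketch of the reserve/backoff pairing described above,
 * assuming a hypothetical driver validation step; nothing in TTM calls
 * this. On failure, every reservation taken by ttm_eu_reserve_buffers()
 * must be released again with ttm_eu_backoff_reservation() under the
 * same ticket.
 */
static int __maybe_unused ttm_eu_example_reserve(struct ww_acquire_ctx *ticket,
						 struct list_head *list,
						 bool intr)
{
	int ret = ttm_eu_reserve_buffers(ticket, list, intr, NULL);

	if (ret)
		return ret;	/* nothing is held on error */

	/*
	 * Driver-specific validation would run here. If it fails, the
	 * reservations must be dropped before returning the error:
	 *
	 *	ttm_eu_backoff_reservation(ticket, list);
	 */
	return 0;
}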
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list,
				 struct dma_fence *fence)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	glob = bdev->glob;

	spin_lock(&glob->lru_lock);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		if (entry->num_shared)
			reservation_object_add_shared_fence(bo->resv, fence);
		else
			reservation_object_add_excl_fence(bo->resv, fence);
		ttm_bo_add_to_lru(bo);
		reservation_object_unlock(bo->resv);
	}
	spin_unlock(&glob->lru_lock);
	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
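
/*
 * A minimal sketch of the complete execbuf sequence, assuming the caller
 * has already built a validation list and created a dma_fence for its
 * submission; the function name is hypothetical and nothing in TTM calls
 * it.
 */
static int __maybe_unused ttm_eu_example_submit(struct ww_acquire_ctx *ticket,
						struct list_head *list,
						struct dma_fence *fence)
{
	int ret;

	/* Deadlock-free reservation of every buffer on the list. */
	ret = ttm_eu_reserve_buffers(ticket, list, true, NULL);
	if (ret)
		return ret;

	/* A real driver would validate placements and queue work here. */

	/* Publish the fence to each reservation object and unreserve. */
	ttm_eu_fence_buffer_objects(ticket, list, fence);
	return 0;
}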