// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#include <linux/dma-resv.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>
/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage shared and
 * exclusive fences associated with a buffer. A reservation object
 * can have attached one exclusive fence (normally associated with
 * write operations) or N shared fences (read operations). The RCU
 * mechanism is used to protect read access to fences from locked
 * write-side updates.
 */
DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);
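/*
 * Illustrative sketch (not part of the original file): the ww class above
 * is what lets drivers lock multiple reservation objects without deadlock.
 * This minimal two-buffer example shows the -EDEADLK backoff dance; real
 * code would loop until all locks are held and handle errors throughout.
 */
static void __maybe_unused example_lock_two_buffers(struct dma_resv *a,
						    struct dma_resv *b)
{
	struct ww_acquire_ctx ctx;

	ww_acquire_init(&ctx, &reservation_ww_class);
	dma_resv_lock(a, &ctx);	/* first lock of a fresh ctx cannot deadlock */
	if (dma_resv_lock(b, &ctx) == -EDEADLK) {
		/* Back off and reacquire in the order the winner dictates. */
		dma_resv_unlock(a);
		dma_resv_lock_slow(b, &ctx);
		dma_resv_lock(a, &ctx);
	}
	ww_acquire_done(&ctx);

	/* ... operate on both buffers ... */

	dma_resv_unlock(b);
	dma_resv_unlock(a);
	ww_acquire_fini(&ctx);
}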
/**
 * dma_resv_list_alloc - allocate fence list
 * @shared_max: number of fences we need space for
 *
 * Allocate a new dma_resv_list and make sure to correctly initialize
 * shared_max.
 */
static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
{
	struct dma_resv_list *list;

	list = kmalloc(struct_size(list, shared, shared_max), GFP_KERNEL);
	if (!list)
		return NULL;

	/* Derive shared_max from the actual size of the allocation */
	list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
		sizeof(*list->shared);

	return list;
}
/**
 * dma_resv_list_free - free fence list
 * @list: list to free
 *
 * Free a dma_resv_list and make sure to drop all references.
 */
static void dma_resv_list_free(struct dma_resv_list *list)
{
	unsigned int i;

	if (!list)
		return;

	for (i = 0; i < list->shared_count; ++i)
		dma_fence_put(rcu_dereference_protected(list->shared[i], true));

	kfree_rcu(list, rcu);
}
/**
 * dma_resv_init - initialize a reservation object
 * @obj: the reservation object
 */
void dma_resv_init(struct dma_resv *obj)
{
	ww_mutex_init(&obj->lock, &reservation_ww_class);
	seqcount_ww_mutex_init(&obj->seq, &obj->lock);

	RCU_INIT_POINTER(obj->fence, NULL);
	RCU_INIT_POINTER(obj->fence_excl, NULL);
}
EXPORT_SYMBOL(dma_resv_init);
/**
 * dma_resv_fini - destroys a reservation object
 * @obj: the reservation object
 */
void dma_resv_fini(struct dma_resv *obj)
{
	struct dma_resv_list *fobj;
	struct dma_fence *excl;

	/*
	 * This object should be dead and all references must have
	 * been released to it, so no need to be protected with rcu.
	 */
	excl = rcu_dereference_protected(obj->fence_excl, 1);
	if (excl)
		dma_fence_put(excl);

	fobj = rcu_dereference_protected(obj->fence, 1);
	dma_resv_list_free(fobj);
	ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);
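/*
 * Illustrative sketch (hypothetical structure, not from this file): a
 * dma_resv is typically embedded in a driver's buffer object, initialized
 * before first use and destroyed only once every fence reference to it
 * has been dropped.
 */
struct example_buffer {
	struct dma_resv resv;
	/* ... backing pages, driver private state ... */
};

static void __maybe_unused example_buffer_lifetime(struct example_buffer *buf)
{
	dma_resv_init(&buf->resv);

	/* ... buffer is mapped, used by the device, fences attached ... */

	dma_resv_fini(&buf->resv);	/* object must be idle and unreferenced */
}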
/**
 * dma_resv_reserve_shared - Reserve space to add shared fences to
 * a dma_resv.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_shared_fence(). Must
 * be called with obj->lock held.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
{
	struct dma_resv_list *old, *new;
	unsigned int i, j, k, max;

	dma_resv_assert_held(obj);

	old = dma_resv_shared_list(obj);
	if (old && old->shared_max) {
		if ((old->shared_count + num_fences) <= old->shared_max)
			return 0;
		max = max(old->shared_count + num_fences, old->shared_max * 2);
	} else {
		max = max(4ul, roundup_pow_of_two(num_fences));
	}

	new = dma_resv_list_alloc(max);
	if (!new)
		return -ENOMEM;

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(old->shared[i],
						  dma_resv_held(obj));
		if (dma_fence_is_signaled(fence))
			RCU_INIT_POINTER(new->shared[--k], fence);
		else
			RCU_INIT_POINTER(new->shared[j++], fence);
	}
	new->shared_count = j;

	/*
	 * We are not changing the effective set of fences here so can
	 * merely update the pointer to the new array; both existing
	 * readers and new readers will see exactly the same set of
	 * active (unsignaled) shared fences. Individual fences and the
	 * old array are protected by RCU and so will not vanish under
	 * the gaze of the rcu_read_lock() readers.
	 */
	rcu_assign_pointer(obj->fence, new);

	if (!old)
		return 0;

	/* Drop the references to the signaled fences */
	for (i = k; i < max; ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(new->shared[i],
						  dma_resv_held(obj));
		dma_fence_put(fence);
	}
	kfree_rcu(old, rcu);

	return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_shared);
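/*
 * Illustrative sketch: the expected calling pattern for the reserve/add
 * pair. "fence" is a hypothetical fence the caller owns a reference to;
 * the slot must be reserved under the lock before the fence is published
 * with dma_resv_add_shared_fence() below.
 */
static int __maybe_unused example_publish_read_fence(struct dma_resv *obj,
						     struct dma_fence *fence)
{
	int ret;

	ret = dma_resv_lock(obj, NULL);
	if (ret)
		return ret;

	ret = dma_resv_reserve_shared(obj, 1);
	if (!ret)
		dma_resv_add_shared_fence(obj, fence);

	dma_resv_unlock(obj);
	return ret;
}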
#ifdef CONFIG_DEBUG_MUTEXES
/**
 * dma_resv_reset_shared_max - reset shared fences for debugging
 * @obj: the dma_resv object to reset
 *
 * Reset the number of pre-reserved shared slots to test that drivers do
 * correct slot allocation using dma_resv_reserve_shared(). See also
 * &dma_resv_list.shared_max.
 */
void dma_resv_reset_shared_max(struct dma_resv *obj)
{
	struct dma_resv_list *fences = dma_resv_shared_list(obj);

	dma_resv_assert_held(obj);

	/* Test shared fence slot reservation */
	if (fences)
		fences->shared_max = fences->shared_count;
}
EXPORT_SYMBOL(dma_resv_reset_shared_max);
#endif
/**
 * dma_resv_add_shared_fence - Add a fence to a shared slot
 * @obj: the reservation object
 * @fence: the shared fence to add
 *
 * Add a fence to a shared slot, obj->lock must be held, and
 * dma_resv_reserve_shared() has been called.
 */
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_resv_list *fobj;
	struct dma_fence *old;
	unsigned int i, count;

	dma_fence_get(fence);

	dma_resv_assert_held(obj);

	fobj = dma_resv_shared_list(obj);
	count = fobj->shared_count;

	write_seqcount_begin(&obj->seq);

	for (i = 0; i < count; ++i) {
		old = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj));
		if (old->context == fence->context ||
		    dma_fence_is_signaled(old))
			goto replace;
	}

	BUG_ON(fobj->shared_count >= fobj->shared_max);
	old = NULL;
	count++;

replace:
	RCU_INIT_POINTER(fobj->shared[i], fence);
	/* pointer update must be visible before we extend the shared_count */
	smp_store_mb(fobj->shared_count, count);

	write_seqcount_end(&obj->seq);
	dma_fence_put(old);
}
EXPORT_SYMBOL(dma_resv_add_shared_fence);
/**
 * dma_resv_add_excl_fence - Add an exclusive fence.
 * @obj: the reservation object
 * @fence: the exclusive fence to add
 *
 * Add a fence to the exclusive slot. The obj->lock must be held.
 */
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_fence *old_fence = dma_resv_excl_fence(obj);
	struct dma_resv_list *old;
	u32 i = 0;

	dma_resv_assert_held(obj);

	old = dma_resv_shared_list(obj);
	if (old)
		i = old->shared_count;

	if (fence)
		dma_fence_get(fence);

	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);

	/* inplace update, no shared fences */
	while (i--)
		dma_fence_put(rcu_dereference_protected(old->shared[i],
							dma_resv_held(obj)));

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(dma_resv_add_excl_fence);
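/*
 * Illustrative sketch: a writer replaces the exclusive slot with its job's
 * fence; "job_fence" is hypothetical and owned by the caller. Note that,
 * as implemented above, this also drops all shared fences.
 */
static int __maybe_unused example_publish_write_fence(struct dma_resv *obj,
						      struct dma_fence *job_fence)
{
	int ret;

	ret = dma_resv_lock(obj, NULL);
	if (ret)
		return ret;

	dma_resv_add_excl_fence(obj, job_fence);

	dma_resv_unlock(obj);
	return 0;
}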
/**
 * dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from src to dst. dst->lock must be held.
 */
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
	struct dma_resv_list *src_list, *dst_list;
	struct dma_fence *old, *new;
	unsigned int i;

	dma_resv_assert_held(dst);

	rcu_read_lock();
	src_list = dma_resv_shared_list(src);

retry:
	if (src_list) {
		unsigned int shared_count = src_list->shared_count;

		rcu_read_unlock();

		dst_list = dma_resv_list_alloc(shared_count);
		if (!dst_list)
			return -ENOMEM;

		rcu_read_lock();
		src_list = dma_resv_shared_list(src);
		if (!src_list || src_list->shared_count > shared_count) {
			kfree(dst_list);
			goto retry;
		}

		dst_list->shared_count = 0;
		for (i = 0; i < src_list->shared_count; ++i) {
			struct dma_fence __rcu **dst;
			struct dma_fence *fence;

			fence = rcu_dereference(src_list->shared[i]);
			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &fence->flags))
				continue;

			if (!dma_fence_get_rcu(fence)) {
				dma_resv_list_free(dst_list);
				src_list = dma_resv_shared_list(src);
				goto retry;
			}

			if (dma_fence_is_signaled(fence)) {
				dma_fence_put(fence);
				continue;
			}

			dst = &dst_list->shared[dst_list->shared_count++];
			rcu_assign_pointer(*dst, fence);
		}
	} else {
		dst_list = NULL;
	}

	new = dma_fence_get_rcu_safe(&src->fence_excl);
	rcu_read_unlock();

	src_list = dma_resv_shared_list(dst);
	old = dma_resv_excl_fence(dst);

	write_seqcount_begin(&dst->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(dst->fence_excl, new);
	RCU_INIT_POINTER(dst->fence, dst_list);
	write_seqcount_end(&dst->seq);

	dma_resv_list_free(src_list);
	dma_fence_put(old);

	return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);
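/*
 * Illustrative sketch: duplicating fence state when cloning a buffer
 * object. Only the destination needs to be locked; the source is read
 * under RCU by dma_resv_copy_fences() itself.
 */
static int __maybe_unused example_clone_fences(struct dma_resv *dst,
					       struct dma_resv *src)
{
	int ret;

	ret = dma_resv_lock(dst, NULL);
	if (ret)
		return ret;

	ret = dma_resv_copy_fences(dst, src);

	dma_resv_unlock(dst);
	return ret;
}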
/**
 * dma_resv_get_fences - Get an object's shared and exclusive
 * fences without update side lock held
 * @obj: the reservation object
 * @pfence_excl: the returned exclusive fence (or NULL)
 * @pshared_count: the number of shared fences returned
 * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
 * the required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object. If the pointer for the
 * exclusive fence is not specified the fence is put into the array of the
 * shared fences as well. Returns either zero or -ENOMEM.
 */
int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
			unsigned int *pshared_count,
			struct dma_fence ***pshared)
{
	struct dma_fence **shared = NULL;
	struct dma_fence *fence_excl;
	unsigned int shared_count;
	int ret = 1;

	do {
		struct dma_resv_list *fobj;
		unsigned int i, seq;
		size_t sz = 0;

		shared_count = i = 0;

		rcu_read_lock();
		seq = read_seqcount_begin(&obj->seq);

		fence_excl = dma_resv_excl_fence(obj);
		if (fence_excl && !dma_fence_get_rcu(fence_excl))
			goto unlock;

		fobj = dma_resv_shared_list(obj);
		if (fobj)
			sz += sizeof(*shared) * fobj->shared_max;

		if (!pfence_excl && fence_excl)
			sz += sizeof(*shared);

		if (sz) {
			struct dma_fence **nshared;

			nshared = krealloc(shared, sz,
					   GFP_NOWAIT | __GFP_NOWARN);
			if (!nshared) {
				rcu_read_unlock();

				dma_fence_put(fence_excl);
				fence_excl = NULL;

				nshared = krealloc(shared, sz, GFP_KERNEL);
				if (nshared) {
					shared = nshared;
					continue;
				}

				ret = -ENOMEM;
				break;
			}
			shared = nshared;
			shared_count = fobj ? fobj->shared_count : 0;
			for (i = 0; i < shared_count; ++i) {
				shared[i] = rcu_dereference(fobj->shared[i]);
				if (!dma_fence_get_rcu(shared[i]))
					break;
			}
		}

		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
			while (i--)
				dma_fence_put(shared[i]);
			dma_fence_put(fence_excl);
			goto unlock;
		}

		ret = 0;
unlock:
		rcu_read_unlock();
	} while (ret);

	if (pfence_excl)
		*pfence_excl = fence_excl;
	else if (fence_excl)
		shared[shared_count++] = fence_excl;

	*pshared_count = shared_count;
	*pshared = shared;
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences);
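/*
 * Illustrative sketch: taking a lockless snapshot of all fences. The
 * caller owns one reference to each returned fence plus the krealloc'd
 * array and must release all of them.
 */
static int __maybe_unused example_snapshot_fences(struct dma_resv *obj)
{
	struct dma_fence **shared, *excl;
	unsigned int i, count;
	int ret;

	ret = dma_resv_get_fences(obj, &excl, &count, &shared);
	if (ret)
		return ret;

	/* ... inspect or wait on the snapshotted fences ... */

	for (i = 0; i < count; ++i)
		dma_fence_put(shared[i]);
	kfree(shared);
	dma_fence_put(excl);	/* NULL-safe */

	return 0;
}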
/**
 * dma_resv_wait_timeout - Wait on a reservation object's
 * shared and/or exclusive fences.
 * @obj: the reservation object
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
			   unsigned long timeout)
{
	long ret = timeout ? timeout : 1;
	unsigned int seq, shared_count;
	struct dma_fence *fence;
	int i;

retry:
	shared_count = 0;
	i = -1;
	rcu_read_lock();
	seq = read_seqcount_begin(&obj->seq);

	fence = dma_resv_excl_fence(obj);
	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		if (!dma_fence_get_rcu(fence))
			goto unlock_retry;

		if (dma_fence_is_signaled(fence)) {
			dma_fence_put(fence);
			fence = NULL;
		}
	} else {
		fence = NULL;
	}

	if (wait_all) {
		struct dma_resv_list *fobj = dma_resv_shared_list(obj);

		if (fobj)
			shared_count = fobj->shared_count;

		for (i = 0; !fence && i < shared_count; ++i) {
			struct dma_fence *lfence;

			lfence = rcu_dereference(fobj->shared[i]);
			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &lfence->flags))
				continue;

			if (!dma_fence_get_rcu(lfence))
				goto unlock_retry;

			if (dma_fence_is_signaled(lfence)) {
				dma_fence_put(lfence);
				continue;
			}

			fence = lfence;
			break;
		}
	}

	rcu_read_unlock();
	if (fence) {
		if (read_seqcount_retry(&obj->seq, seq)) {
			dma_fence_put(fence);
			goto retry;
		}

		ret = dma_fence_wait_timeout(fence, intr, ret);
		dma_fence_put(fence);
		if (ret > 0 && wait_all && (i + 1 < shared_count))
			goto retry;
	}
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
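/*
 * Illustrative sketch: a blocking "wait for the buffer to go idle" helper
 * with a one second timeout, mapping the tristate return value of
 * dma_resv_wait_timeout() onto a plain errno.
 */
static int __maybe_unused example_wait_idle(struct dma_resv *obj)
{
	long ret;

	/* wait_all = true: wait for shared and exclusive fences */
	ret = dma_resv_wait_timeout(obj, true, true, HZ);
	if (ret == 0)
		return -ETIME;	/* timed out */
	if (ret < 0)
		return ret;	/* e.g. -ERESTARTSYS */
	return 0;
}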
static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
{
	struct dma_fence *fence, *lfence = passed_fence;
	int ret = 1;

	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
		fence = dma_fence_get_rcu(lfence);
		if (!fence)
			return -1;

		ret = !!dma_fence_is_signaled(fence);
		dma_fence_put(fence);
	}
	return ret;
}
/**
 * dma_resv_test_signaled - Test if a reservation object's fences have been
 * signaled.
 * @obj: the reservation object
 * @test_all: if true, test all fences, otherwise only test the exclusive
 * fence
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 * RETURNS
 * True if all fences are signaled, else false.
 */
bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
{
	struct dma_fence *fence;
	unsigned int seq;
	int ret;

	rcu_read_lock();
retry:
	ret = true;
	seq = read_seqcount_begin(&obj->seq);

	if (test_all) {
		struct dma_resv_list *fobj = dma_resv_shared_list(obj);
		unsigned int i, shared_count;

		shared_count = fobj ? fobj->shared_count : 0;
		for (i = 0; i < shared_count; ++i) {
			fence = rcu_dereference(fobj->shared[i]);
			ret = dma_resv_test_signaled_single(fence);
			if (ret < 0)
				goto retry;
			else if (!ret)
				break;
		}
	}

	fence = dma_resv_excl_fence(obj);
	if (ret && fence) {
		ret = dma_resv_test_signaled_single(fence);
		if (ret < 0)
			goto retry;
	}

	if (read_seqcount_retry(&obj->seq, seq))
		goto retry;

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
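/*
 * Illustrative sketch: a nonblocking idle check, e.g. for a poll-style
 * ioctl. Purely opportunistic, since new fences can be attached right
 * after the check returns.
 */
static bool __maybe_unused example_is_idle(struct dma_resv *obj)
{
	/* test_all = true: shared and exclusive fences must have signaled */
	return dma_resv_test_signaled(obj, true);
}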
#if IS_ENABLED(CONFIG_LOCKDEP)
static int __init dma_resv_lockdep(void)
{
	struct mm_struct *mm = mm_alloc();
	struct ww_acquire_ctx ctx;
	struct dma_resv obj;
	struct address_space mapping;
	int ret;

	if (!mm)
		return -ENOMEM;

	dma_resv_init(&obj);
	address_space_init_once(&mapping);

	mmap_read_lock(mm);
	ww_acquire_init(&ctx, &reservation_ww_class);
	ret = dma_resv_lock(&obj, &ctx);
	if (ret == -EDEADLK)
		dma_resv_lock_slow(&obj, &ctx);
	fs_reclaim_acquire(GFP_KERNEL);
	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
	i_mmap_lock_write(&mapping);
	i_mmap_unlock_write(&mapping);
#ifdef CONFIG_MMU_NOTIFIER
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	__dma_fence_might_wait();
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#else
	__dma_fence_might_wait();
#endif
	fs_reclaim_release(GFP_KERNEL);
	ww_mutex_unlock(&obj.lock);
	ww_acquire_fini(&ctx);
	mmap_read_unlock(mm);

	mmput(mm);

	return 0;
}
subsys_initcall(dma_resv_lockdep);
#endif