/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/dma-resv.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>

/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage shared and
 * exclusive fences associated with a buffer. A reservation object
 * can have attached one exclusive fence (normally associated with
 * write operations) or N shared fences (read operations). The RCU
 * mechanism is used to protect read access to fences from locked
 * write-side updates.
 */

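/*
 * Example (sketch, not part of the original file): the typical write-side
 * pattern a driver follows, assuming a driver-supplied fence @job_fence
 * that signals when a queued job completes; the names resv and job_fence
 * are hypothetical. For a single object the ww_acquire_ctx may be NULL.
 *
 *	struct dma_resv resv;
 *
 *	dma_resv_init(&resv);
 *	if (!dma_resv_lock(&resv, NULL)) {
 *		if (!dma_resv_reserve_shared(&resv, 1))
 *			dma_resv_add_shared_fence(&resv, job_fence);
 *		dma_resv_unlock(&resv);
 *	}
 */
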
DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

/**
 * dma_resv_list_alloc - allocate fence list
 * @shared_max: number of fences we need space for
 *
 * Allocate a new dma_resv_list and make sure to correctly initialize
 * shared_max.
 */
static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
{
	struct dma_resv_list *list;

	list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL);
	if (!list)
		return NULL;

	list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
		sizeof(*list->shared);

	return list;
}

/**
 * dma_resv_list_free - free fence list
 * @list: list to free
 *
 * Free a dma_resv_list and make sure to drop all references.
 */
static void dma_resv_list_free(struct dma_resv_list *list)
{
	unsigned int i;

	if (!list)
		return;

	for (i = 0; i < list->shared_count; ++i)
		dma_fence_put(rcu_dereference_protected(list->shared[i], true));

	kfree_rcu(list, rcu);
}

#if IS_ENABLED(CONFIG_LOCKDEP)
static int __init dma_resv_lockdep(void)
{
	struct mm_struct *mm = mm_alloc();
	struct ww_acquire_ctx ctx;
	struct dma_resv obj;
	int ret;

	if (!mm)
		return -ENOMEM;

	dma_resv_init(&obj);

	mmap_read_lock(mm);
	ww_acquire_init(&ctx, &reservation_ww_class);
	ret = dma_resv_lock(&obj, &ctx);
	if (ret == -EDEADLK)
		dma_resv_lock_slow(&obj, &ctx);
	fs_reclaim_acquire(GFP_KERNEL);
#ifdef CONFIG_MMU_NOTIFIER
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	__dma_fence_might_wait();
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#else
	__dma_fence_might_wait();
#endif
	fs_reclaim_release(GFP_KERNEL);
	ww_mutex_unlock(&obj.lock);
	ww_acquire_fini(&ctx);
	mmap_read_unlock(mm);

	mmput(mm);

	return 0;
}
subsys_initcall(dma_resv_lockdep);
#endif

/**
 * dma_resv_init - initialize a reservation object
 * @obj: the reservation object
 */
void dma_resv_init(struct dma_resv *obj)
{
	ww_mutex_init(&obj->lock, &reservation_ww_class);
	seqcount_ww_mutex_init(&obj->seq, &obj->lock);

	RCU_INIT_POINTER(obj->fence, NULL);
	RCU_INIT_POINTER(obj->fence_excl, NULL);
}
EXPORT_SYMBOL(dma_resv_init);

/**
 * dma_resv_fini - destroys a reservation object
 * @obj: the reservation object
 */
void dma_resv_fini(struct dma_resv *obj)
{
	struct dma_resv_list *fobj;
	struct dma_fence *excl;

	/*
	 * This object should be dead and all references must have
	 * been released to it, so no need to be protected with rcu.
	 */
	excl = rcu_dereference_protected(obj->fence_excl, 1);
	if (excl)
		dma_fence_put(excl);

	fobj = rcu_dereference_protected(obj->fence, 1);
	dma_resv_list_free(fobj);
	ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);

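/*
 * Example (sketch, not part of the original file): the expected lifecycle,
 * assuming an illustrative wrapper "struct my_buffer". By the time
 * dma_resv_fini() runs, no other thread may still access the object.
 *
 *	struct my_buffer {
 *		struct dma_resv resv;
 *	};
 *
 *	dma_resv_init(&buf->resv);
 *	// ... attach and consume fences ...
 *	dma_resv_fini(&buf->resv);
 */
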
/**
 * dma_resv_reserve_shared - Reserve space to add shared fences to
 * a dma_resv.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_shared_fence(). Must
 * be called with obj->lock held.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
{
	struct dma_resv_list *old, *new;
	unsigned int i, j, k, max;

	dma_resv_assert_held(obj);

	old = dma_resv_get_list(obj);

	if (old && old->shared_max) {
		if ((old->shared_count + num_fences) <= old->shared_max)
			return 0;
		max = max(old->shared_count + num_fences,
			  old->shared_max * 2);
	} else {
		max = max(4ul, roundup_pow_of_two(num_fences));
	}

	new = dma_resv_list_alloc(max);
	if (!new)
		return -ENOMEM;

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(old->shared[i],
						  dma_resv_held(obj));
		if (dma_fence_is_signaled(fence))
			RCU_INIT_POINTER(new->shared[--k], fence);
		else
			RCU_INIT_POINTER(new->shared[j++], fence);
	}
	new->shared_count = j;

	/*
	 * We are not changing the effective set of fences here so can
	 * merely update the pointer to the new array; both existing
	 * readers and new readers will see exactly the same set of
	 * active (unsignaled) shared fences. Individual fences and the
	 * old array are protected by RCU and so will not vanish under
	 * the gaze of the rcu_read_lock() readers.
	 */
	rcu_assign_pointer(obj->fence, new);

	if (!old)
		return 0;

	/* Drop the references to the signaled fences */
	for (i = k; i < max; ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(new->shared[i],
						  dma_resv_held(obj));
		dma_fence_put(fence);
	}
	kfree_rcu(old, rcu);

	return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_shared);

/**
 * dma_resv_add_shared_fence - Add a fence to a shared slot
 * @obj: the reservation object
 * @fence: the shared fence to add
 *
 * Add a fence to a shared slot, obj->lock must be held, and
 * dma_resv_reserve_shared() has been called.
 */
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_resv_list *fobj;
	struct dma_fence *old;
	unsigned int i, count;

	dma_fence_get(fence);

	dma_resv_assert_held(obj);

	fobj = dma_resv_get_list(obj);
	count = fobj->shared_count;

	write_seqcount_begin(&obj->seq);

	for (i = 0; i < count; ++i) {
		old = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj));
		if (old->context == fence->context ||
		    dma_fence_is_signaled(old))
			goto replace;
	}

	BUG_ON(fobj->shared_count >= fobj->shared_max);
	old = NULL;
	count++;

replace:
	RCU_INIT_POINTER(fobj->shared[i], fence);
	/* pointer update must be visible before we extend the shared_count */
	smp_store_mb(fobj->shared_count, count);

	write_seqcount_end(&obj->seq);
	dma_fence_put(old);
}
EXPORT_SYMBOL(dma_resv_add_shared_fence);

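/*
 * Example (sketch, not part of the original file): the reserve-then-add
 * pattern for tracking a read, assuming a driver fence @fence; the helper
 * name track_read_access() is hypothetical.
 *
 *	static int track_read_access(struct dma_resv *resv,
 *				     struct dma_fence *fence)
 *	{
 *		int ret;
 *
 *		ret = dma_resv_lock(resv, NULL);
 *		if (ret)
 *			return ret;
 *		ret = dma_resv_reserve_shared(resv, 1);
 *		if (!ret)
 *			dma_resv_add_shared_fence(resv, fence);
 *		dma_resv_unlock(resv);
 *		return ret;
 *	}
 */
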
/**
 * dma_resv_add_excl_fence - Add an exclusive fence.
 * @obj: the reservation object
 * @fence: the exclusive fence to add
 *
 * Add a fence to the exclusive slot. The obj->lock must be held.
 */
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_fence *old_fence = dma_resv_get_excl(obj);
	struct dma_resv_list *old;
	u32 i = 0;

	dma_resv_assert_held(obj);

	old = dma_resv_get_list(obj);
	if (old)
		i = old->shared_count;

	if (fence)
		dma_fence_get(fence);

	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);

	/* inplace update, no shared fences */
	while (i--)
		dma_fence_put(rcu_dereference_protected(old->shared[i],
							dma_resv_held(obj)));

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(dma_resv_add_excl_fence);

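/*
 * Example (sketch, not part of the original file): publishing a write by
 * replacing the exclusive slot, which also drops every shared fence;
 * @write_fence is hypothetical. Passing NULL instead clears the object.
 *
 *	dma_resv_lock(resv, NULL);
 *	dma_resv_add_excl_fence(resv, write_fence);
 *	dma_resv_unlock(resv);
 */
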
/**
 * dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from src to dst. dst->lock must be held.
 */
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
	struct dma_resv_list *src_list, *dst_list;
	struct dma_fence *old, *new;
	unsigned int i;

	dma_resv_assert_held(dst);

	rcu_read_lock();
	src_list = rcu_dereference(src->fence);

retry:
	if (src_list) {
		unsigned int shared_count = src_list->shared_count;

		rcu_read_unlock();

		dst_list = dma_resv_list_alloc(shared_count);
		if (!dst_list)
			return -ENOMEM;

		rcu_read_lock();
		src_list = rcu_dereference(src->fence);
		if (!src_list || src_list->shared_count > shared_count) {
			kfree(dst_list);
			goto retry;
		}

		dst_list->shared_count = 0;
		for (i = 0; i < src_list->shared_count; ++i) {
			struct dma_fence *fence;

			fence = rcu_dereference(src_list->shared[i]);
			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &fence->flags))
				continue;

			if (!dma_fence_get_rcu(fence)) {
				dma_resv_list_free(dst_list);
				src_list = rcu_dereference(src->fence);
				goto retry;
			}

			if (dma_fence_is_signaled(fence)) {
				dma_fence_put(fence);
				continue;
			}

			rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], fence);
		}
	} else {
		dst_list = NULL;
	}

	new = dma_fence_get_rcu_safe(&src->fence_excl);
	rcu_read_unlock();

	src_list = dma_resv_get_list(dst);
	old = dma_resv_get_excl(dst);

	write_seqcount_begin(&dst->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(dst->fence_excl, new);
	RCU_INIT_POINTER(dst->fence, dst_list);
	write_seqcount_end(&dst->seq);

	dma_resv_list_free(src_list);
	dma_fence_put(old);

	return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);

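/*
 * Example (sketch, not part of the original file): carrying fences over
 * when migrating a buffer's backing store, e.g. on eviction; dst_bo and
 * src_bo are hypothetical.
 *
 *	int ret;
 *
 *	dma_resv_lock(&dst_bo->resv, NULL);
 *	ret = dma_resv_copy_fences(&dst_bo->resv, &src_bo->resv);
 *	dma_resv_unlock(&dst_bo->resv);
 */
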
/**
 * dma_resv_get_fences_rcu - Get an object's shared and exclusive
 * fences without update side lock held
 * @obj: the reservation object
 * @pfence_excl: the returned exclusive fence (or NULL)
 * @pshared_count: the number of shared fences returned
 * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
 * the required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object. If the pointer for the
 * exclusive fence is not specified the fence is put into the array of the
 * shared fences as well. Returns either zero or -ENOMEM.
 */
int dma_resv_get_fences_rcu(struct dma_resv *obj,
			    struct dma_fence **pfence_excl,
			    unsigned *pshared_count,
			    struct dma_fence ***pshared)
{
	struct dma_fence **shared = NULL;
	struct dma_fence *fence_excl;
	unsigned int shared_count;
	int ret = 1;

	do {
		struct dma_resv_list *fobj;
		unsigned int i, seq;
		size_t sz = 0;

		shared_count = i = 0;
		rcu_read_lock();
		seq = read_seqcount_begin(&obj->seq);

		fence_excl = rcu_dereference(obj->fence_excl);
		if (fence_excl && !dma_fence_get_rcu(fence_excl))
			goto unlock;

		fobj = rcu_dereference(obj->fence);
		if (fobj)
			sz += sizeof(*shared) * fobj->shared_max;

		if (!pfence_excl && fence_excl)
			sz += sizeof(*shared);

		if (sz) {
			struct dma_fence **nshared;

			nshared = krealloc(shared, sz,
					   GFP_NOWAIT | __GFP_NOWARN);
			if (!nshared) {
				rcu_read_unlock();

				dma_fence_put(fence_excl);
				fence_excl = NULL;

				nshared = krealloc(shared, sz, GFP_KERNEL);
				if (nshared) {
					shared = nshared;
					continue;
				}

				ret = -ENOMEM;
				break;
			}
			shared = nshared;
			shared_count = fobj ? fobj->shared_count : 0;
			for (i = 0; i < shared_count; ++i) {
				shared[i] = rcu_dereference(fobj->shared[i]);
				if (!dma_fence_get_rcu(shared[i]))
					break;
			}
		}

		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
			while (i--)
				dma_fence_put(shared[i]);
			dma_fence_put(fence_excl);
			goto unlock;
		}

		ret = 0;
unlock:
		rcu_read_unlock();
	} while (ret);

	if (pfence_excl)
		*pfence_excl = fence_excl;
	else if (fence_excl)
		shared[shared_count++] = fence_excl;

	if (!shared_count) {
		kfree(shared);
		shared = NULL;
	}

	*pshared_count = shared_count;
	*pshared = shared;
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);

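/*
 * Example (sketch, not part of the original file): snapshotting all fences
 * lock-free and then releasing the snapshot; resv is hypothetical.
 *
 *	struct dma_fence *excl, **shared;
 *	unsigned int count, i;
 *	int ret;
 *
 *	ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
 *	if (ret)
 *		return ret;
 *	// ... inspect or wait on the snapshot ...
 *	for (i = 0; i < count; ++i)
 *		dma_fence_put(shared[i]);
 *	kfree(shared);
 *	dma_fence_put(excl);	// dma_fence_put() tolerates NULL
 */
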
/**
 * dma_resv_wait_timeout_rcu - Wait on a reservation object's
 * shared and/or exclusive fences.
 * @obj: the reservation object
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
			       bool wait_all, bool intr,
			       unsigned long timeout)
{
	struct dma_fence *fence;
	unsigned seq, shared_count;
	long ret = timeout ? timeout : 1;
	int i;

retry:
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();
	i = -1;

	fence = rcu_dereference(obj->fence_excl);
	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		if (!dma_fence_get_rcu(fence))
			goto unlock_retry;

		if (dma_fence_is_signaled(fence)) {
			dma_fence_put(fence);
			fence = NULL;
		}
	} else {
		fence = NULL;
	}

	if (wait_all) {
		struct dma_resv_list *fobj = rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		for (i = 0; !fence && i < shared_count; ++i) {
			struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);

			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &lfence->flags))
				continue;

			if (!dma_fence_get_rcu(lfence))
				goto unlock_retry;

			if (dma_fence_is_signaled(lfence)) {
				dma_fence_put(lfence);
				continue;
			}

			fence = lfence;
			break;
		}
	}

	rcu_read_unlock();
	if (fence) {
		if (read_seqcount_retry(&obj->seq, seq)) {
			dma_fence_put(fence);
			goto retry;
		}

		ret = dma_fence_wait_timeout(fence, intr, ret);
		dma_fence_put(fence);
		if (ret > 0 && wait_all && (i + 1 < shared_count))
			goto retry;
	}
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu);

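/*
 * Example (sketch, not part of the original file): an interruptible wait
 * on all fences with a one second timeout, mapped onto the usual errno
 * convention; resv is hypothetical.
 *
 *	long lret;
 *
 *	lret = dma_resv_wait_timeout_rcu(resv, true, true,
 *					 msecs_to_jiffies(1000));
 *	if (lret < 0)
 *		return lret;		// interrupted (-ERESTARTSYS)
 *	if (lret == 0)
 *		return -ETIMEDOUT;	// still busy after the timeout
 */
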
static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
{
	struct dma_fence *fence, *lfence = passed_fence;
	int ret = 1;

	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
		fence = dma_fence_get_rcu(lfence);
		if (!fence)
			return -1;

		ret = !!dma_fence_is_signaled(fence);
		dma_fence_put(fence);
	}
	return ret;
}

/**
 * dma_resv_test_signaled_rcu - Test if a reservation object's
 * fences have been signaled.
 * @obj: the reservation object
 * @test_all: if true, test all fences, otherwise only test the exclusive
 * fence
 *
 * RETURNS
 * true if all fences signaled, else false
 */
bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
{
	unsigned seq, shared_count;
	int ret;

	rcu_read_lock();
retry:
	ret = true;
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);

	if (test_all) {
		struct dma_resv_list *fobj = rcu_dereference(obj->fence);
		unsigned int i;

		if (fobj)
			shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);

			ret = dma_resv_test_signaled_single(fence);
			if (ret < 0)
				goto retry;
			else if (!ret)
				break;
		}

		if (read_seqcount_retry(&obj->seq, seq))
			goto retry;
	}

	if (!shared_count) {
		struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);

		if (fence_excl) {
			ret = dma_resv_test_signaled_single(fence_excl);
			if (ret < 0)
				goto retry;

			if (read_seqcount_retry(&obj->seq, seq))
				goto retry;
		}
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);

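/*
 * Example (sketch, not part of the original file): a non-blocking idle
 * check, as a busy-ioctl path might do; bo is hypothetical.
 *
 *	if (!dma_resv_test_signaled_rcu(&bo->resv, true))
 *		return -EBUSY;
 */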