Merge tag 'char-misc-5.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregk...
[linux-2.6-microblaze.git] drivers/dma-buf/dma-resv.c
index e744fd8..a480af9 100644
@@ -48,6 +48,8 @@
  * write operations) or N shared fences (read operations).  The RCU
  * mechanism is used to protect read access to fences from locked
  * write-side updates.
+ *
+ * See struct dma_resv for more details.
  */
 
 DEFINE_WD_CLASS(reservation_ww_class);
@@ -137,7 +139,11 @@ EXPORT_SYMBOL(dma_resv_fini);
  * @num_fences: number of fences we want to add
  *
  * Should be called before dma_resv_add_shared_fence().  Must
- * be called with obj->lock held.
+ * be called with @obj locked through dma_resv_lock().
+ *
+ * Note that the preallocated slots need to be re-reserved if @obj is unlocked
+ * at any time before calling dma_resv_add_shared_fence(). This is validated
+ * when CONFIG_DEBUG_MUTEXES is enabled.
  *
  * RETURNS
  * Zero for success, or -errno
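
As a concrete illustration of the rule above, here is a minimal driver-side sketch (assuming <linux/dma-resv.h> is included; the helper name submit_add_read_fence() is hypothetical) that reserves the slot and consumes it without dropping the lock in between:

/* Hypothetical helper: publish a read fence on a buffer's reservation object.
 * The slot reserved by dma_resv_reserve_shared() must be consumed by
 * dma_resv_add_shared_fence() before the lock is dropped again.
 */
static int submit_add_read_fence(struct dma_resv *resv, struct dma_fence *fence)
{
        int ret;

        ret = dma_resv_lock(resv, NULL);
        if (ret)
                return ret;

        ret = dma_resv_reserve_shared(resv, 1);
        if (!ret)
                dma_resv_add_shared_fence(resv, fence);

        dma_resv_unlock(resv);
        return ret;
}
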
@@ -234,8 +240,10 @@ EXPORT_SYMBOL(dma_resv_reset_shared_max);
  * @obj: the reservation object
  * @fence: the shared fence to add
  *
- * Add a fence to a shared slot, obj->lock must be held, and
+ * Add a fence to a shared slot, @obj must be locked with dma_resv_lock(), and
  * dma_resv_reserve_shared() has been called.
+ *
+ * See also &dma_resv.fence for a discussion of the semantics.
  */
 void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
 {
@@ -278,9 +286,11 @@ EXPORT_SYMBOL(dma_resv_add_shared_fence);
 /**
  * dma_resv_add_excl_fence - Add an exclusive fence.
  * @obj: the reservation object
- * @fence: the shared fence to add
+ * @fence: the exclusive fence to add
  *
- * Add a fence to the exclusive slot.  The obj->lock must be held.
+ * Add a fence to the exclusive slot. @obj must be locked with dma_resv_lock().
+ * Note that this function replaces all fences attached to @obj, see also
+ * &dma_resv.fence_excl for a discussion of the semantics.
  */
 void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
 {
@@ -313,6 +323,106 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
 }
 EXPORT_SYMBOL(dma_resv_add_excl_fence);
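
The exclusive slot follows the same locking rule; a short sketch, with the helper name submit_add_write_fence() being hypothetical:

/* Hypothetical helper: attach a write fence. As documented above, this
 * replaces all fences previously attached to the reservation object.
 */
static int submit_add_write_fence(struct dma_resv *resv, struct dma_fence *fence)
{
        int ret;

        ret = dma_resv_lock(resv, NULL);
        if (ret)
                return ret;

        dma_resv_add_excl_fence(resv, fence);
        dma_resv_unlock(resv);
        return 0;
}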
 
+/**
+ * dma_resv_iter_restart_unlocked - restart the unlocked iterator
+ * @cursor: The dma_resv_iter object to restart
+ *
+ * Restart the unlocked iteration by initializing the cursor object.
+ */
+static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
+{
+       cursor->seq = read_seqcount_begin(&cursor->obj->seq);
+       cursor->index = -1;
+       if (cursor->all_fences)
+               cursor->fences = dma_resv_shared_list(cursor->obj);
+       else
+               cursor->fences = NULL;
+       cursor->is_restarted = true;
+}
+
+/**
+ * dma_resv_iter_walk_unlocked - walk over fences in a dma_resv obj
+ * @cursor: cursor to record the current position
+ *
+ * Walk to the next fence in the dma_resv object which is not yet signaled.
+ * The returned fence holds an extra local reference so it stays alive. If a
+ * concurrent modification is detected, the whole iteration is started over.
+ */
+static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
+{
+       struct dma_resv *obj = cursor->obj;
+
+       do {
+               /* Drop the reference from the previous round */
+               dma_fence_put(cursor->fence);
+
+               if (cursor->index == -1) {
+                       cursor->fence = dma_resv_excl_fence(obj);
+                       cursor->index++;
+                       if (!cursor->fence)
+                               continue;
+
+               } else if (!cursor->fences ||
+                          cursor->index >= cursor->fences->shared_count) {
+                       cursor->fence = NULL;
+                       break;
+
+               } else {
+                       struct dma_resv_list *fences = cursor->fences;
+                       unsigned int idx = cursor->index++;
+
+                       cursor->fence = rcu_dereference(fences->shared[idx]);
+               }
+               cursor->fence = dma_fence_get_rcu(cursor->fence);
+               if (!cursor->fence || !dma_fence_is_signaled(cursor->fence))
+                       break;
+       } while (true);
+}
+
+/**
+ * dma_resv_iter_first_unlocked - first fence in an unlocked dma_resv obj.
+ * @cursor: the cursor with the current position
+ *
+ * Returns the first fence from an unlocked dma_resv obj.
+ */
+struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor)
+{
+       rcu_read_lock();
+       do {
+               dma_resv_iter_restart_unlocked(cursor);
+               dma_resv_iter_walk_unlocked(cursor);
+       } while (read_seqcount_retry(&cursor->obj->seq, cursor->seq));
+       rcu_read_unlock();
+
+       return cursor->fence;
+}
+EXPORT_SYMBOL(dma_resv_iter_first_unlocked);
+
+/**
+ * dma_resv_iter_next_unlocked - next fence in an unlocked dma_resv obj.
+ * @cursor: the cursor with the current position
+ *
+ * Returns the next fence from an unlocked dma_resv obj.
+ */
+struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor)
+{
+       bool restart;
+
+       rcu_read_lock();
+       cursor->is_restarted = false;
+       restart = read_seqcount_retry(&cursor->obj->seq, cursor->seq);
+       do {
+               if (restart)
+                       dma_resv_iter_restart_unlocked(cursor);
+               dma_resv_iter_walk_unlocked(cursor);
+               restart = true;
+       } while (read_seqcount_retry(&cursor->obj->seq, cursor->seq));
+       rcu_read_unlock();
+
+       return cursor->fence;
+}
+EXPORT_SYMBOL(dma_resv_iter_next_unlocked);
+
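
A sketch of how a caller consumes the new unlocked iterator, assuming the dma_resv_iter_begin()/dma_resv_iter_end() helpers and the dma_resv_for_each_fence_unlocked() macro added alongside this code in include/linux/dma-resv.h; count_unsignaled() is a hypothetical helper:

/* Hypothetical helper: count the unsignaled fences of @resv without taking
 * the reservation lock. A concurrent modification restarts the iteration,
 * so everything collected before the restart has to be discarded.
 */
static unsigned int count_unsignaled(struct dma_resv *resv, bool all_fences)
{
        struct dma_resv_iter cursor;
        struct dma_fence *fence;
        unsigned int count = 0;

        dma_resv_iter_begin(&cursor, resv, all_fences);
        dma_resv_for_each_fence_unlocked(&cursor, fence) {
                /* The iterator was restarted, throw away the partial result. */
                if (dma_resv_iter_is_restarted(&cursor))
                        count = 0;
                count++;
        }
        dma_resv_iter_end(&cursor);

        return count;
}
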
 /**
  * dma_resv_copy_fences - Copy all fences from src to dst.
  * @dst: the destination reservation object
@@ -322,74 +432,54 @@ EXPORT_SYMBOL(dma_resv_add_excl_fence);
  */
 int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 {
-       struct dma_resv_list *src_list, *dst_list;
-       struct dma_fence *old, *new;
-       unsigned int i;
+       struct dma_resv_iter cursor;
+       struct dma_resv_list *list;
+       struct dma_fence *f, *excl;
 
        dma_resv_assert_held(dst);
 
-       rcu_read_lock();
-       src_list = dma_resv_shared_list(src);
-
-retry:
-       if (src_list) {
-               unsigned int shared_count = src_list->shared_count;
-
-               rcu_read_unlock();
+       list = NULL;
+       excl = NULL;
 
-               dst_list = dma_resv_list_alloc(shared_count);
-               if (!dst_list)
-                       return -ENOMEM;
+       dma_resv_iter_begin(&cursor, src, true);
+       dma_resv_for_each_fence_unlocked(&cursor, f) {
 
-               rcu_read_lock();
-               src_list = dma_resv_shared_list(src);
-               if (!src_list || src_list->shared_count > shared_count) {
-                       kfree(dst_list);
-                       goto retry;
-               }
+               if (dma_resv_iter_is_restarted(&cursor)) {
+                       dma_resv_list_free(list);
+                       dma_fence_put(excl);
 
-               dst_list->shared_count = 0;
-               for (i = 0; i < src_list->shared_count; ++i) {
-                       struct dma_fence __rcu **dst;
-                       struct dma_fence *fence;
+                       if (cursor.fences) {
+                               unsigned int cnt = cursor.fences->shared_count;
 
-                       fence = rcu_dereference(src_list->shared[i]);
-                       if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
-                                    &fence->flags))
-                               continue;
+                               list = dma_resv_list_alloc(cnt);
+                               if (!list) {
+                                       dma_resv_iter_end(&cursor);
+                                       return -ENOMEM;
+                               }
 
-                       if (!dma_fence_get_rcu(fence)) {
-                               dma_resv_list_free(dst_list);
-                               src_list = dma_resv_shared_list(src);
-                               goto retry;
-                       }
+                               list->shared_count = 0;
 
-                       if (dma_fence_is_signaled(fence)) {
-                               dma_fence_put(fence);
-                               continue;
+                       } else {
+                               list = NULL;
                        }
-
-                       dst = &dst_list->shared[dst_list->shared_count++];
-                       rcu_assign_pointer(*dst, fence);
+                       excl = NULL;
                }
-       } else {
-               dst_list = NULL;
-       }
 
-       new = dma_fence_get_rcu_safe(&src->fence_excl);
-       rcu_read_unlock();
-
-       src_list = dma_resv_shared_list(dst);
-       old = dma_resv_excl_fence(dst);
+               dma_fence_get(f);
+               if (dma_resv_iter_is_exclusive(&cursor))
+                       excl = f;
+               else
+                       RCU_INIT_POINTER(list->shared[list->shared_count++], f);
+       }
+       dma_resv_iter_end(&cursor);
 
        write_seqcount_begin(&dst->seq);
-       /* write_seqcount_begin provides the necessary memory barrier */
-       RCU_INIT_POINTER(dst->fence_excl, new);
-       RCU_INIT_POINTER(dst->fence, dst_list);
+       excl = rcu_replace_pointer(dst->fence_excl, excl, dma_resv_held(dst));
+       list = rcu_replace_pointer(dst->fence, list, dma_resv_held(dst));
        write_seqcount_end(&dst->seq);
 
-       dma_resv_list_free(src_list);
-       dma_fence_put(old);
+       dma_resv_list_free(list);
+       dma_fence_put(excl);
 
        return 0;
 }
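
dma_resv_copy_fences() itself demonstrates the restart handling on the read side; a caller only needs to hold the destination lock, as in this sketch (mirror_fences() is a hypothetical helper):

static int mirror_fences(struct dma_resv *dst, struct dma_resv *src)
{
        int ret;

        /* Only @dst needs to be locked, @src is read under RCU. */
        ret = dma_resv_lock(dst, NULL);
        if (ret)
                return ret;

        ret = dma_resv_copy_fences(dst, src);
        dma_resv_unlock(dst);
        return ret;
}
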
@@ -399,99 +489,61 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
  * dma_resv_get_fences - Get an object's shared and exclusive
  * fences without update side lock held
  * @obj: the reservation object
- * @pfence_excl: the returned exclusive fence (or NULL)
- * @pshared_count: the number of shared fences returned
- * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
+ * @fence_excl: the returned exclusive fence (or NULL)
+ * @shared_count: the number of shared fences returned
+ * @shared: the array of shared fence ptrs returned (array is krealloc'd to
  * the required size, and must be freed by caller)
  *
  * Retrieve all fences from the reservation object. If the pointer for the
  * exclusive fence is not specified the fence is put into the array of the
  * shared fences as well. Returns either zero or -ENOMEM.
  */
-int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
-                       unsigned int *pshared_count,
-                       struct dma_fence ***pshared)
+int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **fence_excl,
+                       unsigned int *shared_count, struct dma_fence ***shared)
 {
-       struct dma_fence **shared = NULL;
-       struct dma_fence *fence_excl;
-       unsigned int shared_count;
-       int ret = 1;
-
-       do {
-               struct dma_resv_list *fobj;
-               unsigned int i, seq;
-               size_t sz = 0;
-
-               shared_count = i = 0;
-
-               rcu_read_lock();
-               seq = read_seqcount_begin(&obj->seq);
+       struct dma_resv_iter cursor;
+       struct dma_fence *fence;
 
-               fence_excl = dma_resv_excl_fence(obj);
-               if (fence_excl && !dma_fence_get_rcu(fence_excl))
-                       goto unlock;
+       *shared_count = 0;
+       *shared = NULL;
 
-               fobj = dma_resv_shared_list(obj);
-               if (fobj)
-                       sz += sizeof(*shared) * fobj->shared_max;
+       if (fence_excl)
+               *fence_excl = NULL;
 
-               if (!pfence_excl && fence_excl)
-                       sz += sizeof(*shared);
+       dma_resv_iter_begin(&cursor, obj, true);
+       dma_resv_for_each_fence_unlocked(&cursor, fence) {
 
-               if (sz) {
-                       struct dma_fence **nshared;
+               if (dma_resv_iter_is_restarted(&cursor)) {
+                       unsigned int count;
 
-                       nshared = krealloc(shared, sz,
-                                          GFP_NOWAIT | __GFP_NOWARN);
-                       if (!nshared) {
-                               rcu_read_unlock();
+                       while (*shared_count)
+                               dma_fence_put((*shared)[--(*shared_count)]);
 
-                               dma_fence_put(fence_excl);
-                               fence_excl = NULL;
+                       if (fence_excl)
+                               dma_fence_put(*fence_excl);
 
-                               nshared = krealloc(shared, sz, GFP_KERNEL);
-                               if (nshared) {
-                                       shared = nshared;
-                                       continue;
-                               }
+                       count = cursor.fences ? cursor.fences->shared_count : 0;
+                       count += fence_excl ? 0 : 1;
 
-                               ret = -ENOMEM;
-                               break;
+                       /* Re-allocate the array for the current fence count */
+                       *shared = krealloc_array(*shared, count,
+                                                sizeof(void *),
+                                                GFP_KERNEL);
+                       if (count && !*shared) {
+                               dma_resv_iter_end(&cursor);
+                               return -ENOMEM;
                        }
-                       shared = nshared;
-                       shared_count = fobj ? fobj->shared_count : 0;
-                       for (i = 0; i < shared_count; ++i) {
-                               shared[i] = rcu_dereference(fobj->shared[i]);
-                               if (!dma_fence_get_rcu(shared[i]))
-                                       break;
-                       }
-               }
-
-               if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
-                       while (i--)
-                               dma_fence_put(shared[i]);
-                       dma_fence_put(fence_excl);
-                       goto unlock;
                }
 
-               ret = 0;
-unlock:
-               rcu_read_unlock();
-       } while (ret);
-
-       if (pfence_excl)
-               *pfence_excl = fence_excl;
-       else if (fence_excl)
-               shared[shared_count++] = fence_excl;
-
-       if (!shared_count) {
-               kfree(shared);
-               shared = NULL;
+               dma_fence_get(fence);
+               if (dma_resv_iter_is_exclusive(&cursor) && fence_excl)
+                       *fence_excl = fence;
+               else
+                       (*shared)[(*shared_count)++] = fence;
        }
+       dma_resv_iter_end(&cursor);
 
-       *pshared_count = shared_count;
-       *pshared = shared;
-       return ret;
+       return 0;
 }
 EXPORT_SYMBOL_GPL(dma_resv_get_fences);
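
The returned fence references and the krealloc'ed array are owned by the caller; a sketch of the release pattern (snapshot_has_unsignaled() is a hypothetical helper):

/* Hypothetical helper: snapshot all fences of @resv and report whether any
 * of them is still unsignaled. Every reference and the array itself must be
 * released by the caller.
 */
static bool snapshot_has_unsignaled(struct dma_resv *resv)
{
        struct dma_fence *excl, **shared;
        unsigned int i, count;
        bool busy = false;

        if (dma_resv_get_fences(resv, &excl, &count, &shared))
                return true; /* Treat allocation failure as busy. */

        if (excl) {
                busy |= !dma_fence_is_signaled(excl);
                dma_fence_put(excl);
        }

        for (i = 0; i < count; i++) {
                busy |= !dma_fence_is_signaled(shared[i]);
                dma_fence_put(shared[i]);
        }
        kfree(shared);

        return busy;
}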
 
@@ -513,94 +565,25 @@ long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
                           unsigned long timeout)
 {
        long ret = timeout ? timeout : 1;
-       unsigned int seq, shared_count;
+       struct dma_resv_iter cursor;
        struct dma_fence *fence;
-       int i;
-
-retry:
-       shared_count = 0;
-       seq = read_seqcount_begin(&obj->seq);
-       rcu_read_lock();
-       i = -1;
-
-       fence = dma_resv_excl_fence(obj);
-       if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
-               if (!dma_fence_get_rcu(fence))
-                       goto unlock_retry;
-
-               if (dma_fence_is_signaled(fence)) {
-                       dma_fence_put(fence);
-                       fence = NULL;
-               }
-
-       } else {
-               fence = NULL;
-       }
-
-       if (wait_all) {
-               struct dma_resv_list *fobj = dma_resv_shared_list(obj);
-
-               if (fobj)
-                       shared_count = fobj->shared_count;
-
-               for (i = 0; !fence && i < shared_count; ++i) {
-                       struct dma_fence *lfence;
 
-                       lfence = rcu_dereference(fobj->shared[i]);
-                       if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
-                                    &lfence->flags))
-                               continue;
-
-                       if (!dma_fence_get_rcu(lfence))
-                               goto unlock_retry;
-
-                       if (dma_fence_is_signaled(lfence)) {
-                               dma_fence_put(lfence);
-                               continue;
-                       }
+       dma_resv_iter_begin(&cursor, obj, wait_all);
+       dma_resv_for_each_fence_unlocked(&cursor, fence) {
 
-                       fence = lfence;
-                       break;
+               ret = dma_fence_wait_timeout(fence, intr, ret);
+               if (ret <= 0) {
+                       dma_resv_iter_end(&cursor);
+                       return ret;
                }
        }
+       dma_resv_iter_end(&cursor);
 
-       rcu_read_unlock();
-       if (fence) {
-               if (read_seqcount_retry(&obj->seq, seq)) {
-                       dma_fence_put(fence);
-                       goto retry;
-               }
-
-               ret = dma_fence_wait_timeout(fence, intr, ret);
-               dma_fence_put(fence);
-               if (ret > 0 && wait_all && (i + 1 < shared_count))
-                       goto retry;
-       }
        return ret;
-
-unlock_retry:
-       rcu_read_unlock();
-       goto retry;
 }
 EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
 
 
-static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
-{
-       struct dma_fence *fence, *lfence = passed_fence;
-       int ret = 1;
-
-       if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
-               fence = dma_fence_get_rcu(lfence);
-               if (!fence)
-                       return -1;
-
-               ret = !!dma_fence_is_signaled(fence);
-               dma_fence_put(fence);
-       }
-       return ret;
-}
-
 /**
  * dma_resv_test_signaled - Test if a reservation object's fences have been
  * signaled.
@@ -609,49 +592,24 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
  * fence
  *
  * Callers are not required to hold specific locks, but maybe hold
- * dma_resv_lock() already
+ * dma_resv_lock() already.
+ *
  * RETURNS
- * true if all fences signaled, else false
+ *
+ * True if all fences signaled, else false.
  */
 bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
 {
+       struct dma_resv_iter cursor;
        struct dma_fence *fence;
-       unsigned int seq;
-       int ret;
-
-       rcu_read_lock();
-retry:
-       ret = true;
-       seq = read_seqcount_begin(&obj->seq);
-
-       if (test_all) {
-               struct dma_resv_list *fobj = dma_resv_shared_list(obj);
-               unsigned int i, shared_count;
-
-               shared_count = fobj ? fobj->shared_count : 0;
-               for (i = 0; i < shared_count; ++i) {
-                       fence = rcu_dereference(fobj->shared[i]);
-                       ret = dma_resv_test_signaled_single(fence);
-                       if (ret < 0)
-                               goto retry;
-                       else if (!ret)
-                               break;
-               }
-       }
-
-       fence = dma_resv_excl_fence(obj);
-       if (ret && fence) {
-               ret = dma_resv_test_signaled_single(fence);
-               if (ret < 0)
-                       goto retry;
 
+       dma_resv_iter_begin(&cursor, obj, test_all);
+       dma_resv_for_each_fence_unlocked(&cursor, fence) {
+               dma_resv_iter_end(&cursor);
+               return false;
        }
-
-       if (read_seqcount_retry(&obj->seq, seq))
-               goto retry;
-
-       rcu_read_unlock();
-       return ret;
+       dma_resv_iter_end(&cursor);
+       return true;
 }
 EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
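
A short sketch of how the unlocked test and wait entry points are typically combined, first a cheap busy check and then a blocking, interruptible wait; buffer_idle() is a hypothetical helper:

/* Hypothetical helper: return 0 once the tracked fences have signaled,
 * -ETIMEDOUT if @timeout jiffies elapsed first, or a negative error such as
 * -ERESTARTSYS when the interruptible wait was interrupted.
 */
static int buffer_idle(struct dma_resv *resv, bool wait_all, unsigned long timeout)
{
        long ret;

        /* Fast path: everything already signaled. */
        if (dma_resv_test_signaled(resv, wait_all))
                return 0;

        ret = dma_resv_wait_timeout(resv, wait_all, true, timeout);
        if (ret < 0)
                return ret;

        return ret ? 0 : -ETIMEDOUT;
}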