dma-buf: rename and cleanup dma_resv_get_list v2
author Christian König <christian.koenig@amd.com>
Tue, 11 May 2021 12:11:41 +0000 (14:11 +0200)
committer Christian König <christian.koenig@amd.com>
Sun, 6 Jun 2021 09:18:19 +0000 (11:18 +0200)
When the comment needs to state explicitly that this doesn't get a reference to
the object, then the function is rather badly named.

Rename the function and use it in even more places.

v2: use dma_resv_shared_list as new name

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210602111714.212426-5-christian.koenig@amd.com
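For context, a minimal sketch of how the renamed accessor is typically used; the helper name below is hypothetical and not part of this patch:

/* Hypothetical example, not part of the patch: dma_resv_shared_list() does
 * not take references to the fences, so the returned list is only valid
 * while the reservation lock or the RCU read side lock is held.
 */
static void example_print_shared_count(struct dma_resv *resv)
{
	struct dma_resv_list *list;

	rcu_read_lock();
	list = dma_resv_shared_list(resv);
	pr_info("shared fences: %u\n", list ? list->shared_count : 0);
	rcu_read_unlock();
}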
13 files changed:
drivers/dma-buf/dma-resv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/etnaviv/etnaviv_gem.c
drivers/gpu/drm/i915/gem/i915_gem_busy.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/nouveau/nouveau_fence.c
drivers/gpu/drm/qxl/qxl_debugfs.c
drivers/gpu/drm/radeon/radeon_sync.c
drivers/gpu/drm/ttm/ttm_bo.c
include/linux/dma-resv.h

index ed7b4e8..62e7e05 100644 (file)
@@ -149,8 +149,7 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
 
        dma_resv_assert_held(obj);
 
-       old = dma_resv_get_list(obj);
-
+       old = dma_resv_shared_list(obj);
        if (old && old->shared_max) {
                if ((old->shared_count + num_fences) <= old->shared_max)
                        return 0;
@@ -219,12 +218,13 @@ EXPORT_SYMBOL(dma_resv_reserve_shared);
  */
 void dma_resv_reset_shared_max(struct dma_resv *obj)
 {
-       /* Test shared fence slot reservation */
-       if (rcu_access_pointer(obj->fence)) {
-               struct dma_resv_list *fence = dma_resv_get_list(obj);
+       struct dma_resv_list *fences = dma_resv_shared_list(obj);
 
-               fence->shared_max = fence->shared_count;
-       }
+       dma_resv_assert_held(obj);
+
+       /* Test shared fence slot reservation */
+       if (fences)
+               fences->shared_max = fences->shared_count;
 }
 EXPORT_SYMBOL(dma_resv_reset_shared_max);
 #endif
@@ -247,7 +247,7 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
 
        dma_resv_assert_held(obj);
 
-       fobj = dma_resv_get_list(obj);
+       fobj = dma_resv_shared_list(obj);
        count = fobj->shared_count;
 
        write_seqcount_begin(&obj->seq);
@@ -290,7 +290,7 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
 
        dma_resv_assert_held(obj);
 
-       old = dma_resv_get_list(obj);
+       old = dma_resv_shared_list(obj);
        if (old)
                i = old->shared_count;
 
@@ -329,7 +329,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
        dma_resv_assert_held(dst);
 
        rcu_read_lock();
-       src_list = rcu_dereference(src->fence);
+       src_list = dma_resv_shared_list(src);
 
 retry:
        if (src_list) {
@@ -342,7 +342,7 @@ retry:
                        return -ENOMEM;
 
                rcu_read_lock();
-               src_list = rcu_dereference(src->fence);
+               src_list = dma_resv_shared_list(src);
                if (!src_list || src_list->shared_count > shared_count) {
                        kfree(dst_list);
                        goto retry;
@@ -360,7 +360,7 @@ retry:
 
                        if (!dma_fence_get_rcu(fence)) {
                                dma_resv_list_free(dst_list);
-                               src_list = rcu_dereference(src->fence);
+                               src_list = dma_resv_shared_list(src);
                                goto retry;
                        }
 
@@ -379,7 +379,7 @@ retry:
        new = dma_fence_get_rcu_safe(&src->fence_excl);
        rcu_read_unlock();
 
-       src_list = dma_resv_get_list(dst);
+       src_list = dma_resv_shared_list(dst);
        old = dma_resv_excl_fence(dst);
 
        write_seqcount_begin(&dst->seq);
@@ -432,7 +432,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
                if (fence_excl && !dma_fence_get_rcu(fence_excl))
                        goto unlock;
 
-               fobj = rcu_dereference(obj->fence);
+               fobj = dma_resv_shared_list(obj);
                if (fobj)
                        sz += sizeof(*shared) * fobj->shared_max;
 
@@ -538,7 +538,7 @@ retry:
        }
 
        if (wait_all) {
-               struct dma_resv_list *fobj = rcu_dereference(obj->fence);
+               struct dma_resv_list *fobj = dma_resv_shared_list(obj);
 
                if (fobj)
                        shared_count = fobj->shared_count;
@@ -623,7 +623,7 @@ retry:
        seq = read_seqcount_begin(&obj->seq);
 
        if (test_all) {
-               struct dma_resv_list *fobj = rcu_dereference(obj->fence);
+               struct dma_resv_list *fobj = dma_resv_shared_list(obj);
                unsigned int i;
 
                if (fobj)
index d5e6519..6552859 100644 (file)
@@ -247,7 +247,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
        if (!ef)
                return -EINVAL;
 
-       old = dma_resv_get_list(resv);
+       old = dma_resv_shared_list(resv);
        if (!old)
                return 0;
 
index 6dd0ea6..04caa31 100644 (file)
@@ -49,7 +49,7 @@ __dma_resv_make_exclusive(struct dma_resv *obj)
        unsigned int count;
        int r;
 
-       if (!dma_resv_get_list(obj)) /* no shared fences to convert */
+       if (!dma_resv_shared_list(obj)) /* no shared fences to convert */
                return 0;
 
        r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
index 2bdc9df..1b2cecc 100644 (file)
@@ -213,7 +213,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
        f = dma_resv_excl_fence(resv);
        r = amdgpu_sync_fence(sync, f);
 
-       flist = dma_resv_get_list(resv);
+       flist = dma_resv_shared_list(resv);
        if (!flist || r)
                return r;
 
index df1f185..53a8ab8 100644 (file)
@@ -1339,7 +1339,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
         * If true, then return false as any KFD process needs all its BOs to
         * be resident to run successfully
         */
-       flist = dma_resv_get_list(bo->base.resv);
+       flist = dma_resv_shared_list(bo->base.resv);
        if (flist) {
                for (i = 0; i < flist->shared_count; ++i) {
                        f = rcu_dereference_protected(flist->shared[i],
index 2237fe5..8792d8d 100644 (file)
@@ -461,7 +461,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
                        off, etnaviv_obj->vaddr, obj->size);
 
        rcu_read_lock();
-       fobj = rcu_dereference(robj->fence);
+       fobj = dma_resv_shared_list(robj);
        if (fobj) {
                unsigned int i, shared_count = fobj->shared_count;
 
index 088d375..35279dd 100644 (file)
@@ -116,7 +116,7 @@ retry:
        args->busy = busy_check_writer(dma_resv_excl_fence(obj->base.resv));
 
        /* Translate shared fences to READ set of engines */
-       list = rcu_dereference(obj->base.resv->fence);
+       list = dma_resv_shared_list(obj->base.resv);
        if (list) {
                unsigned int shared_count = list->shared_count, i;
 
index a5a2a92..410a93a 100644 (file)
@@ -817,7 +817,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
        struct dma_fence *fence;
        int i, ret;
 
-       fobj = dma_resv_get_list(obj->resv);
+       fobj = dma_resv_shared_list(obj->resv);
        if (!fobj || (fobj->shared_count == 0)) {
                fence = dma_resv_excl_fence(obj->resv);
                /* don't need to wait on our own fences, since ring is fifo */
@@ -1025,7 +1025,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
        }
 
        rcu_read_lock();
-       fobj = rcu_dereference(robj->fence);
+       fobj = dma_resv_shared_list(robj);
        if (fobj) {
                unsigned int i, shared_count = fobj->shared_count;
 
index 19c096d..6b43918 100644 (file)
@@ -355,7 +355,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
                        return ret;
        }
 
-       fobj = dma_resv_get_list(resv);
+       fobj = dma_resv_shared_list(resv);
        fence = dma_resv_excl_fence(resv);
 
        if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
index 183d15e..1f9a596 100644 (file)
@@ -61,7 +61,7 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data)
                int rel;
 
                rcu_read_lock();
-               fobj = rcu_dereference(bo->tbo.base.resv->fence);
+               fobj = dma_resv_shared_list(bo->tbo.base.resv);
                rel = fobj ? fobj->shared_count : 0;
                rcu_read_unlock();
 
index c8a1711..9257b60 100644 (file)
@@ -105,7 +105,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
        else if (f)
                r = dma_fence_wait(f, true);
 
-       flist = dma_resv_get_list(resv);
+       flist = dma_resv_shared_list(resv);
        if (shared || !flist || r)
                return r;
 
index 1752f8e..f04a269 100644 (file)
@@ -261,7 +261,7 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
        int i;
 
        rcu_read_lock();
-       fobj = rcu_dereference(resv->fence);
+       fobj = dma_resv_shared_list(resv);
        fence = dma_resv_excl_fence(resv);
        if (fence && !fence->ops->signaled)
                dma_fence_enable_sw_signaling(fence);
index e3a7f74..8dc19d6 100644 (file)
@@ -78,20 +78,6 @@ struct dma_resv {
 #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
 #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
 
-/**
- * dma_resv_get_list - get the reservation object's
- * shared fence list, with update-side lock held
- * @obj: the reservation object
- *
- * Returns the shared fence list.  Does NOT take references to
- * the fence.  The obj->lock must be held.
- */
-static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
-{
-       return rcu_dereference_protected(obj->fence,
-                                        dma_resv_held(obj));
-}
-
 #ifdef CONFIG_DEBUG_MUTEXES
 void dma_resv_reset_shared_max(struct dma_resv *obj);
 #else
@@ -268,6 +254,19 @@ dma_resv_get_excl_rcu(struct dma_resv *obj)
        return fence;
 }
 
+/**
+ * dma_resv_shared_list - get the reservation object's shared fence list
+ * @obj: the reservation object
+ *
+ * Returns the shared fence list. The caller must either hold the object
+ * through dma_resv_lock() or the RCU read side lock through rcu_read_lock(),
+ * or one of the variants of each.
+ */
+static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj)
+{
+       return rcu_dereference_check(obj->fence, dma_resv_held(obj));
+}
+
 void dma_resv_init(struct dma_resv *obj);
 void dma_resv_fini(struct dma_resv *obj);
 int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);