drm/i915/gem: Make i915_gem_object_flush_write_domain() static
author: Chris Wilson <chris@chris-wilson.co.uk>
Tue, 19 Jan 2021 14:49:11 +0000 (14:49 +0000)
committer: Chris Wilson <chris@chris-wilson.co.uk>
Tue, 19 Jan 2021 20:47:25 +0000 (20:47 +0000)
flush_write_domain() is only used within the GEM domain management code,
so move it to i915_gem_domain.c and drop the export.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210119144912.12653-5-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/gem/i915_gem_domain.c
drivers/gpu/drm/i915/gem/i915_gem_object.c
drivers/gpu/drm/i915/gem/i915_gem_object.h

index fcce690..f0379b5 100644 (file)
@@ -5,6 +5,7 @@
  */
 
 #include "display/intel_frontbuffer.h"
+#include "gt/intel_gt.h"
 
 #include "i915_drv.h"
 #include "i915_gem_clflush.h"
 #include "i915_gem_lmem.h"
 #include "i915_gem_mman.h"
 
+static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
+{
+       return !(obj->cache_level == I915_CACHE_NONE ||
+                obj->cache_level == I915_CACHE_WT);
+}
+
+static void
+flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
+{
+       struct i915_vma *vma;
+
+       assert_object_held(obj);
+
+       if (!(obj->write_domain & flush_domains))
+               return;
+
+       switch (obj->write_domain) {
+       case I915_GEM_DOMAIN_GTT:
+               spin_lock(&obj->vma.lock);
+               for_each_ggtt_vma(vma, obj) {
+                       if (i915_vma_unset_ggtt_write(vma))
+                               intel_gt_flush_ggtt_writes(vma->vm->gt);
+               }
+               spin_unlock(&obj->vma.lock);
+
+               i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
+               break;
+
+       case I915_GEM_DOMAIN_WC:
+               wmb();
+               break;
+
+       case I915_GEM_DOMAIN_CPU:
+               i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
+               break;
+
+       case I915_GEM_DOMAIN_RENDER:
+               if (gpu_write_needs_clflush(obj))
+                       obj->cache_dirty = true;
+               break;
+       }
+
+       obj->write_domain = 0;
+}
+
 static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
 {
        /*
         * We manually flush the CPU domain so that we can override and
         * force the flush for the display, and perform it asyncrhonously.
         */
-       i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+       flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
        if (obj->cache_dirty)
                i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
        obj->write_domain = 0;
@@ -80,7 +126,7 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
        if (ret)
                return ret;
 
-       i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
+       flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
 
        /* Serialise direct access to this object with the barriers for
         * coherent writes from the GPU, by effectively invalidating the
@@ -141,7 +187,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
        if (ret)
                return ret;
 
-       i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
+       flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
 
        /* Serialise direct access to this object with the barriers for
         * coherent writes from the GPU, by effectively invalidating the
@@ -451,7 +497,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
        if (ret)
                return ret;
 
-       i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+       flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
 
        /* Flush the CPU cache if it's still invalid. */
        if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
@@ -619,7 +665,7 @@ int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
                        goto out;
        }
 
-       i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+       flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
 
        /* If we're not in the cpu read domain, set ourself into the gtt
         * read domain and manually flush cachelines (if required). This
@@ -670,7 +716,7 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
                        goto out;
        }
 
-       i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+       flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
 
        /* If we're not in the cpu write domain, set ourself into the
         * gtt write domain and manually flush cachelines (as required).
index 00d2400..83c6ee6 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/sched/mm.h>
 
 #include "display/intel_frontbuffer.h"
-#include "gt/intel_gt.h"
 #include "i915_drv.h"
 #include "i915_gem_clflush.h"
 #include "i915_gem_context.h"
@@ -313,52 +312,6 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
                queue_work(i915->wq, &i915->mm.free_work);
 }
 
-static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
-{
-       return !(obj->cache_level == I915_CACHE_NONE ||
-                obj->cache_level == I915_CACHE_WT);
-}
-
-void
-i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
-                                  unsigned int flush_domains)
-{
-       struct i915_vma *vma;
-
-       assert_object_held(obj);
-
-       if (!(obj->write_domain & flush_domains))
-               return;
-
-       switch (obj->write_domain) {
-       case I915_GEM_DOMAIN_GTT:
-               spin_lock(&obj->vma.lock);
-               for_each_ggtt_vma(vma, obj) {
-                       if (i915_vma_unset_ggtt_write(vma))
-                               intel_gt_flush_ggtt_writes(vma->vm->gt);
-               }
-               spin_unlock(&obj->vma.lock);
-
-               i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
-               break;
-
-       case I915_GEM_DOMAIN_WC:
-               wmb();
-               break;
-
-       case I915_GEM_DOMAIN_CPU:
-               i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
-               break;
-
-       case I915_GEM_DOMAIN_RENDER:
-               if (gpu_write_needs_clflush(obj))
-                       obj->cache_dirty = true;
-               break;
-       }
-
-       obj->write_domain = 0;
-}
-
 void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
                                         enum fb_op_origin origin)
 {
index a21714d..eaa3641 100644 (file)
@@ -427,10 +427,6 @@ static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
 
 void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);
 
-void
-i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
-                                  unsigned int flush_domains);
-
 int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
                                 unsigned int *needs_clflush);
 int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,