drm/i915/region: support volatile objects
author    Matthew Auld <matthew.auld@intel.com>
          Tue, 8 Oct 2019 16:01:16 +0000 (17:01 +0100)
committer Chris Wilson <chris@chris-wilson.co.uk>
          Tue, 8 Oct 2019 19:50:01 +0000 (20:50 +0100)
Volatile objects are marked as DONTNEED while their pages are pinned; as soon
as they are unpinned, the backing store is allowed to be reaped by the
shrinker. For now this is limited to kernel-internal objects.
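
For illustration, a minimal sketch of the intended lifecycle for a volatile,
kernel-internal object (error handling trimmed; the SZ_64K size is arbitrary):

	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_create_internal(i915, SZ_64K);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* Pages are allocated at pin time and marked DONTNEED while set. */
	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out_put;

	/* ... use the object; contents only valid while active and pinned ... */

	/* After unpinning, the shrinker is free to reap the backing store. */
	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);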

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: CQ Tang <cq.tang@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20191008160116.18379-4-matthew.auld@intel.com
drivers/gpu/drm/i915/gem/i915_gem_internal.c
drivers/gpu/drm/i915/gem/i915_gem_object.h
drivers/gpu/drm/i915/gem/i915_gem_object_types.h
drivers/gpu/drm/i915/gem/i915_gem_pages.c
drivers/gpu/drm/i915/gem/i915_gem_region.c
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
drivers/gpu/drm/i915/intel_memory_region.c
drivers/gpu/drm/i915/intel_memory_region.h
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index 0c41e04..5ae694c 100644
@@ -117,13 +117,6 @@ create_st:
                goto err;
        }
 
-       /* Mark the pages as dontneed whilst they are still pinned. As soon
-        * as they are unpinned they are allowed to be reaped by the shrinker,
-        * and the caller is expected to repopulate - the contents of this
-        * object are only valid whilst active and pinned.
-        */
-       obj->mm.madv = I915_MADV_DONTNEED;
-
        __i915_gem_object_set_pages(obj, st, sg_page_sizes);
 
        return 0;
@@ -143,7 +136,6 @@ static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
        internal_free_pages(pages);
 
        obj->mm.dirty = false;
-       obj->mm.madv = I915_MADV_WILLNEED;
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
@@ -188,6 +180,15 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
        drm_gem_private_object_init(&i915->drm, &obj->base, size);
        i915_gem_object_init(obj, &i915_gem_object_internal_ops);
 
+       /*
+        * Mark the object as volatile, such that the pages are marked as
+        * dontneed whilst they are still pinned. As soon as they are unpinned
+        * they are allowed to be reaped by the shrinker, and the caller is
+        * expected to repopulate - the contents of this object are only valid
+        * whilst active and pinned.
+        */
+       i915_gem_object_set_volatile(obj);
+
        obj->read_domains = I915_GEM_DOMAIN_CPU;
        obj->write_domain = I915_GEM_DOMAIN_CPU;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index dfd16d6..c5e14c9 100644
@@ -145,6 +145,18 @@ i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
        return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
 }
 
+static inline bool
+i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
+{
+       return obj->flags & I915_BO_ALLOC_VOLATILE;
+}
+
+static inline void
+i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
+{
+       obj->flags |= I915_BO_ALLOC_VOLATILE;
+}
+
 static inline bool
 i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
                         unsigned long flags)
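
A hedged usage sketch of the new helper (example_publish_handle() is
hypothetical, not part of this patch), guarding against a volatile object
ever being exposed outside the kernel:

	static int example_publish_handle(struct drm_i915_gem_object *obj)
	{
		/* Volatile objects must remain kernel-internal. */
		if (GEM_WARN_ON(i915_gem_object_is_volatile(obj)))
			return -EINVAL;

		return 0;
	}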
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index c6a712c..a387e3e 100644
@@ -121,7 +121,8 @@ struct drm_i915_gem_object {
 
        unsigned long flags;
 #define I915_BO_ALLOC_CONTIGUOUS BIT(0)
-#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS)
+#define I915_BO_ALLOC_VOLATILE   BIT(1)
+#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE)
 
        /*
         * Is the object to be mapped as read-only to the GPU
@@ -172,6 +173,12 @@ struct drm_i915_gem_object {
                 * List of memory region blocks allocated for this object.
                 */
                struct list_head blocks;
+               /**
+                * Element within memory_region->objects.list, or
+                * memory_region->objects.purgeable if the object is marked
+                * as DONTNEED. Access is protected by memory_region->objects.lock.
+                */
+               struct list_head region_link;
 
                struct sg_table *pages;
                void *mapping;
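
With the flag accepted by I915_BO_ALLOC_FLAGS, a region-backed object can opt
in at creation time. A sketch, assuming i915_gem_object_create_region() from
earlier in this series:

	obj = i915_gem_object_create_region(mem, SZ_2M,
					    I915_BO_ALLOC_VOLATILE |
					    I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);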
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 2e941f0..b0ec095 100644
@@ -18,6 +18,9 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 
        lockdep_assert_held(&obj->mm.lock);
 
+       if (i915_gem_object_is_volatile(obj))
+               obj->mm.madv = I915_MADV_DONTNEED;
+
        /* Make the pages coherent with the GPU (flushing any swapin). */
        if (obj->cache_dirty) {
                obj->write_domain = 0;
@@ -160,6 +163,9 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
        if (IS_ERR_OR_NULL(pages))
                return pages;
 
+       if (i915_gem_object_is_volatile(obj))
+               obj->mm.madv = I915_MADV_WILLNEED;
+
        i915_gem_object_make_unshrinkable(obj);
 
        if (obj->mm.mapping) {
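
Together the two hooks maintain a simple invariant, expressed here as an
illustrative assertion (not part of the patch):

	/* While a volatile object has pages, it must read as DONTNEED. */
	GEM_BUG_ON(i915_gem_object_is_volatile(obj) &&
		   i915_gem_object_has_pages(obj) &&
		   obj->mm.madv != I915_MADV_DONTNEED);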
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index d94914a..d3f7733 100644
@@ -107,11 +107,26 @@ void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
        INIT_LIST_HEAD(&obj->mm.blocks);
        obj->mm.region = intel_memory_region_get(mem);
        obj->flags |= flags;
+
+       mutex_lock(&mem->objects.lock);
+
+       if (obj->flags & I915_BO_ALLOC_VOLATILE)
+               list_add(&obj->mm.region_link, &mem->objects.purgeable);
+       else
+               list_add(&obj->mm.region_link, &mem->objects.list);
+
+       mutex_unlock(&mem->objects.lock);
 }
 
 void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
 {
-       intel_memory_region_put(obj->mm.region);
+       struct intel_memory_region *mem = obj->mm.region;
+
+       mutex_lock(&mem->objects.lock);
+       list_del(&obj->mm.region_link);
+       mutex_unlock(&mem->objects.lock);
+
+       intel_memory_region_put(mem);
 }
 
 struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 63a4743..f27772f 100644
@@ -115,8 +115,6 @@ static int get_huge_pages(struct drm_i915_gem_object *obj)
        if (i915_gem_gtt_prepare_pages(obj, st))
                goto err;
 
-       obj->mm.madv = I915_MADV_DONTNEED;
-
        GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
        __i915_gem_object_set_pages(obj, st, sg_page_sizes);
 
@@ -137,7 +135,6 @@ static void put_huge_pages(struct drm_i915_gem_object *obj,
        huge_pages_free_pages(pages);
 
        obj->mm.dirty = false;
-       obj->mm.madv = I915_MADV_WILLNEED;
 }
 
 static const struct drm_i915_gem_object_ops huge_page_ops = {
@@ -170,6 +167,8 @@ huge_pages_object(struct drm_i915_private *i915,
        drm_gem_private_object_init(&i915->drm, &obj->base, size);
        i915_gem_object_init(obj, &huge_page_ops);
 
+       i915_gem_object_set_volatile(obj);
+
        obj->write_domain = I915_GEM_DOMAIN_CPU;
        obj->read_domains = I915_GEM_DOMAIN_CPU;
        obj->cache_level = I915_CACHE_NONE;
@@ -229,8 +228,6 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
 
        i915_sg_trim(st);
 
-       obj->mm.madv = I915_MADV_DONTNEED;
-
        __i915_gem_object_set_pages(obj, st, sg_page_sizes);
 
        return 0;
@@ -263,8 +260,6 @@ static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
        sg_dma_len(sg) = obj->base.size;
        sg_dma_address(sg) = page_size;
 
-       obj->mm.madv = I915_MADV_DONTNEED;
-
        __i915_gem_object_set_pages(obj, st, sg->length);
 
        return 0;
@@ -283,7 +278,6 @@ static void fake_put_huge_pages(struct drm_i915_gem_object *obj,
 {
        fake_free_huge_pages(obj, pages);
        obj->mm.dirty = false;
-       obj->mm.madv = I915_MADV_WILLNEED;
 }
 
 static const struct drm_i915_gem_object_ops fake_ops = {
@@ -323,6 +317,8 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
        else
                i915_gem_object_init(obj, &fake_ops);
 
+       i915_gem_object_set_volatile(obj);
+
        obj->write_domain = I915_GEM_DOMAIN_CPU;
        obj->read_domains = I915_GEM_DOMAIN_CPU;
        obj->cache_level = I915_CACHE_NONE;
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index 9800661..c029943 100644
@@ -152,6 +152,10 @@ intel_memory_region_create(struct drm_i915_private *i915,
        mem->min_page_size = min_page_size;
        mem->ops = ops;
 
+       mutex_init(&mem->objects.lock);
+       INIT_LIST_HEAD(&mem->objects.list);
+       INIT_LIST_HEAD(&mem->objects.purgeable);
+
        mutex_init(&mem->mm_lock);
 
        if (ops->init) {
@@ -177,6 +181,7 @@ static void __intel_memory_region_destroy(struct kref *kref)
                mem->ops->release(mem);
 
        mutex_destroy(&mem->mm_lock);
+       mutex_destroy(&mem->objects.lock);
        kfree(mem);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 29b86ca..52c141b 100644
@@ -52,6 +52,12 @@ struct intel_memory_region {
        unsigned int type;
        unsigned int instance;
        unsigned int id;
+
+       struct {
+               struct mutex lock; /* Protects access to objects */
+               struct list_head list;
+               struct list_head purgeable;
+       } objects;
 };
 
 int intel_memory_region_init_buddy(struct intel_memory_region *mem);
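
The split lists give each region a cheap way to find reclaim candidates. A
hypothetical walk over a region's purgeable objects (not part of this patch):

	struct drm_i915_gem_object *obj;

	mutex_lock(&mem->objects.lock);
	list_for_each_entry(obj, &mem->objects.purgeable, mm.region_link) {
		/* Unpinned entries are fair game for the shrinker/eviction. */
	}
	mutex_unlock(&mem->objects.lock);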
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 165b3a7..ebe735d 100644
@@ -82,8 +82,6 @@ static int fake_get_pages(struct drm_i915_gem_object *obj)
        }
        GEM_BUG_ON(rem);
 
-       obj->mm.madv = I915_MADV_DONTNEED;
-
        __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
 
        return 0;
@@ -95,7 +93,6 @@ static void fake_put_pages(struct drm_i915_gem_object *obj,
 {
        fake_free_pages(obj, pages);
        obj->mm.dirty = false;
-       obj->mm.madv = I915_MADV_WILLNEED;
 }
 
 static const struct drm_i915_gem_object_ops fake_ops = {
@@ -122,6 +119,8 @@ fake_dma_object(struct drm_i915_private *i915, u64 size)
        drm_gem_private_object_init(&i915->drm, &obj->base, size);
        i915_gem_object_init(obj, &fake_ops);
 
+       i915_gem_object_set_volatile(obj);
+
        obj->write_domain = I915_GEM_DOMAIN_CPU;
        obj->read_domains = I915_GEM_DOMAIN_CPU;
        obj->cache_level = I915_CACHE_NONE;