drm/i915/selftests: add sanity selftest for huge-GTT-pages
author Matthew Auld <matthew.auld@intel.com>
Fri, 25 Oct 2019 15:37:28 +0000 (16:37 +0100)
committer Chris Wilson <chris@chris-wilson.co.uk>
Fri, 25 Oct 2019 21:56:05 +0000 (22:56 +0100)
Now that we do randomised testing for all the relevant backends, we need
to make sure we still sanity check the obvious cases that might blow up,
so that introducing a temporary regression is less likely. Also, rather
than doing this for every backend, just limit it to our two memory
types: system and local.

Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20191025153728.23689-7-chris@chris-wilson.co.uk
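
For context (not part of the patch): the combos[] table in the diff below
encodes the expected page sizes as a bitmask of SZ_* values, which the test
clamps against the platform-supported mask before overriding
obj->mm.page_sizes.sg. A minimal, hypothetical userspace C sketch of that
masking step follows; the clamp_page_sizes() helper and main() harness are
invented purely for illustration, only the SZ_* values mirror the kernel
definitions.

	#include <stdio.h>

	/* Page-size bits, mirroring the kernel's SZ_* values. */
	#define SZ_4K  0x00001000u
	#define SZ_64K 0x00010000u
	#define SZ_2M  0x00200000u

	/* Hypothetical helper: drop any page sizes the platform cannot use. */
	static unsigned int clamp_page_sizes(unsigned int expected,
					     unsigned int supported)
	{
		return expected & supported;
	}

	int main(void)
	{
		/* e.g. a platform that supports only 4K and 64K GTT pages */
		unsigned int supported = SZ_4K | SZ_64K;

		/* a combo expecting 2M | 4K degrades to just 4K here */
		printf("%#x\n", clamp_page_sizes(SZ_2M | SZ_4K, supported));

		return 0;
	}

Any page size the platform cannot use simply drops out of the expectation,
which is what the "pages & supported" step in igt_ppgtt_sanity_check() does
before the write-huge pass.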
drivers/gpu/drm/i915/gem/selftests/huge_pages.c

index b777999..688c49a 100644 (file)
@@ -1342,6 +1342,12 @@ igt_create_internal(struct drm_i915_private *i915, u32 size, u32 flags)
        return i915_gem_object_create_internal(i915, size);
 }
 
+static struct drm_i915_gem_object *
+igt_create_system(struct drm_i915_private *i915, u32 size, u32 flags)
+{
+       return huge_pages_object(i915, size, size);
+}
+
 static struct drm_i915_gem_object *
 igt_create_local(struct drm_i915_private *i915, u32 size, u32 flags)
 {
@@ -1452,6 +1458,98 @@ out_put:
        return err;
 }
 
+static int igt_ppgtt_sanity_check(void *arg)
+{
+       struct i915_gem_context *ctx = arg;
+       struct drm_i915_private *i915 = ctx->i915;
+       unsigned int supported = INTEL_INFO(i915)->page_sizes;
+       struct {
+               igt_create_fn fn;
+               unsigned int flags;
+       } backends[] = {
+               { igt_create_system, 0,                        },
+               { igt_create_local,  I915_BO_ALLOC_CONTIGUOUS, },
+       };
+       struct {
+               u32 size;
+               u32 pages;
+       } combos[] = {
+               { SZ_64K,               SZ_64K          },
+               { SZ_2M,                SZ_2M           },
+               { SZ_2M,                SZ_64K          },
+               { SZ_2M - SZ_64K,       SZ_64K          },
+               { SZ_2M - SZ_4K,        SZ_64K | SZ_4K  },
+               { SZ_2M + SZ_4K,        SZ_64K | SZ_4K  },
+               { SZ_2M + SZ_4K,        SZ_2M  | SZ_4K  },
+               { SZ_2M + SZ_64K,       SZ_2M  | SZ_64K },
+       };
+       int i, j;
+       int err;
+
+       if (supported == I915_GTT_PAGE_SIZE_4K)
+               return 0;
+
+       /*
+        * Sanity check that the HW behaves with a limited set of combinations.
+        * We already have a bunch of randomised testing, which should give us
+        * a decent amount of variation between runs, however we should keep
+        * this to limit the chances of introducing a temporary regression, by
+        * testing the most obvious cases that might make something blow up.
+        */
+
+       for (i = 0; i < ARRAY_SIZE(backends); ++i) {
+               for (j = 0; j < ARRAY_SIZE(combos); ++j) {
+                       struct drm_i915_gem_object *obj;
+                       u32 size = combos[j].size;
+                       u32 pages = combos[j].pages;
+
+                       obj = backends[i].fn(i915, size, backends[i].flags);
+                       if (IS_ERR(obj)) {
+                               err = PTR_ERR(obj);
+                               if (err == -ENODEV) {
+                                       pr_info("Device lacks local memory, skipping\n");
+                                       err = 0;
+                                       break;
+                               }
+
+                               return err;
+                       }
+
+                       err = i915_gem_object_pin_pages(obj);
+                       if (err) {
+                               i915_gem_object_put(obj);
+                               goto out;
+                       }
+
+                       GEM_BUG_ON(pages > obj->base.size);
+                       pages = pages & supported;
+
+                       if (pages)
+                               obj->mm.page_sizes.sg = pages;
+
+                       err = igt_write_huge(ctx, obj);
+
+                       i915_gem_object_unpin_pages(obj);
+                       __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+                       i915_gem_object_put(obj);
+
+                       if (err) {
+                               pr_err("%s write-huge failed with size=%u pages=%u i=%d, j=%d\n",
+                                      __func__, size, pages, i, j);
+                               goto out;
+                       }
+               }
+
+               cond_resched();
+       }
+
+out:
+       if (err == -ENOMEM)
+               err = 0;
+
+       return err;
+}
+
 static int igt_ppgtt_pin_update(void *arg)
 {
        struct i915_gem_context *ctx = arg;
@@ -1812,6 +1910,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
                SUBTEST(igt_tmpfs_fallback),
                SUBTEST(igt_ppgtt_exhaust_huge),
                SUBTEST(igt_ppgtt_smoke_huge),
+               SUBTEST(igt_ppgtt_sanity_check),
        };
        struct drm_file *file;
        struct i915_gem_context *ctx;