drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "gt/intel_gt.h"

#include "i915_selftest.h"

#include "selftests/igt_flush_test.h"
#include "selftests/mock_drm.h"
#include "huge_gem_object.h"
#include "mock_context.h"

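/*
 * igt_fill_blt: exercise i915_gem_object_fill_blt(). Objects of random
 * size are filled on the blitter and every dword of the backing store is
 * then verified from the CPU.
 */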
static int igt_fill_blt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_context *ce = i915->engine[BCS0]->kernel_context;
	struct drm_i915_gem_object *obj;
	struct rnd_state prng;
	IGT_TIMEOUT(end);
	u32 *vaddr;
	int err = 0;

	prandom_seed_state(&prng, i915_selftest.random_seed);

	/*
	 * XXX: needs some threads to scale all these tests, also maybe throw
	 * in submission from a higher priority context to see if we are
	 * preempted for very large objects...
	 */

	do {
		const u32 max_block_size = S16_MAX * PAGE_SIZE;
		u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng));
		u32 phys_sz = sz % (max_block_size + 1);
		u32 val = prandom_u32_state(&prng);
		u32 i;

		sz = round_up(sz, PAGE_SIZE);
		phys_sz = round_up(phys_sz, PAGE_SIZE);

		pr_debug("%s with phys_sz=%x, sz=%x, val=%x\n", __func__,
			 phys_sz, sz, val);

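		/*
		 * huge_gem_object() gives us a virtual size of sz backed by
		 * only phys_sz of real pages, so large GTT ranges can be
		 * exercised without allocating that much memory.
		 */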
		obj = huge_gem_object(i915, phys_sz, sz);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto err_flush;
		}

		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto err_put;
		}

		/*
		 * Scribble a value other than the fill value so a blt that
		 * writes nothing cannot pass the check below. Also make sure
		 * the potentially async clflush does its job, if required.
		 */
		memset32(vaddr, val ^ 0xdeadbeaf,
			 huge_gem_object_phys_size(obj) / sizeof(u32));

		if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
			obj->cache_dirty = true;

		err = i915_gem_object_fill_blt(obj, ce, val);
		if (err)
			goto err_unpin;

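		/*
		 * set_to_cpu_domain() waits for the blt to complete and
		 * flushes any GPU write domain, so the CPU readback below
		 * observes the blitter's output.
		 */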
		i915_gem_object_lock(obj);
		err = i915_gem_object_set_to_cpu_domain(obj, false);
		i915_gem_object_unlock(obj);
		if (err)
			goto err_unpin;

		for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); ++i) {
			if (vaddr[i] != val) {
				pr_err("vaddr[%u]=%x, expected=%x\n", i,
				       vaddr[i], val);
				err = -EINVAL;
				goto err_unpin;
			}
		}

		i915_gem_object_unpin_map(obj);
		i915_gem_object_put(obj);
	} while (!time_after(jiffies, end));

	goto err_flush;

err_unpin:
	i915_gem_object_unpin_map(obj);
err_put:
	i915_gem_object_put(obj);
err_flush:
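	/*
	 * Randomly sized objects may legitimately exhaust memory; treat
	 * -ENOMEM as a skip rather than a failure.
	 */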
	if (err == -ENOMEM)
		err = 0;

	return err;
}

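/*
 * igt_copy_blt: exercise i915_gem_object_copy_blt(). A source object is
 * filled from the CPU, copied to a destination on the blitter and the
 * destination is then verified from the CPU.
 */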
static int igt_copy_blt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_context *ce = i915->engine[BCS0]->kernel_context;
	struct drm_i915_gem_object *src, *dst;
	struct rnd_state prng;
	IGT_TIMEOUT(end);
	u32 *vaddr;
	int err = 0;

	prandom_seed_state(&prng, i915_selftest.random_seed);

	do {
		const u32 max_block_size = S16_MAX * PAGE_SIZE;
		u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng));
		u32 phys_sz = sz % (max_block_size + 1);
		u32 val = prandom_u32_state(&prng);
		u32 i;

		sz = round_up(sz, PAGE_SIZE);
		phys_sz = round_up(phys_sz, PAGE_SIZE);

		pr_debug("%s with phys_sz=%x, sz=%x, val=%x\n", __func__,
			 phys_sz, sz, val);

		src = huge_gem_object(i915, phys_sz, sz);
		if (IS_ERR(src)) {
			err = PTR_ERR(src);
			goto err_flush;
		}

		vaddr = i915_gem_object_pin_map(src, I915_MAP_WB);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto err_put_src;
		}

		memset32(vaddr, val,
			 huge_gem_object_phys_size(src) / sizeof(u32));

		i915_gem_object_unpin_map(src);

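		/*
		 * If GPU reads are not coherent with the CPU cache, mark the
		 * pages dirty so the CPU writes above are flushed before the
		 * blitter samples them.
		 */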
		if (!(src->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
			src->cache_dirty = true;

		dst = huge_gem_object(i915, phys_sz, sz);
		if (IS_ERR(dst)) {
			err = PTR_ERR(dst);
			goto err_put_src;
		}

		vaddr = i915_gem_object_pin_map(dst, I915_MAP_WB);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto err_put_dst;
		}

		memset32(vaddr, val ^ 0xdeadbeaf,
			 huge_gem_object_phys_size(dst) / sizeof(u32));

		if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
			dst->cache_dirty = true;

		err = i915_gem_object_copy_blt(src, dst, ce);
		if (err)
			goto err_unpin;

		i915_gem_object_lock(dst);
		err = i915_gem_object_set_to_cpu_domain(dst, false);
		i915_gem_object_unlock(dst);
		if (err)
			goto err_unpin;

		for (i = 0; i < huge_gem_object_phys_size(dst) / sizeof(u32); ++i) {
			if (vaddr[i] != val) {
				pr_err("vaddr[%u]=%x, expected=%x\n", i,
				       vaddr[i], val);
				err = -EINVAL;
				goto err_unpin;
			}
		}

		i915_gem_object_unpin_map(dst);

		i915_gem_object_put(src);
		i915_gem_object_put(dst);
	} while (!time_after(jiffies, end));

	goto err_flush;

err_unpin:
	i915_gem_object_unpin_map(dst);
err_put_dst:
	i915_gem_object_put(dst);
err_put_src:
	i915_gem_object_put(src);
err_flush:
	if (err == -ENOMEM)
		err = 0;

	return err;
}

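/*
 * These are live selftests and so run against real hardware; with
 * CONFIG_DRM_I915_SELFTEST enabled they are typically invoked via the
 * i915.live_selftests module parameter (see i915_selftest.c).
 */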
int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_fill_blt),
		SUBTEST(igt_copy_blt),
	};

	/* Nothing to test if the GPU is already terminally wedged. */
	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	/* Both subtests require the blitter (BCS0) engine. */
	if (!HAS_ENGINE(i915, BCS0))
		return 0;

	return i915_live_subtests(tests, i915);
}