drivers/gpu/drm/i915/gt/intel_ppgtt.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h>

#include "gem/i915_gem_lmem.h"

#include "i915_trace.h"
#include "intel_gtt.h"
#include "gen6_ppgtt.h"
#include "gen8_ppgtt.h"

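/*
 * Allocate a single page table: the CPU-side bookkeeping plus one 4K page
 * of DMA memory obtained through the vm's page allocator. Returns
 * ERR_PTR(-ENOMEM) if either allocation fails.
 */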
struct i915_page_table *alloc_pt(struct i915_address_space *vm)
{
	struct i915_page_table *pt;

	pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
	if (unlikely(!pt))
		return ERR_PTR(-ENOMEM);

	pt->base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
	if (IS_ERR(pt->base)) {
		kfree(pt);
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&pt->used, 0);
	return pt;
}

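/*
 * Allocate only the CPU-side bookkeeping for a page directory with @count
 * entry slots; the backing DMA page is left to the caller.
 */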
struct i915_page_directory *__alloc_pd(int count)
{
	struct i915_page_directory *pd;

	pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
	if (unlikely(!pd))
		return NULL;

	pd->entry = kcalloc(count, sizeof(*pd->entry), I915_GFP_ALLOW_FAIL);
	if (unlikely(!pd->entry)) {
		kfree(pd);
		return NULL;
	}

	spin_lock_init(&pd->lock);
	return pd;
}

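/*
 * Allocate a complete page directory: bookkeeping for I915_PDES entries
 * plus the 4K page that backs it in the GPU's view.
 */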
struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
	struct i915_page_directory *pd;

	pd = __alloc_pd(I915_PDES);
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
	if (IS_ERR(pd->pt.base)) {
		kfree(pd->entry);
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	return pd;
}

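/*
 * Free a page table or page directory; any non-zero @lvl is a directory
 * and also owns an entry array. The BUILD_BUG_ON below guards the
 * container_of() shortcut, which relies on the embedded page table being
 * the first member of struct i915_page_directory.
 */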
void free_px(struct i915_address_space *vm, struct i915_page_table *pt, int lvl)
{
	BUILD_BUG_ON(offsetof(struct i915_page_directory, pt));

	if (lvl) {
		struct i915_page_directory *pd =
			container_of(pt, typeof(*pd), pt);
		kfree(pd->entry);
	}

	if (pt->base)
		i915_gem_object_put(pt->base);

	kfree(pt);
}

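/*
 * Write one pre-encoded entry into the CPU mapping of a paging structure
 * and flush the cacheline so the update is visible to the GPU.
 */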
static void
write_dma_entry(struct drm_i915_gem_object * const pdma,
		const unsigned short idx,
		const u64 encoded_entry)
{
	u64 * const vaddr = __px_vaddr(pdma);

	vaddr[idx] = encoded_entry;
	clflush_cache_range(&vaddr[idx], sizeof(u64));
}

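/*
 * Point directory slot @idx at @to: take a use count on the directory,
 * record the child for the CPU, and write its encoded DMA address into
 * the directory's backing page for the GPU.
 */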
void
__set_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       struct i915_page_table * const to,
	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
{
	/* Each thread pre-pins the pd, and we may have a thread per pde. */
	GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * I915_PDES);

	atomic_inc(px_used(pd));
	pd->entry[idx] = to;
	write_dma_entry(px_base(pd), idx, encode(px_dma(to), I915_CACHE_LLC));
}

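/*
 * Undo __set_pd_entry(): restore the scratch entry in slot @idx and drop
 * the use count taken when the slot was populated.
 */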
void
clear_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       const struct drm_i915_gem_object * const scratch)
{
	GEM_BUG_ON(atomic_read(px_used(pd)) == 0);

	write_dma_entry(px_base(pd), idx, scratch->encode);
	pd->entry[idx] = NULL;
	atomic_dec(px_used(pd));
}

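/*
 * Drop a reference on @pt. The fast path simply decrements; only the
 * final reference takes pd->lock to clear the directory slot, so that
 * clearing cannot race with a concurrent insert. Returns true when the
 * caller is responsible for freeing @pt.
 */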
bool
release_pd_entry(struct i915_page_directory * const pd,
		 const unsigned short idx,
		 struct i915_page_table * const pt,
		 const struct drm_i915_gem_object * const scratch)
{
	bool free = false;

	if (atomic_add_unless(&pt->used, -1, 1))
		return false;

	spin_lock(&pd->lock);
	if (atomic_dec_and_test(&pt->used)) {
		clear_pd_entry(pd, idx, scratch);
		free = true;
	}
	spin_unlock(&pd->lock);

	return free;
}

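/*
 * Apply the GTT workarounds and, on gen6/gen7 hardware, explicitly enable
 * per-process GTTs; newer generations need no explicit enabling here.
 */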
int i915_ppgtt_init_hw(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	gtt_write_workarounds(gt);

	if (GRAPHICS_VER(i915) == 6)
		gen6_ppgtt_enable(gt);
	else if (GRAPHICS_VER(i915) == 7)
		gen7_ppgtt_enable(gt);

	return 0;
}

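/* Pick the ppgtt implementation by hardware generation. */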
static struct i915_ppgtt *
__ppgtt_create(struct intel_gt *gt)
{
	if (GRAPHICS_VER(gt->i915) < 8)
		return gen6_ppgtt_create(gt);
	else
		return gen8_ppgtt_create(gt);
}

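/* Create a new ppgtt for @gt and emit the creation tracepoint. */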
struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt)
{
	struct i915_ppgtt *ppgtt;

	ppgtt = __ppgtt_create(gt);
	if (IS_ERR(ppgtt))
		return ppgtt;

	trace_i915_ppgtt_create(&ppgtt->vm);

	return ppgtt;
}

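/*
 * Bind @vma into the ppgtt: on first bind, carve out the page tables for
 * its virtual range (tracked via I915_VMA_ALLOC_BIT), then write the PTEs
 * with the read-only and local-memory bits the object requires.
 */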
void ppgtt_bind_vma(struct i915_address_space *vm,
		    struct i915_vm_pt_stash *stash,
		    struct i915_vma *vma,
		    enum i915_cache_level cache_level,
		    u32 flags)
{
	u32 pte_flags;

	if (!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
		vm->allocate_va_range(vm, stash, vma->node.start, vma->size);
		set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
	}

	/* Applicable to VLV, and gen8+ */
	pte_flags = 0;
	if (i915_gem_object_is_readonly(vma->obj))
		pte_flags |= PTE_READ_ONLY;
	if (i915_gem_object_is_lmem(vma->obj))
		pte_flags |= PTE_LM;

	vm->insert_entries(vm, vma, cache_level, pte_flags);
	wmb();
}

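/* Scrub the PTEs for @vma, but only if it was ever bound. */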
void ppgtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
{
	if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)))
		vm->clear_range(vm, vma->node.start, vma->size);
}

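/*
 * Worst-case number of entries needed at one paging level to cover @size
 * bytes: the stash is filled before the VA range is known, so allow a full
 * BIT_ULL(shift) - 1 of slack at both ends for misalignment.
 */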
static unsigned long pd_count(u64 size, int shift)
{
	/* Beware later misalignment */
	return (size + 2 * (BIT_ULL(shift) - 1)) >> shift;
}

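/*
 * Preallocate, without yet mapping, every page table and directory that
 * could possibly be needed to map @size bytes: page tables are chained on
 * stash->pt[0], directories for all upper levels on stash->pt[1].
 */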
int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash,
			   u64 size)
{
	unsigned long count;
	int shift, n;

	shift = vm->pd_shift;
	if (!shift)
		return 0;

	count = pd_count(size, shift);
	while (count--) {
		struct i915_page_table *pt;

		pt = alloc_pt(vm);
		if (IS_ERR(pt)) {
			i915_vm_free_pt_stash(vm, stash);
			return PTR_ERR(pt);
		}

		pt->stash = stash->pt[0];
		stash->pt[0] = pt;
	}

	for (n = 1; n < vm->top; n++) {
		shift += ilog2(I915_PDES); /* Each PD holds 512 entries */
		count = pd_count(size, shift);
		while (count--) {
			struct i915_page_directory *pd;

			pd = alloc_pd(vm);
			if (IS_ERR(pd)) {
				i915_vm_free_pt_stash(vm, stash);
				return PTR_ERR(pd);
			}

			pd->pt.stash = stash->pt[1];
			stash->pt[1] = &pd->pt;
		}
	}

	return 0;
}

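/*
 * CPU-map the backing page of everything in the stash; the caller already
 * holds the vm lock, hence the _locked mapping helper.
 */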
int i915_vm_map_pt_stash(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash)
{
	struct i915_page_table *pt;
	int n, err;

	for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
		for (pt = stash->pt[n]; pt; pt = pt->stash) {
			err = map_pt_dma_locked(vm, pt->base);
			if (err)
				return err;
		}
	}

	return 0;
}

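/*
 * Release anything still chained in the stash, either after an allocation
 * error or because the worst-case preallocation was not fully consumed.
 */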
void i915_vm_free_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash)
{
	struct i915_page_table *pt;
	int n;

	for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
		while ((pt = stash->pt[n])) {
			stash->pt[n] = pt->stash;
			free_px(vm, pt, n);
		}
	}
}

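/* A ppgtt vma uses the object's backing pages directly, without remapping. */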
int ppgtt_set_pages(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->pages);

	vma->pages = vma->obj->mm.pages;
	vma->page_sizes = vma->obj->mm.page_sizes;

	return 0;
}

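/*
 * Shared ppgtt setup: wire the vm to its gt and device, size the address
 * space from the platform's ppgtt_size, and install the vma bind/unbind ops.
 */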
void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	ppgtt->vm.gt = gt;
	ppgtt->vm.i915 = i915;
	ppgtt->vm.dma = i915->drm.dev;
	ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);

	dma_resv_init(&ppgtt->vm._resv);
	i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);

	ppgtt->vm.vma_ops.bind_vma    = ppgtt_bind_vma;
	ppgtt->vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
	ppgtt->vm.vma_ops.set_pages   = ppgtt_set_pages;
	ppgtt->vm.vma_ops.clear_pages = clear_pages;
}