drivers/gpu/drm/etnaviv/etnaviv_mmu.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_iommu.h"
#include "etnaviv_mmu.h"

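/*
 * Unmap a virtually contiguous range from the GPU MMU domain, one 4K page
 * at a time. Both iova and size must be page aligned.
 */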
static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	size_t pgsize = SZ_4K;

	if (!IS_ALIGNED(iova | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
		       iova, size, pgsize);
		return;
	}

	while (unmapped < size) {
		unmapped_page = domain->ops->unmap(domain, iova, pgsize);
		if (!unmapped_page)
			break;

		iova += unmapped_page;
		unmapped += unmapped_page;
	}
}

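/*
 * Map a physically contiguous range into the GPU MMU domain, one 4K page
 * at a time. If any page fails to map, the pages mapped so far are
 * unmapped again before returning the error.
 */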
static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
			      unsigned long iova, phys_addr_t paddr,
			      size_t size, int prot)
{
	unsigned long orig_iova = iova;
	size_t pgsize = SZ_4K;
	size_t orig_size = size;
	int ret = 0;

	if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
		       iova, &paddr, size, pgsize);
		return -EINVAL;
	}

	while (size) {
		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		etnaviv_domain_unmap(domain, orig_iova, orig_size - size);

	return ret;
}

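/*
 * Map a scatter-gather table at the given IOVA, entry by entry. On failure
 * the entries that were already mapped are unmapped again.
 */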
static int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
			     struct sg_table *sgt, unsigned len, int prot)
{
	struct etnaviv_iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i, j;
	int ret;

	if (!domain || !sgt)
		return -EINVAL;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa = sg_dma_address(sg) - sg->offset;
		size_t bytes = sg_dma_len(sg) + sg->offset;

		VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);

		ret = etnaviv_domain_map(domain, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	da = iova;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes = sg_dma_len(sg) + sg->offset;

		etnaviv_domain_unmap(domain, da, bytes);
		da += bytes;
	}
	return ret;
}

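/*
 * Unmap a scatter-gather table starting at the given IOVA, entry by entry.
 */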
static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
				struct sg_table *sgt, unsigned len)
{
	struct etnaviv_iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg_dma_len(sg) + sg->offset;

		etnaviv_domain_unmap(domain, da, bytes);

		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

		BUG_ON(!PAGE_ALIGNED(bytes));

		da += bytes;
	}
}

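/*
 * Tear down the MMU mapping of an object and give its address range back
 * to the drm_mm address space manager.
 */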
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
	struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
			    etnaviv_obj->sgt, etnaviv_obj->base.size);
	drm_mm_remove_node(&mapping->vram_node);
}

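/*
 * Find a free IOVA range of the requested size. If the address space is
 * exhausted, scan for mappings that are no longer in use, evict them and
 * retry the allocation in eviction mode. Must be called with mmu->lock held.
 */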
static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
				   struct drm_mm_node *node, size_t size)
{
	struct etnaviv_vram_mapping *free = NULL;
	enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
	int ret;

	lockdep_assert_held(&mmu->lock);

	while (1) {
		struct etnaviv_vram_mapping *m, *n;
		struct drm_mm_scan scan;
		struct list_head list;
		bool found;

		ret = drm_mm_insert_node_in_range(&mmu->mm, node,
						  size, 0, 0, 0, U64_MAX, mode);
		if (ret != -ENOSPC)
			break;

		/* Try to retire some entries */
		drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, mode);

		found = 0;
		INIT_LIST_HEAD(&list);
		list_for_each_entry(free, &mmu->mappings, mmu_node) {
			/* If this vram node has not been used, skip this. */
			if (!free->vram_node.mm)
				continue;

			/*
			 * If the iova is pinned, then it's in-use,
			 * so we must keep its mapping.
			 */
			if (free->use)
				continue;

			list_add(&free->scan_node, &list);
			if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
				found = true;
				break;
			}
		}

		if (!found) {
			/* Nothing found, clean up and fail */
			list_for_each_entry_safe(m, n, &list, scan_node)
				BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
			break;
		}

		/*
		 * drm_mm does not allow any other operations while
		 * scanning, so we have to remove all blocks first.
		 * If drm_mm_scan_remove_block() returns false, we
		 * can leave the block pinned.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node)
			if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
				list_del_init(&m->scan_node);

		/*
		 * Unmap the blocks which need to be reaped from the MMU.
		 * Clear the mmu pointer to prevent the mapping_get finding
		 * this mapping.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node) {
			etnaviv_iommu_remove_mapping(mmu, m);
			m->mmu = NULL;
			list_del_init(&m->mmu_node);
			list_del_init(&m->scan_node);
		}

		mode = DRM_MM_INSERT_EVICT;

		/*
		 * We removed enough mappings so that the new allocation will
		 * succeed, retry the allocation one more time.
		 */
	}

	return ret;
}

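/*
 * Map a GEM object into the GPU address space. On MMUv1, a contiguous
 * buffer that sits inside the first 2GB above memory_base is used at its
 * physical offset directly, without setting up page table entries.
 */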
int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping)
{
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct drm_mm_node *node;
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	mutex_lock(&mmu->lock);

	/* v1 MMU can optimize single entry (contiguous) scatterlists */
	if (mmu->version == ETNAVIV_IOMMU_V1 &&
	    sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
		u32 iova;

		iova = sg_dma_address(sgt->sgl) - memory_base;
		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
			mapping->iova = iova;
			list_add_tail(&mapping->mmu_node, &mmu->mappings);
			ret = 0;
			goto unlock;
		}
	}

	node = &mapping->vram_node;

	ret = etnaviv_iommu_find_iova(mmu, node, etnaviv_obj->base.size);
	if (ret < 0)
		goto unlock;

	mapping->iova = node->start;
	ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
				ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);

	if (ret < 0) {
		drm_mm_remove_node(node);
		goto unlock;
	}

	list_add_tail(&mapping->mmu_node, &mmu->mappings);
	mmu->need_flush = true;
unlock:
	mutex_unlock(&mmu->lock);

	return ret;
}

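/*
 * Remove a GEM object's mapping from the GPU address space. The mapping
 * must not be in use anymore.
 */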
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
	struct etnaviv_vram_mapping *mapping)
{
	WARN_ON(mapping->use);

	mutex_lock(&mmu->lock);

	/* If the vram node is on the mm, unmap and remove the node */
	if (mapping->vram_node.mm == &mmu->mm)
		etnaviv_iommu_remove_mapping(mmu, mapping);

	list_del(&mapping->mmu_node);
	mmu->need_flush = true;
	mutex_unlock(&mmu->lock);
}

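/* Tear down the address space manager and free the IOMMU domain. */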
void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
{
	drm_mm_takedown(&mmu->mm);
	mmu->domain->ops->free(mmu->domain);
	kfree(mmu);
}

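/*
 * Allocate and initialize an MMU context for a GPU core, selecting the v1
 * or v2 page table format based on the core's feature bits.
 */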
struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
{
	enum etnaviv_iommu_version version;
	struct etnaviv_iommu *mmu;

	mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	if (!(gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)) {
		mmu->domain = etnaviv_iommuv1_domain_alloc(gpu);
		version = ETNAVIV_IOMMU_V1;
	} else {
		mmu->domain = etnaviv_iommuv2_domain_alloc(gpu);
		version = ETNAVIV_IOMMU_V2;
	}

	if (!mmu->domain) {
		dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
		kfree(mmu);
		return ERR_PTR(-ENOMEM);
	}

	mmu->gpu = gpu;
	mmu->version = version;
	mutex_init(&mmu->lock);
	INIT_LIST_HEAD(&mmu->mappings);

	drm_mm_init(&mmu->mm, mmu->domain->base, mmu->domain->size);

	return mmu;
}

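/* Reprogram the MMU after a GPU reset, dispatching on the MMU version. */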
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu)
{
	if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
		etnaviv_iommuv1_restore(gpu);
	else
		etnaviv_iommuv2_restore(gpu);
}

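/*
 * Map a suballocated buffer into the GPU address space. On MMUv1 the
 * physical offset from memory_base is used directly; on MMUv2 an IOVA
 * range is allocated and mapped read-only.
 */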
int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
				  struct drm_mm_node *vram_node, size_t size,
				  u32 *iova)
{
	struct etnaviv_iommu *mmu = gpu->mmu;

	if (mmu->version == ETNAVIV_IOMMU_V1) {
		*iova = paddr - gpu->memory_base;
		return 0;
	} else {
		int ret;

		mutex_lock(&mmu->lock);
		ret = etnaviv_iommu_find_iova(mmu, vram_node, size);
		if (ret < 0) {
			mutex_unlock(&mmu->lock);
			return ret;
		}
		ret = etnaviv_domain_map(mmu->domain, vram_node->start, paddr,
					 size, ETNAVIV_PROT_READ);
		if (ret < 0) {
			drm_mm_remove_node(vram_node);
			mutex_unlock(&mmu->lock);
			return ret;
		}
		gpu->mmu->need_flush = true;
		mutex_unlock(&mmu->lock);

		*iova = (u32)vram_node->start;
		return 0;
	}
}

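/*
 * Release a suballocation mapping again. Only MMUv2 has page table entries
 * and an IOVA range to tear down.
 */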
void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
				   struct drm_mm_node *vram_node, size_t size,
				   u32 iova)
{
	struct etnaviv_iommu *mmu = gpu->mmu;

	if (mmu->version == ETNAVIV_IOMMU_V2) {
		mutex_lock(&mmu->lock);
		etnaviv_domain_unmap(mmu->domain, iova, size);
		drm_mm_remove_node(vram_node);
		mutex_unlock(&mmu->lock);
	}
}
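
/* Return the size of a page table dump for this MMU domain. */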
size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
{
	return iommu->domain->ops->dump_size(iommu->domain);
}

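/* Dump the MMU page tables into the provided buffer. */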
void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
{
	iommu->domain->ops->dump(iommu->domain, buf);
}