// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2016, Linaro Limited
 */
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/fdtable.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include "tee_private.h"

static void tee_shm_release(struct tee_shm *shm)
{
        struct tee_device *teedev = shm->ctx->teedev;

        if (shm->flags & TEE_SHM_DMA_BUF) {
                mutex_lock(&teedev->mutex);
                idr_remove(&teedev->idr, shm->id);
                mutex_unlock(&teedev->mutex);
        }

        if (shm->flags & TEE_SHM_POOL) {
                struct tee_shm_pool_mgr *poolm;

                if (shm->flags & TEE_SHM_DMA_BUF)
                        poolm = teedev->pool->dma_buf_mgr;
                else
                        poolm = teedev->pool->private_mgr;

                poolm->ops->free(poolm, shm);
        } else if (shm->flags & TEE_SHM_REGISTER) {
                size_t n;
                int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);

                if (rc)
                        dev_err(teedev->dev.parent,
                                "unregister shm %p failed: %d\n", shm, rc);

                for (n = 0; n < shm->num_pages; n++)
                        put_page(shm->pages[n]);

                kfree(shm->pages);
        }

        teedev_ctx_put(shm->ctx);

        kfree(shm);

        tee_device_put(teedev);
}

/*
 * The dma-buf is exported only so that the buffer can be passed to and
 * mmapped by user space. Mapping the buffer for device access is not
 * supported, so the map/unmap ops are no-ops.
 */
static struct sg_table *tee_shm_op_map_dma_buf(struct dma_buf_attachment
                        *attach, enum dma_data_direction dir)
{
        return NULL;
}

static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment *attach,
                                     struct sg_table *table,
                                     enum dma_data_direction dir)
{
}

static void tee_shm_op_release(struct dma_buf *dmabuf)
{
        struct tee_shm *shm = dmabuf->priv;

        tee_shm_release(shm);
}

static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
        struct tee_shm *shm = dmabuf->priv;
        size_t size = vma->vm_end - vma->vm_start;

        /* Refuse sharing shared memory provided by an application */
        if (shm->flags & TEE_SHM_USER_MAPPED)
                return -EINVAL;

        return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
                               size, vma->vm_page_prot);
}

static const struct dma_buf_ops tee_shm_dma_buf_ops = {
        .map_dma_buf = tee_shm_op_map_dma_buf,
        .unmap_dma_buf = tee_shm_op_unmap_dma_buf,
        .release = tee_shm_op_release,
        .mmap = tee_shm_op_mmap,
};

/**
 * tee_shm_alloc() - Allocate shared memory
 * @ctx:        Context that allocates the shared memory
 * @size:       Requested size of shared memory
 * @flags:      Flags setting properties for the requested shared memory.
 *
 * Memory allocated as global shared memory is automatically freed when the
 * TEE file pointer is closed. The @flags field uses the bits defined by
 * TEE_SHM_* in <linux/tee_drv.h>. TEE_SHM_MAPPED must currently always be
 * set. If TEE_SHM_DMA_BUF global shared memory will be allocated and
 * associated with a dma-buf handle, else driver private memory.
 *
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
{
        struct tee_device *teedev = ctx->teedev;
        struct tee_shm_pool_mgr *poolm = NULL;
        struct tee_shm *shm;
        void *ret;
        int rc;

        if (!(flags & TEE_SHM_MAPPED)) {
                dev_err(teedev->dev.parent,
                        "only mapped allocations supported\n");
                return ERR_PTR(-EINVAL);
        }

        if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF))) {
                dev_err(teedev->dev.parent, "invalid shm flags 0x%x\n", flags);
                return ERR_PTR(-EINVAL);
        }

        if (!tee_device_get(teedev))
                return ERR_PTR(-EINVAL);

        if (!teedev->pool) {
                /* teedev has been detached from driver */
                ret = ERR_PTR(-EINVAL);
                goto err_dev_put;
        }

        shm = kzalloc(sizeof(*shm), GFP_KERNEL);
        if (!shm) {
                ret = ERR_PTR(-ENOMEM);
                goto err_dev_put;
        }

        shm->flags = flags | TEE_SHM_POOL;
        shm->ctx = ctx;
        if (flags & TEE_SHM_DMA_BUF)
                poolm = teedev->pool->dma_buf_mgr;
        else
                poolm = teedev->pool->private_mgr;

        rc = poolm->ops->alloc(poolm, shm, size);
        if (rc) {
                ret = ERR_PTR(rc);
                goto err_kfree;
        }

        if (flags & TEE_SHM_DMA_BUF) {
                DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

                mutex_lock(&teedev->mutex);
                shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
                mutex_unlock(&teedev->mutex);
                if (shm->id < 0) {
                        ret = ERR_PTR(shm->id);
                        goto err_pool_free;
                }

                exp_info.ops = &tee_shm_dma_buf_ops;
                exp_info.size = shm->size;
                exp_info.flags = O_RDWR;
                exp_info.priv = shm;

                shm->dmabuf = dma_buf_export(&exp_info);
                if (IS_ERR(shm->dmabuf)) {
                        ret = ERR_CAST(shm->dmabuf);
                        goto err_rem;
                }
        }

        if (ctx)
                teedev_ctx_get(ctx);

        return shm;
err_rem:
        if (flags & TEE_SHM_DMA_BUF) {
                mutex_lock(&teedev->mutex);
                idr_remove(&teedev->idr, shm->id);
                mutex_unlock(&teedev->mutex);
        }
err_pool_free:
        poolm->ops->free(poolm, shm);
err_kfree:
        kfree(shm);
err_dev_put:
        tee_device_put(teedev);
        return ret;
}
EXPORT_SYMBOL_GPL(tee_shm_alloc);

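/*
 * Example (illustrative sketch, not part of the driver): a typical caller
 * allocates a dma-buf backed buffer, fills it through the kernel mapping
 * and frees it again. "ctx" is assumed to be a valid tee_context obtained
 * earlier, e.g. via tee_client_open_context().
 *
 *        struct tee_shm *shm;
 *        void *va;
 *
 *        shm = tee_shm_alloc(ctx, 4096, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
 *        if (IS_ERR(shm))
 *                return PTR_ERR(shm);
 *
 *        va = tee_shm_get_va(shm, 0);
 *        if (IS_ERR(va)) {
 *                tee_shm_free(shm);
 *                return PTR_ERR(va);
 *        }
 *        memset(va, 0, 4096);
 *        ...
 *        tee_shm_free(shm);
 */
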
/**
 * tee_shm_register() - Register shared memory buffer
 * @ctx:        Context that registers the shared memory
 * @addr:       Address in user space of the shared buffer
 * @length:     Length of the shared buffer
 * @flags:      Flags setting properties for the requested shared memory.
 *
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
                                 size_t length, u32 flags)
{
        struct tee_device *teedev = ctx->teedev;
        const u32 req_flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED;
        struct tee_shm *shm;
        void *ret;
        int rc;
        int num_pages;
        unsigned long start;

        if (flags != req_flags)
                return ERR_PTR(-ENOTSUPP);

        if (!tee_device_get(teedev))
                return ERR_PTR(-EINVAL);

        if (!teedev->desc->ops->shm_register ||
            !teedev->desc->ops->shm_unregister) {
                tee_device_put(teedev);
                return ERR_PTR(-ENOTSUPP);
        }

        teedev_ctx_get(ctx);

        shm = kzalloc(sizeof(*shm), GFP_KERNEL);
        if (!shm) {
                ret = ERR_PTR(-ENOMEM);
                goto err;
        }

        shm->flags = flags | TEE_SHM_REGISTER;
        shm->ctx = ctx;
        shm->id = -1;
        addr = untagged_addr(addr);
        start = rounddown(addr, PAGE_SIZE);
        shm->offset = addr - start;
        shm->size = length;
        num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
        shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
        if (!shm->pages) {
                ret = ERR_PTR(-ENOMEM);
                goto err;
        }

        rc = get_user_pages_fast(start, num_pages, FOLL_WRITE, shm->pages);
        if (rc > 0)
                shm->num_pages = rc;
        if (rc != num_pages) {
                if (rc >= 0)
                        rc = -ENOMEM;
                ret = ERR_PTR(rc);
                goto err;
        }

        mutex_lock(&teedev->mutex);
        shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
        mutex_unlock(&teedev->mutex);

        if (shm->id < 0) {
                ret = ERR_PTR(shm->id);
                goto err;
        }

        rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
                                             shm->num_pages, start);
        if (rc) {
                ret = ERR_PTR(rc);
                goto err;
        }

        if (flags & TEE_SHM_DMA_BUF) {
                DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

                exp_info.ops = &tee_shm_dma_buf_ops;
                exp_info.size = shm->size;
                exp_info.flags = O_RDWR;
                exp_info.priv = shm;

                shm->dmabuf = dma_buf_export(&exp_info);
                if (IS_ERR(shm->dmabuf)) {
                        ret = ERR_CAST(shm->dmabuf);
                        teedev->desc->ops->shm_unregister(ctx, shm);
                        goto err;
                }
        }

        return shm;
err:
        if (shm) {
                size_t n;

                if (shm->id >= 0) {
                        mutex_lock(&teedev->mutex);
                        idr_remove(&teedev->idr, shm->id);
                        mutex_unlock(&teedev->mutex);
                }
                if (shm->pages) {
                        for (n = 0; n < shm->num_pages; n++)
                                put_page(shm->pages[n]);
                        kfree(shm->pages);
                }
        }
        kfree(shm);
        teedev_ctx_put(ctx);
        tee_device_put(teedev);
        return ret;
}
EXPORT_SYMBOL_GPL(tee_shm_register);

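/*
 * Example (illustrative sketch, not part of the driver): registering a
 * possibly page-unaligned user space buffer, e.g. from a
 * TEE_IOC_SHM_REGISTER ioctl handler. "uaddr" and "len" are assumed to
 * come from the already validated ioctl argument.
 *
 *        struct tee_shm *shm;
 *
 *        shm = tee_shm_register(ctx, uaddr, len,
 *                               TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED);
 *        if (IS_ERR(shm))
 *                return PTR_ERR(shm);
 *
 * The pinned pages are released again when the last reference is dropped
 * and tee_shm_release() runs.
 */
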
/**
 * tee_shm_get_fd() - Increase reference count and return file descriptor
 * @shm:        Shared memory handle
 * @returns user space file descriptor to shared memory
 */
int tee_shm_get_fd(struct tee_shm *shm)
{
        int fd;

        if (!(shm->flags & TEE_SHM_DMA_BUF))
                return -EINVAL;

        get_dma_buf(shm->dmabuf);
        fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
        if (fd < 0)
                dma_buf_put(shm->dmabuf);
        return fd;
}

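/*
 * Example (illustrative sketch, not part of the driver): handing a file
 * descriptor for a dma-buf backed shm to user space, as the
 * TEE_IOC_SHM_ALLOC ioctl does. The fd carries its own dma-buf reference,
 * so a failure here leaves the shm reference count unchanged.
 *
 *        int fd = tee_shm_get_fd(shm);
 *
 *        if (fd < 0)
 *                return fd;
 *        (pass "fd" back to user space)
 */
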
/**
 * tee_shm_free() - Free shared memory
 * @shm:        Handle to shared memory to free
 */
void tee_shm_free(struct tee_shm *shm)
{
        /*
         * dma_buf_put() decreases the dmabuf reference counter and will
         * call tee_shm_release() when the last reference is gone.
         *
         * Driver private memory doesn't have a reference counter, so we
         * call tee_shm_release() directly instead.
         */
        if (shm->flags & TEE_SHM_DMA_BUF)
                dma_buf_put(shm->dmabuf);
        else
                tee_shm_release(shm);
}
EXPORT_SYMBOL_GPL(tee_shm_free);

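/*
 * Example (illustrative sketch, not part of the driver): driver private
 * memory has no dma-buf reference counter, so tee_shm_free() releases it
 * immediately. "struct my_msg" is a hypothetical message type.
 *
 *        struct tee_shm *shm;
 *
 *        shm = tee_shm_alloc(ctx, sizeof(struct my_msg), TEE_SHM_MAPPED);
 *        if (IS_ERR(shm))
 *                return PTR_ERR(shm);
 *        ...
 *        tee_shm_free(shm);        (frees right away, no other references)
 */
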
/**
 * tee_shm_va2pa() - Get physical address of a virtual address
 * @shm:        Shared memory handle
 * @va:         Virtual address to translate
 * @pa:         Returned physical address
 * @returns 0 on success and < 0 on failure
 */
int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa)
{
        if (!(shm->flags & TEE_SHM_MAPPED))
                return -EINVAL;
        /* Check that we're in the range of the shm */
        if ((char *)va < (char *)shm->kaddr)
                return -EINVAL;
        if ((char *)va >= ((char *)shm->kaddr + shm->size))
                return -EINVAL;

        return tee_shm_get_pa(
                        shm, (unsigned long)va - (unsigned long)shm->kaddr, pa);
}
EXPORT_SYMBOL_GPL(tee_shm_va2pa);

/**
 * tee_shm_pa2va() - Get virtual address of a physical address
 * @shm:        Shared memory handle
 * @pa:         Physical address to translate
 * @va:         Returned virtual address
 * @returns 0 on success and < 0 on failure
 */
int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va)
{
        if (!(shm->flags & TEE_SHM_MAPPED))
                return -EINVAL;
        /* Check that we're in the range of the shm */
        if (pa < shm->paddr)
                return -EINVAL;
        if (pa >= (shm->paddr + shm->size))
                return -EINVAL;

        if (va) {
                void *v = tee_shm_get_va(shm, pa - shm->paddr);

                if (IS_ERR(v))
                        return PTR_ERR(v);
                *va = v;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_pa2va);

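/*
 * Example (illustrative sketch, not part of the driver): the two helpers
 * above are inverses for addresses inside a mapped shm, so a round trip
 * returns the original kernel address.
 *
 *        phys_addr_t pa;
 *        void *va2;
 *
 *        if (tee_shm_va2pa(shm, va, &pa))
 *                return -EINVAL;
 *        if (tee_shm_pa2va(shm, pa, &va2))
 *                return -EINVAL;
 *        (now va2 == va)
 */
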
/**
 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
 * @shm:        Shared memory handle
 * @offs:       Offset from start of this shared memory
 * @returns virtual address of the shared memory + offs if offs is within
 *      the bounds of this shared memory, else an ERR_PTR
 */
void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
{
        if (!(shm->flags & TEE_SHM_MAPPED))
                return ERR_PTR(-EINVAL);
        if (offs >= shm->size)
                return ERR_PTR(-EINVAL);
        return (char *)shm->kaddr + offs;
}
EXPORT_SYMBOL_GPL(tee_shm_get_va);

/**
 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
 * @shm:        Shared memory handle
 * @offs:       Offset from start of this shared memory
 * @pa:         Physical address to return
 * @returns 0 if offs is within the bounds of this shared memory, else an
 *      error code.
 */
int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
{
        if (offs >= shm->size)
                return -EINVAL;
        if (pa)
                *pa = shm->paddr + offs;
        return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_get_pa);

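/*
 * Example (illustrative sketch, not part of the driver): writing a message
 * at an offset inside a shm and looking up the physical address that is
 * then typically passed to the secure world. "offs", "msg" and "msg_len"
 * are assumed to be validated by the caller.
 *
 *        void *p = tee_shm_get_va(shm, offs);
 *        phys_addr_t pa;
 *
 *        if (IS_ERR(p))
 *                return PTR_ERR(p);
 *        if (tee_shm_get_pa(shm, offs, &pa))
 *                return -EINVAL;
 *        memcpy(p, msg, msg_len);
 */
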
/**
 * tee_shm_get_from_id() - Find shared memory object and increase reference
 * count
 * @ctx:        Context owning the shared memory
 * @id:         Id of shared memory object
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
{
        struct tee_device *teedev;
        struct tee_shm *shm;

        if (!ctx)
                return ERR_PTR(-EINVAL);

        teedev = ctx->teedev;
        mutex_lock(&teedev->mutex);
        shm = idr_find(&teedev->idr, id);
        if (!shm || shm->ctx != ctx)
                shm = ERR_PTR(-EINVAL);
        else if (shm->flags & TEE_SHM_DMA_BUF)
                get_dma_buf(shm->dmabuf);
        mutex_unlock(&teedev->mutex);
        return shm;
}
EXPORT_SYMBOL_GPL(tee_shm_get_from_id);

/**
 * tee_shm_put() - Decrease reference count on a shared memory handle
 * @shm:        Shared memory handle
 */
void tee_shm_put(struct tee_shm *shm)
{
        if (shm->flags & TEE_SHM_DMA_BUF)
                dma_buf_put(shm->dmabuf);
}
EXPORT_SYMBOL_GPL(tee_shm_put);
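
/*
 * Example (illustrative sketch, not part of the driver): resolving the id
 * user space got from TEE_IOC_SHM_ALLOC back to a shm. The reference taken
 * by tee_shm_get_from_id() must be balanced with tee_shm_put().
 *
 *        struct tee_shm *shm = tee_shm_get_from_id(ctx, id);
 *
 *        if (IS_ERR(shm))
 *                return PTR_ERR(shm);
 *        ...
 *        tee_shm_put(shm);
 */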