drivers/dma-buf/udmabuf.c
// SPDX-License-Identifier: GPL-2.0
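/*
 * udmabuf: create dma-bufs from memfd-backed pages, so that buffers
 * allocated in userspace can be shared with devices through the
 * dma-buf API.
 *
 * Illustrative userspace usage, a minimal sketch against the uapi in
 * <linux/udmabuf.h> (error handling omitted):
 *
 *	int memfd = memfd_create("buffer", MFD_ALLOW_SEALING);
 *	ftruncate(memfd, size);                   // page-aligned size
 *	fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK); // required, see below
 *
 *	struct udmabuf_create create = {
 *		.memfd  = memfd,
 *		.offset = 0,                      // page-aligned
 *		.size   = size,
 *	};
 *	int devfd = open("/dev/udmabuf", O_RDWR);
 *	int buffd = ioctl(devfd, UDMABUF_CREATE, &create);
 */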
#include <linux/cred.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memfd.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/udmabuf.h>

static const u32    list_limit = 1024;  /* udmabuf_create_list->count limit */
static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes  */

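/*
 * Per-buffer state: the pinned backing pages, a scatter-gather table
 * cached for CPU-access synchronization, and the misc device that the
 * cached table is mapped against.
 */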
struct udmabuf {
	pgoff_t pagecount;
	struct page **pages;
	struct sg_table *sg;
	struct miscdevice *device;
};

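/* mmap fault handler: hand out the already-pinned page for this offset. */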
static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct udmabuf *ubuf = vma->vm_private_data;

	vmf->page = ubuf->pages[vmf->pgoff];
	get_page(vmf->page);
	return 0;
}

static const struct vm_operations_struct udmabuf_vm_ops = {
	.fault = udmabuf_vm_fault,
};

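/*
 * Only shared mappings are allowed; a private (COW) mapping would stop
 * aliasing the pages that back the dma-buf.
 */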
static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct udmabuf *ubuf = buf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vma->vm_ops = &udmabuf_vm_ops;
	vma->vm_private_data = ubuf;
	return 0;
}

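/*
 * Build a scatter-gather table covering all pinned pages and DMA-map it
 * for @dev.
 */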
static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
				     enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct sg_table *sg;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount,
					0, ubuf->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret < 0)
		goto err;
	if (!dma_map_sg(dev, sg->sgl, sg->nents, direction)) {
		ret = -EINVAL;
		goto err;
	}
	return sg;

err:
	sg_free_table(sg);
	kfree(sg);
	return ERR_PTR(ret);
}

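/* Undo get_sg_table(): unmap the table and free it. */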
static void put_sg_table(struct device *dev, struct sg_table *sg,
			 enum dma_data_direction direction)
{
	dma_unmap_sg(dev, sg->sgl, sg->nents, direction);
	sg_free_table(sg);
	kfree(sg);
}

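/* dma_buf_ops map/unmap: build and release a table for the attached device. */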
static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
				    enum dma_data_direction direction)
{
	return get_sg_table(at->dev, at->dmabuf, direction);
}

static void unmap_udmabuf(struct dma_buf_attachment *at,
			  struct sg_table *sg,
			  enum dma_data_direction direction)
{
	put_sg_table(at->dev, sg, direction);
}

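/* dma-buf release: drop the cached table, unpin the pages, free state. */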
static void release_udmabuf(struct dma_buf *buf)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;
	pgoff_t pg;

	if (ubuf->sg)
		put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);

	for (pg = 0; pg < ubuf->pagecount; pg++)
		put_page(ubuf->pages[pg]);
	kfree(ubuf->pages);
	kfree(ubuf);
}

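/*
 * Prepare for CPU access: map the buffer against the misc device on
 * first use, afterwards just sync the pages back to the CPU.
 */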
static int begin_cpu_udmabuf(struct dma_buf *buf,
			     enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;

	if (!ubuf->sg) {
		ubuf->sg = get_sg_table(dev, buf, direction);
		if (IS_ERR(ubuf->sg)) {
			int ret = PTR_ERR(ubuf->sg);

			/*
			 * Don't leave an ERR_PTR behind: release_udmabuf()
			 * only checks for non-NULL before unmapping.
			 */
			ubuf->sg = NULL;
			return ret;
		}
	} else {
		dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
				    direction);
	}

	return 0;
}

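/* CPU access done: flush CPU writes out so the device sees them. */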
static int end_cpu_udmabuf(struct dma_buf *buf,
			   enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;

	if (!ubuf->sg)
		return -EINVAL;

	dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
	return 0;
}

static const struct dma_buf_ops udmabuf_ops = {
	.cache_sgt_mapping = true,
	.map_dma_buf       = map_udmabuf,
	.unmap_dma_buf     = unmap_udmabuf,
	.release           = release_udmabuf,
	.mmap              = mmap_udmabuf,
	.begin_cpu_access  = begin_cpu_udmabuf,
	.end_cpu_access    = end_cpu_udmabuf,
};

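/*
 * The backing memfd must be sealed against shrinking, so its pages
 * cannot disappear underneath us, and must not be write-sealed, since
 * the exported dma-buf allows writes.
 */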
#define SEALS_WANTED (F_SEAL_SHRINK)
#define SEALS_DENIED (F_SEAL_WRITE)

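/*
 * Create one dma-buf from a list of memfd ranges: validate alignment
 * and size limits, pin the shmem pages, export them, and return a
 * dma-buf fd.
 */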
static long udmabuf_create(struct miscdevice *device,
			   struct udmabuf_create_list *head,
			   struct udmabuf_create_item *list)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct file *memfd = NULL;
	struct udmabuf *ubuf;
	struct dma_buf *buf;
	pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
	struct page *page;
	int seals, ret = -EINVAL;
	u32 i, flags;

	ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
	if (!ubuf)
		return -ENOMEM;

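	/* First pass: check alignment and enforce the total size limit. */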
	pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
	for (i = 0; i < head->count; i++) {
		if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
			goto err;
		if (!IS_ALIGNED(list[i].size, PAGE_SIZE))
			goto err;
		ubuf->pagecount += list[i].size >> PAGE_SHIFT;
		if (ubuf->pagecount > pglimit)
			goto err;
	}
	ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages),
				    GFP_KERNEL);
	if (!ubuf->pages) {
		ret = -ENOMEM;
		goto err;
	}

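	/* Second pass: verify each memfd's seals and pin its pages. */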
	pgbuf = 0;
	for (i = 0; i < head->count; i++) {
		ret = -EBADFD;
		memfd = fget(list[i].memfd);
		if (!memfd)
			goto err;
		if (!shmem_mapping(file_inode(memfd)->i_mapping))
			goto err;
		seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
		if (seals == -EINVAL)
			goto err;
		ret = -EINVAL;
		if ((seals & SEALS_WANTED) != SEALS_WANTED ||
		    (seals & SEALS_DENIED) != 0)
			goto err;
		pgoff = list[i].offset >> PAGE_SHIFT;
		pgcnt = list[i].size   >> PAGE_SHIFT;
		for (pgidx = 0; pgidx < pgcnt; pgidx++) {
			page = shmem_read_mapping_page(
				file_inode(memfd)->i_mapping, pgoff + pgidx);
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
				goto err;
			}
			ubuf->pages[pgbuf++] = page;
		}
		fput(memfd);
		memfd = NULL;
	}

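	/* Export the pinned pages as a dma-buf and hand userspace an fd. */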
	exp_info.ops  = &udmabuf_ops;
	exp_info.size = ubuf->pagecount << PAGE_SHIFT;
	exp_info.priv = ubuf;
	exp_info.flags = O_RDWR;

	ubuf->device = device;
	buf = dma_buf_export(&exp_info);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto err;
	}

	flags = 0;
	if (head->flags & UDMABUF_FLAGS_CLOEXEC)
		flags |= O_CLOEXEC;
	return dma_buf_fd(buf, flags);

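	/* Error unwind: unpin everything pinned so far. */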
err:
	while (pgbuf > 0)
		put_page(ubuf->pages[--pgbuf]);
	if (memfd)
		fput(memfd);
	kfree(ubuf->pages);
	kfree(ubuf);
	return ret;
}

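/* UDMABUF_CREATE: wrap a single range as a one-entry list. */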
static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
{
	struct udmabuf_create create;
	struct udmabuf_create_list head;
	struct udmabuf_create_item list;

	if (copy_from_user(&create, (void __user *)arg,
			   sizeof(create)))
		return -EFAULT;

	head.flags  = create.flags;
	head.count  = 1;
	list.memfd  = create.memfd;
	list.offset = create.offset;
	list.size   = create.size;

	return udmabuf_create(filp->private_data, &head, &list);
}

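/* UDMABUF_CREATE_LIST: copy in the header and the item array, then create. */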
static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
{
	struct udmabuf_create_list head;
	struct udmabuf_create_item *list;
	int ret = -EINVAL;
	u32 lsize;

	if (copy_from_user(&head, (void __user *)arg, sizeof(head)))
		return -EFAULT;
	if (head.count > list_limit)
		return -EINVAL;
	lsize = sizeof(struct udmabuf_create_item) * head.count;
	list = memdup_user((void __user *)(arg + sizeof(head)), lsize);
	if (IS_ERR(list))
		return PTR_ERR(list);

	ret = udmabuf_create(filp->private_data, &head, list);
	kfree(list);
	return ret;
}

static long udmabuf_ioctl(struct file *filp, unsigned int ioctl,
			  unsigned long arg)
{
	long ret;

	switch (ioctl) {
	case UDMABUF_CREATE:
		ret = udmabuf_ioctl_create(filp, arg);
		break;
	case UDMABUF_CREATE_LIST:
		ret = udmabuf_ioctl_create_list(filp, arg);
		break;
	default:
		ret = -ENOTTY;
		break;
	}
	return ret;
}

static const struct file_operations udmabuf_fops = {
	.owner          = THIS_MODULE,
	.unlocked_ioctl = udmabuf_ioctl,
};

static struct miscdevice udmabuf_misc = {
	.minor          = MISC_DYNAMIC_MINOR,
	.name           = "udmabuf",
	.fops           = &udmabuf_fops,
};

static int __init udmabuf_dev_init(void)
{
	return misc_register(&udmabuf_misc);
}

static void __exit udmabuf_dev_exit(void)
{
	misc_deregister(&udmabuf_misc);
}

module_init(udmabuf_dev_init)
module_exit(udmabuf_dev_exit)

MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_LICENSE("GPL v2");