drivers/dma-buf/udmabuf.c
// SPDX-License-Identifier: GPL-2.0
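/*
 * udmabuf: export memfd-backed (shmem) pages as a dma-buf, so that
 * buffers allocated by userspace can be handed to dma-buf importers
 * through the /dev/udmabuf ioctl interface (qemu uses this to create
 * host dma-bufs for guest framebuffers, for example).
 */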
#include <linux/cred.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memfd.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/udmabuf.h>

static const u32    list_limit = 1024;  /* udmabuf_create_list->count limit */
static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes  */

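/* Per-buffer state, hung off dma_buf->priv for the lifetime of the export. */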
struct udmabuf {
        pgoff_t pagecount;              /* total number of referenced pages */
        struct page **pages;            /* the shmem pages backing the buffer */
        struct sg_table *sg;            /* cached mapping for CPU access */
        struct miscdevice *device;      /* /dev/udmabuf, used as DMA device */
};

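/*
 * CPU mmap support: on fault, hand out the already-referenced page for
 * the faulting offset, taking an extra reference for the CPU page table.
 */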
static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct udmabuf *ubuf = vma->vm_private_data;

        vmf->page = ubuf->pages[vmf->pgoff];
        get_page(vmf->page);
        return 0;
}

static const struct vm_operations_struct udmabuf_vm_ops = {
        .fault = udmabuf_vm_fault,
};

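/* dma_buf_ops.mmap: only shared mappings make sense for a shared buffer. */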
static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
{
        struct udmabuf *ubuf = buf->priv;

        if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
                return -EINVAL;

        vma->vm_ops = &udmabuf_vm_ops;
        vma->vm_private_data = ubuf;
        return 0;
}

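/*
 * Build a scatter/gather table over all referenced pages and map it for
 * DMA against @dev.  Shared by the attachment map path and by the cached
 * mapping used for CPU access (ubuf->sg).
 */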
static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
                                     enum dma_data_direction direction)
{
        struct udmabuf *ubuf = buf->priv;
        struct sg_table *sg;
        int ret;

        sg = kzalloc(sizeof(*sg), GFP_KERNEL);
        if (!sg)
                return ERR_PTR(-ENOMEM);
        ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount,
                                        0, ubuf->pagecount << PAGE_SHIFT,
                                        GFP_KERNEL);
        if (ret < 0)
                goto err;
        ret = dma_map_sgtable(dev, sg, direction, 0);
        if (ret < 0)
                goto err;
        return sg;

err:
        sg_free_table(sg);
        kfree(sg);
        return ERR_PTR(ret);
}

static void put_sg_table(struct device *dev, struct sg_table *sg,
                         enum dma_data_direction direction)
{
        dma_unmap_sgtable(dev, sg, direction, 0);
        sg_free_table(sg);
        kfree(sg);
}

static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
                                    enum dma_data_direction direction)
{
        return get_sg_table(at->dev, at->dmabuf, direction);
}

static void unmap_udmabuf(struct dma_buf_attachment *at,
                          struct sg_table *sg,
                          enum dma_data_direction direction)
{
        return put_sg_table(at->dev, sg, direction);
}

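/* dma_buf_ops.release: drop the cached CPU mapping and release every page. */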
static void release_udmabuf(struct dma_buf *buf)
{
        struct udmabuf *ubuf = buf->priv;
        struct device *dev = ubuf->device->this_device;
        pgoff_t pg;

        if (ubuf->sg)
                put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);

        for (pg = 0; pg < ubuf->pagecount; pg++)
                put_page(ubuf->pages[pg]);
        kfree(ubuf->pages);
        kfree(ubuf);
}

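/*
 * begin_cpu_access: create (and cache) the DMA mapping on first use;
 * on later calls just sync the buffer back to the CPU.
 */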
static int begin_cpu_udmabuf(struct dma_buf *buf,
                             enum dma_data_direction direction)
{
        struct udmabuf *ubuf = buf->priv;
        struct device *dev = ubuf->device->this_device;

        if (!ubuf->sg) {
                ubuf->sg = get_sg_table(dev, buf, direction);
                if (IS_ERR(ubuf->sg)) {
                        int ret = PTR_ERR(ubuf->sg);

                        /*
                         * Reset to NULL so release_udmabuf() does not try
                         * to unmap a stale ERR_PTR value.
                         */
                        ubuf->sg = NULL;
                        return ret;
                }
        } else {
                /* Sync the whole table, as originally passed to dma_map_sgtable(). */
                dma_sync_sgtable_for_cpu(dev, ubuf->sg, direction);
        }

        return 0;
}

static int end_cpu_udmabuf(struct dma_buf *buf,
                           enum dma_data_direction direction)
{
        struct udmabuf *ubuf = buf->priv;
        struct device *dev = ubuf->device->this_device;

        if (!ubuf->sg)
                return -EINVAL;

        dma_sync_sgtable_for_device(dev, ubuf->sg, direction);
        return 0;
}

static const struct dma_buf_ops udmabuf_ops = {
        .cache_sgt_mapping = true,
        .map_dma_buf       = map_udmabuf,
        .unmap_dma_buf     = unmap_udmabuf,
        .release           = release_udmabuf,
        .mmap              = mmap_udmabuf,
        .begin_cpu_access  = begin_cpu_udmabuf,
        .end_cpu_access    = end_cpu_udmabuf,
};

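/*
 * The memfd must be sealed against shrinking, so the referenced pages
 * cannot be truncated away, and must not be write-sealed, since the
 * exported dma-buf stays writable and would silently defeat F_SEAL_WRITE.
 */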
#define SEALS_WANTED (F_SEAL_SHRINK)
#define SEALS_DENIED (F_SEAL_WRITE)

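/*
 * Core creation path: validate every (memfd, offset, size) item, grab
 * references on the backing shmem pages, then export the lot as one
 * dma-buf and return its fd.
 */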
static long udmabuf_create(struct miscdevice *device,
                           struct udmabuf_create_list *head,
                           struct udmabuf_create_item *list)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        struct file *memfd = NULL;
        struct udmabuf *ubuf;
        struct dma_buf *buf;
        pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
        struct page *page;
        int seals, ret = -EINVAL;
        u32 i, flags;

        ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
        if (!ubuf)
                return -ENOMEM;

        /* First pass: check alignment and enforce the total size limit. */
        pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
        for (i = 0; i < head->count; i++) {
                if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
                        goto err;
                if (!IS_ALIGNED(list[i].size, PAGE_SIZE))
                        goto err;
                ubuf->pagecount += list[i].size >> PAGE_SHIFT;
                if (ubuf->pagecount > pglimit)
                        goto err;
        }
        ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages),
                                    GFP_KERNEL);
        if (!ubuf->pages) {
                ret = -ENOMEM;
                goto err;
        }

        /* Second pass: check seals and grab a reference on every page. */
        pgbuf = 0;
        for (i = 0; i < head->count; i++) {
                ret = -EBADFD;
                memfd = fget(list[i].memfd);
                if (!memfd)
                        goto err;
                if (!shmem_mapping(file_inode(memfd)->i_mapping))
                        goto err;
                seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
                if (seals == -EINVAL)
                        goto err;
                ret = -EINVAL;
                if ((seals & SEALS_WANTED) != SEALS_WANTED ||
                    (seals & SEALS_DENIED) != 0)
                        goto err;
                pgoff = list[i].offset >> PAGE_SHIFT;
                pgcnt = list[i].size   >> PAGE_SHIFT;
                for (pgidx = 0; pgidx < pgcnt; pgidx++) {
                        page = shmem_read_mapping_page(
                                file_inode(memfd)->i_mapping, pgoff + pgidx);
                        if (IS_ERR(page)) {
                                ret = PTR_ERR(page);
                                goto err;
                        }
                        ubuf->pages[pgbuf++] = page;
                }
                fput(memfd);
                memfd = NULL;
        }

        /* Export everything as a single dma-buf and return its fd. */
        exp_info.ops  = &udmabuf_ops;
        exp_info.size = ubuf->pagecount << PAGE_SHIFT;
        exp_info.priv = ubuf;
        exp_info.flags = O_RDWR;

        ubuf->device = device;
        buf = dma_buf_export(&exp_info);
        if (IS_ERR(buf)) {
                ret = PTR_ERR(buf);
                goto err;
        }

        flags = 0;
        if (head->flags & UDMABUF_FLAGS_CLOEXEC)
                flags |= O_CLOEXEC;
        return dma_buf_fd(buf, flags);

err:
        /* Drop the page references taken so far. */
        while (pgbuf > 0)
                put_page(ubuf->pages[--pgbuf]);
        if (memfd)
                fput(memfd);
        kfree(ubuf->pages);
        kfree(ubuf);
        return ret;
}

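/* UDMABUF_CREATE: wrap a single memfd range; reuses the list path with count 1. */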
static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
{
        struct udmabuf_create create;
        struct udmabuf_create_list head;
        struct udmabuf_create_item list;

        if (copy_from_user(&create, (void __user *)arg,
                           sizeof(create)))
                return -EFAULT;

        head.flags  = create.flags;
        head.count  = 1;
        list.memfd  = create.memfd;
        list.offset = create.offset;
        list.size   = create.size;

        return udmabuf_create(filp->private_data, &head, &list);
}

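/* UDMABUF_CREATE_LIST: combine up to list_limit memfd ranges into one buffer. */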
static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
{
        struct udmabuf_create_list head;
        struct udmabuf_create_item *list;
        int ret = -EINVAL;
        u32 lsize;

        if (copy_from_user(&head, (void __user *)arg, sizeof(head)))
                return -EFAULT;
        if (head.count > list_limit)
                return -EINVAL;
        lsize = sizeof(struct udmabuf_create_item) * head.count;
        list = memdup_user((void __user *)(arg + sizeof(head)), lsize);
        if (IS_ERR(list))
                return PTR_ERR(list);

        ret = udmabuf_create(filp->private_data, &head, list);
        kfree(list);
        return ret;
}

static long udmabuf_ioctl(struct file *filp, unsigned int ioctl,
                          unsigned long arg)
{
        long ret;

        switch (ioctl) {
        case UDMABUF_CREATE:
                ret = udmabuf_ioctl_create(filp, arg);
                break;
        case UDMABUF_CREATE_LIST:
                ret = udmabuf_ioctl_create_list(filp, arg);
                break;
        default:
                ret = -ENOTTY;
                break;
        }
        return ret;
}

static const struct file_operations udmabuf_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = udmabuf_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = udmabuf_ioctl,
#endif
};

static struct miscdevice udmabuf_misc = {
        .minor          = MISC_DYNAMIC_MINOR,
        .name           = "udmabuf",
        .fops           = &udmabuf_fops,
};

static int __init udmabuf_dev_init(void)
{
        return misc_register(&udmabuf_misc);
}

static void __exit udmabuf_dev_exit(void)
{
        misc_deregister(&udmabuf_misc);
}

module_init(udmabuf_dev_init)
module_exit(udmabuf_dev_exit)

MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_LICENSE("GPL v2");
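
/*
 * Userspace usage, as a minimal sketch (error handling omitted; the uapi
 * structures come from <linux/udmabuf.h>): allocate a memfd, seal it
 * with F_SEAL_SHRINK, then ask /dev/udmabuf to export it:
 *
 *      int memfd = memfd_create("buffer", MFD_ALLOW_SEALING);
 *      ftruncate(memfd, size);                 // size must be page-aligned
 *      fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);
 *
 *      struct udmabuf_create create = {
 *              .memfd  = memfd,
 *              .flags  = UDMABUF_FLAGS_CLOEXEC,
 *              .offset = 0,                    // page-aligned
 *              .size   = size,
 *      };
 *      int devfd = open("/dev/udmabuf", O_RDWR);
 *      int buffd = ioctl(devfd, UDMABUF_CREATE, &create);
 *
 * The returned fd is an ordinary dma-buf fd that can be mmap()ed or
 * handed to any dma-buf importer.
 */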