drivers/gpu/drm/vkms/vkms_gem.c
// SPDX-License-Identifier: GPL-2.0+

#include <linux/dma-buf.h>
#include <linux/shmem_fs.h>
#include <linux/vmalloc.h>
#include <drm/drm_prime.h>

#include "vkms_drv.h"

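/*
 * Allocate a vkms_gem_object and initialize the embedded GEM object with
 * a shmem backing store of the page-aligned size.
 */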
static struct vkms_gem_object *__vkms_gem_create(struct drm_device *dev,
                                                 u64 size)
{
        struct vkms_gem_object *obj;
        int ret;

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return ERR_PTR(-ENOMEM);

        size = roundup(size, PAGE_SIZE);
        ret = drm_gem_object_init(dev, &obj->gem, size);
        if (ret) {
                kfree(obj);
                return ERR_PTR(ret);
        }

        mutex_init(&obj->pages_lock);

        return obj;
}

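/*
 * Release the GEM object. The page array and kernel mapping must already
 * have been torn down (see vkms_gem_vunmap()) by the time this runs.
 */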
void vkms_gem_free_object(struct drm_gem_object *obj)
{
        struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object,
                                                   gem);

        WARN_ON(gem->pages);
        WARN_ON(gem->vaddr);

        mutex_destroy(&gem->pages_lock);
        drm_gem_object_release(obj);
        kfree(gem);
}

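/*
 * Page-fault handler for userspace mmaps of a vkms GEM object. Faults are
 * served from the cached page array when one exists, and otherwise directly
 * from the shmem mapping backing the object.
 */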
vm_fault_t vkms_gem_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct vkms_gem_object *obj = vma->vm_private_data;
        unsigned long vaddr = vmf->address;
        pgoff_t page_offset;
        loff_t num_pages;
        vm_fault_t ret = VM_FAULT_SIGBUS;

        page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
        num_pages = DIV_ROUND_UP(obj->gem.size, PAGE_SIZE);

        /* Valid page offsets run from 0 to num_pages - 1. */
        if (page_offset >= num_pages)
                return VM_FAULT_SIGBUS;

        mutex_lock(&obj->pages_lock);
        if (obj->pages) {
                get_page(obj->pages[page_offset]);
                vmf->page = obj->pages[page_offset];
                ret = 0;
        }
        mutex_unlock(&obj->pages_lock);
        if (ret) {
                struct page *page;
                struct address_space *mapping;

                mapping = file_inode(obj->gem.filp)->i_mapping;
                page = shmem_read_mapping_page(mapping, page_offset);

                if (!IS_ERR(page)) {
                        vmf->page = page;
                        ret = 0;
                } else {
                        switch (PTR_ERR(page)) {
                        case -ENOSPC:
                        case -ENOMEM:
                                ret = VM_FAULT_OOM;
                                break;
                        case -EBUSY:
                                ret = VM_FAULT_RETRY;
                                break;
                        case -EFAULT:
                        case -EINVAL:
                                ret = VM_FAULT_SIGBUS;
                                break;
                        default:
                                WARN_ON(PTR_ERR(page));
                                ret = VM_FAULT_SIGBUS;
                                break;
                        }
                }
        }
        return ret;
}

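/*
 * Create a GEM object and a userspace handle for it. The handle holds the
 * only long-term reference, so the local creation reference is dropped
 * before returning.
 */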
struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
                                       struct drm_file *file,
                                       u32 *handle,
                                       u64 size)
{
        struct vkms_gem_object *obj;
        int ret;

        if (!file || !dev || !handle)
                return ERR_PTR(-EINVAL);

        obj = __vkms_gem_create(dev, size);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        ret = drm_gem_handle_create(file, &obj->gem, handle);
        drm_gem_object_put_unlocked(&obj->gem);
        if (ret)
                return ERR_PTR(ret);

        return &obj->gem;
}

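/* Implements the dumb-buffer creation ioctl for the vkms driver. */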
int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
                     struct drm_mode_create_dumb *args)
{
        struct drm_gem_object *gem_obj;
        u64 pitch, size;

        if (!args || !dev || !file)
                return -EINVAL;

        pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
        size = pitch * args->height;

        if (!size)
                return -EINVAL;

        gem_obj = vkms_gem_create(dev, file, &args->handle, size);
        if (IS_ERR(gem_obj))
                return PTR_ERR(gem_obj);

        args->size = gem_obj->size;
        args->pitch = pitch;

        DRM_DEBUG_DRIVER("Created object of size %llu\n", size);

        return 0;
}

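/*
 * Return the object's page array, populating it from shmem on first use.
 * The cmpxchg() makes concurrent callers agree on a single array: the
 * loser of the race puts its freshly pinned pages back.
 */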
static struct page **_get_pages(struct vkms_gem_object *vkms_obj)
{
        struct drm_gem_object *gem_obj = &vkms_obj->gem;

        if (!vkms_obj->pages) {
                struct page **pages = drm_gem_get_pages(gem_obj);

                if (IS_ERR(pages))
                        return pages;

                if (cmpxchg(&vkms_obj->pages, NULL, pages))
                        drm_gem_put_pages(gem_obj, pages, false, true);
        }

        return vkms_obj->pages;
}

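/* Drop one vmap reference; unmap and release the pages on the last one. */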
void vkms_gem_vunmap(struct drm_gem_object *obj)
{
        struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);

        mutex_lock(&vkms_obj->pages_lock);
        if (vkms_obj->vmap_count < 1) {
                WARN_ON(vkms_obj->vaddr);
                WARN_ON(vkms_obj->pages);
                mutex_unlock(&vkms_obj->pages_lock);
                return;
        }

        vkms_obj->vmap_count--;

        if (vkms_obj->vmap_count == 0) {
                vunmap(vkms_obj->vaddr);
                vkms_obj->vaddr = NULL;
                drm_gem_put_pages(obj, vkms_obj->pages, false, true);
                vkms_obj->pages = NULL;
        }

        mutex_unlock(&vkms_obj->pages_lock);
}

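/*
 * Map the object into the kernel's address space, pinning its pages on
 * first use. Calls are counted so that vkms_gem_vunmap() tears the mapping
 * down only when the last user is gone.
 */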
int vkms_gem_vmap(struct drm_gem_object *obj)
{
        struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
        int ret = 0;

        mutex_lock(&vkms_obj->pages_lock);

        if (!vkms_obj->vaddr) {
                unsigned int n_pages = obj->size >> PAGE_SHIFT;
                struct page **pages = _get_pages(vkms_obj);

                if (IS_ERR(pages)) {
                        ret = PTR_ERR(pages);
                        goto out;
                }

                vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
                if (!vkms_obj->vaddr)
                        goto err_vmap;
        }

        vkms_obj->vmap_count++;
        goto out;

err_vmap:
        ret = -ENOMEM;
        drm_gem_put_pages(obj, vkms_obj->pages, false, true);
        vkms_obj->pages = NULL;
out:
        mutex_unlock(&vkms_obj->pages_lock);
        return ret;
}

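/*
 * Import a PRIME buffer: wrap the dma-buf's sg_table in a new GEM object
 * and build a page array from it so the object can be vmapped like a
 * natively allocated one.
 */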
struct drm_gem_object *
vkms_prime_import_sg_table(struct drm_device *dev,
                           struct dma_buf_attachment *attach,
                           struct sg_table *sg)
{
        struct vkms_gem_object *obj;
        int npages;

        obj = __vkms_gem_create(dev, attach->dmabuf->size);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;
        DRM_DEBUG_PRIME("Importing %d pages\n", npages);

        obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (!obj->pages) {
                vkms_gem_free_object(&obj->gem);
                return ERR_PTR(-ENOMEM);
        }

        drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
        return &obj->gem;
}