/*
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/export.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>

#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif
#include <linux/mem_encrypt.h>

#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>

#include "drm_internal.h"
#include "drm_legacy.h"

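/*
 * Bookkeeping for one userspace mapping of a legacy map: the vma and the
 * pid of the process that opened it, chained on drm_device::vmalist.
 */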
struct drm_vma_entry {
        struct list_head head;
        struct vm_area_struct *vma;
        pid_t pid;
};

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

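/*
 * Page protection for I/O maps (registers and frame buffers).  Chooses
 * write-combining or uncached attributes depending on the architecture
 * and on the _DRM_WRITE_COMBINING map flag.
 */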
static pgprot_t drm_io_prot(struct drm_local_map *map,
                            struct vm_area_struct *vma)
{
        pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
    defined(__mips__)
        if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
                tmp = pgprot_noncached(tmp);
        else
                tmp = pgprot_writecombine(tmp);
#elif defined(__ia64__)
        if (efi_range_is_wc(vma->vm_start, vma->vm_end -
                                    vma->vm_start))
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__)
        tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}

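/*
 * Page protection for DMA maps.  Only non-cache-coherent PowerPC needs a
 * special (non-cached, write-combining) protection here.
 */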
static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
        pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
        tmp = pgprot_noncached_wc(tmp);
#endif
        return tmp;
}

/*
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if IS_ENABLED(CONFIG_AGP)
static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_local_map *map = NULL;
        struct drm_map_list *r_list;
        struct drm_hash_item *hash;

        /*
         * Find the right map
         */
        if (!dev->agp || !dev->agp->cant_use_aperture)
                goto vm_fault_error;

        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
                goto vm_fault_error;

        r_list = drm_hash_entry(hash, struct drm_map_list, hash);
        map = r_list->map;

        if (map && map->type == _DRM_AGP) {
                /*
                 * Using vm_pgoff as a selector forces us to use this unusual
                 * addressing scheme.
                 */
                resource_size_t offset = vmf->address - vma->vm_start;
                resource_size_t baddr = map->offset + offset;
                struct drm_agp_mem *agpmem;
                struct page *page;

#ifdef __alpha__
                /*
                 * Adjust to a bus-relative address
                 */
                baddr -= dev->hose->mem_space->start;
#endif

                /*
                 * It's AGP memory - find the real physical page to map
                 */
                list_for_each_entry(agpmem, &dev->agp->memory, head) {
                        if (agpmem->bound <= baddr &&
                            agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
                                break;
                }

                if (&agpmem->head == &dev->agp->memory)
                        goto vm_fault_error;

                /*
                 * Get the page, inc the use count, and return it
                 */
                offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
                page = agpmem->memory->pages[offset];
                get_page(page);
                vmf->page = page;

                DRM_DEBUG
                    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
                     (unsigned long long)baddr,
                     agpmem->memory->pages[offset],
                     (unsigned long long)offset,
                     page_count(page));
                return 0;
        }
vm_fault_error:
        return VM_FAULT_SIGBUS; /* Disallow mremap */
}
#else
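/* Without CONFIG_AGP there is no AGP map to fault in. */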
static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}
#endif

/*
 * \c fault method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct drm_local_map *map = vma->vm_private_data;
        unsigned long offset;
        unsigned long i;
        struct page *page;

        if (!map)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = vmf->address - vma->vm_start;
        i = (unsigned long)map->handle + offset;
        page = vmalloc_to_page((void *)i);
        if (!page)
                return VM_FAULT_SIGBUS;
        get_page(page);
        vmf->page = page;

        DRM_DEBUG("shm_fault 0x%lx\n", offset);
        return 0;
}

/*
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_vma_entry *pt, *temp;
        struct drm_local_map *map;
        struct drm_map_list *r_list;
        int found_maps = 0;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);

        map = vma->vm_private_data;

        mutex_lock(&dev->struct_mutex);
        list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
                if (pt->vma->vm_private_data == map)
                        found_maps++;
                if (pt->vma == vma) {
                        list_del(&pt->head);
                        kfree(pt);
                }
        }

        /* We were the only map that was found */
        if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
                /* Check to see if we are in the maplist; if we are not, then
                 * we delete this mapping's information.
                 */
                found_maps = 0;
                list_for_each_entry(r_list, &dev->maplist, head) {
                        if (r_list->map == map)
                                found_maps++;
                }

                if (!found_maps) {
                        switch (map->type) {
                        case _DRM_REGISTERS:
                        case _DRM_FRAME_BUFFER:
                                arch_phys_wc_del(map->mtrr);
                                iounmap(map->handle);
                                break;
                        case _DRM_SHM:
                                vfree(map->handle);
                                break;
                        case _DRM_AGP:
                        case _DRM_SCATTER_GATHER:
                                break;
                        case _DRM_CONSISTENT:
                                dma_free_coherent(&dev->pdev->dev,
                                                  map->size,
                                                  map->handle,
                                                  map->offset);
                                break;
                        }
                        kfree(map);
                }
        }
        mutex_unlock(&dev->struct_mutex);
}

/*
 * \c fault method for DMA virtual memory.
 *
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_device_dma *dma = dev->dma;
        unsigned long offset;
        unsigned long page_nr;
        struct page *page;

        if (!dma)
                return VM_FAULT_SIGBUS; /* Error */
        if (!dma->pagelist)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = vmf->address - vma->vm_start;
                                        /* vm_[pg]off[set] should be 0 */
        page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
        page = virt_to_page((void *)dma->pagelist[page_nr]);

        get_page(page);
        vmf->page = page;

        DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
        return 0;
}

/*
 * \c fault method for scatter-gather virtual memory.
 *
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static vm_fault_t drm_vm_sg_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct drm_local_map *map = vma->vm_private_data;
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_sg_mem *entry = dev->sg;
        unsigned long offset;
        unsigned long map_offset;
        unsigned long page_offset;
        struct page *page;

        if (!entry)
                return VM_FAULT_SIGBUS; /* Error */
        if (!entry->pagelist)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = vmf->address - vma->vm_start;
        map_offset = map->offset - (unsigned long)dev->sg->virtual;
        page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
        page = entry->pagelist[page_offset];
        get_page(page);
        vmf->page = page;

        return 0;
}

/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
        .fault = drm_vm_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
        .fault = drm_vm_shm_fault,
        .open = drm_vm_open,
        .close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
        .fault = drm_vm_dma_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
        .fault = drm_vm_sg_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

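/*
 * Record a new mapping of a legacy map in drm_device::vmalist.  All
 * callers in this file hold dev->struct_mutex.
 */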
static void drm_vm_open_locked(struct drm_device *dev,
                               struct vm_area_struct *vma)
{
        struct drm_vma_entry *vma_entry;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);

        vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
        if (vma_entry) {
                vma_entry->vma = vma;
                vma_entry->pid = current->pid;
                list_add(&vma_entry->head, &dev->vmalist);
        }
}

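/*
 * \c open method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry for this mapping and add it to
 * drm_device::vmalist under dev->struct_mutex.
 */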
static void drm_vm_open(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_open_locked(dev, vma);
        mutex_unlock(&dev->struct_mutex);
}

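/*
 * Unlink and free the drm_vma_entry for \p vma from drm_device::vmalist.
 * Callers are expected to hold dev->struct_mutex.
 */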
static void drm_vm_close_locked(struct drm_device *dev,
                                struct vm_area_struct *vma)
{
        struct drm_vma_entry *pt, *temp;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);

        list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
                if (pt->vma == vma) {
                        list_del(&pt->head);
                        kfree(pt);
                        break;
                }
        }
}

/*
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_close_locked(dev, vma);
        mutex_unlock(&dev->struct_mutex);
}

/*
 * mmap DMA memory.
 *
 * \param file_priv DRM file private.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops, the file
 * pointer, and calls vm_open().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev;
        struct drm_device_dma *dma;
        unsigned long length = vma->vm_end - vma->vm_start;

        dev = priv->minor->dev;
        dma = dev->dma;
        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);

        /* Length must match exact page count */
        if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
                return -EINVAL;
        }

        if (!capable(CAP_SYS_ADMIN) &&
            (dma->flags & _DRM_DMA_USE_PCI_RO)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot =
                    __pgprot(pte_val
                             (pte_wrprotect
                              (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        vma->vm_ops = &drm_vm_dma_ops;

        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

        drm_vm_open_locked(dev, vma);
        return 0;
}

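/*
 * Architecture-specific offset to add to a map's physical offset before
 * remapping; only Alpha needs a non-zero (dense memory base) value.
 */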
static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
        return dev->hose->dense_mem_base;
#else
        return 0;
#endif
}

/*
 * mmap a DRM memory map.
 *
 * \param file_priv DRM file private.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the mapping type and remaps the pages. Finally sets the file
 * pointer and calls vm_open().
 */
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_local_map *map = NULL;
        resource_size_t offset = 0;
        struct drm_hash_item *hash;

        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);

        if (!priv->authenticated)
                return -EACCES;

        /* We check for "dma". On Apple's UniNorth, it's valid to have
         * the AGP mapped at physical address 0
         * --BenH.
         */
        if (!vma->vm_pgoff
#if IS_ENABLED(CONFIG_AGP)
            && (!dev->agp
                || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
            )
                return drm_mmap_dma(filp, vma);

        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
                DRM_ERROR("Could not find map\n");
                return -EINVAL;
        }

        map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
        if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
                return -EPERM;

        /* Check for valid size. */
        if (map->size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot =
                    __pgprot(pte_val
                             (pte_wrprotect
                              (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        switch (map->type) {
#if !defined(__arm__)
        case _DRM_AGP:
                if (dev->agp && dev->agp->cant_use_aperture) {
                        /*
                         * On some platforms we can't talk to bus DMA addresses from the CPU,
                         * so for memory of type _DRM_AGP we sort out the real physical pages
                         * and mappings in fault().
                         */
#if defined(__powerpc__)
                        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
                        vma->vm_ops = &drm_vm_ops;
                        break;
                }
                fallthrough;    /* to _DRM_FRAME_BUFFER... */
#endif
        case _DRM_FRAME_BUFFER:
        case _DRM_REGISTERS:
                offset = drm_core_get_reg_ofs(dev);
                vma->vm_page_prot = drm_io_prot(map, vma);
                if (io_remap_pfn_range(vma, vma->vm_start,
                                       (map->offset + offset) >> PAGE_SHIFT,
                                       vma->vm_end - vma->vm_start,
                                       vma->vm_page_prot))
                        return -EAGAIN;
                DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
                          " offset = 0x%llx\n",
                          map->type,
                          vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));

                vma->vm_ops = &drm_vm_ops;
                break;
        case _DRM_CONSISTENT:
                /* Consistent memory is really like shared memory. But
                 * it's allocated in a different way, so avoid fault */
                if (remap_pfn_range(vma, vma->vm_start,
                    page_to_pfn(virt_to_page(map->handle)),
                    vma->vm_end - vma->vm_start, vma->vm_page_prot))
                        return -EAGAIN;
                vma->vm_page_prot = drm_dma_prot(map->type, vma);
                fallthrough;    /* to _DRM_SHM */
        case _DRM_SHM:
                vma->vm_ops = &drm_vm_shm_ops;
                vma->vm_private_data = (void *)map;
                break;
        case _DRM_SCATTER_GATHER:
                vma->vm_ops = &drm_vm_sg_ops;
                vma->vm_private_data = (void *)map;
                vma->vm_page_prot = drm_dma_prot(map->type, vma);
                break;
        default:
                return -EINVAL; /* This should never happen. */
        }
        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

        drm_vm_open_locked(dev, vma);
        return 0;
}

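/*
 * Legacy mmap entry point: reject unplugged devices, then call
 * drm_mmap_locked() with dev->struct_mutex held.
 */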
int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        int ret;

        if (drm_dev_is_unplugged(dev))
                return -ENODEV;

        mutex_lock(&dev->struct_mutex);
        ret = drm_mmap_locked(filp, vma);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
EXPORT_SYMBOL(drm_legacy_mmap);

#if IS_ENABLED(CONFIG_DRM_LEGACY)
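/* Free every drm_vma_entry still tracked on drm_device::vmalist. */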
void drm_legacy_vma_flush(struct drm_device *dev)
{
        struct drm_vma_entry *vma, *vma_temp;

        /* Clear vma list (only needed for legacy drivers) */
        list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
                list_del(&vma->head);
                kfree(vma);
        }
}
#endif