/*
 * Legacy: Generic DRM Buffer Management
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/export.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/nospec.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include <asm/shmparam.h>

#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>

#include "drm_legacy.h"
static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
						  struct drm_local_map *map)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		/*
		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
		 * while PCI resources may live above that, we only compare the
		 * lower 32 bits of the map offset for maps of type
		 * _DRM_FRAMEBUFFER or _DRM_REGISTERS.
		 * It is assumed that if a driver has more than one resource
		 * of each type, the lower 32 bits are different.
		 */
		if (!entry->map ||
		    map->type != entry->map->type ||
		    entry->master != dev->master)
			continue;
		switch (map->type) {
		case _DRM_SHM:
			if (map->flags != _DRM_CONTAINS_LOCK)
				break;
			return entry;
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			if ((entry->map->offset & 0xffffffff) ==
			    (map->offset & 0xffffffff))
				return entry;
			break;
		default: /* Make gcc happy */
			break;
		}
		if (entry->map->offset == map->offset)
			return entry;
	}

	return NULL;
}
static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
			  unsigned long user_token, int hashed_handle, int shm)
{
	int use_hashed_handle, shift;
	unsigned long add;

#if (BITS_PER_LONG == 64)
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

	if (!use_hashed_handle) {
		int ret;

		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		if (ret != -EINVAL)
			return ret;
	}

	shift = 0;
	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
	if (shm && (SHMLBA > PAGE_SIZE)) {
		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;

		/* For shared memory, we have to preserve the SHMLBA
		 * bits of the eventual vma->vm_pgoff value during
		 * mmap().  Otherwise we run into cache aliasing problems
		 * on some platforms.  On these platforms, the pgoff of
		 * a mmap() request is used to pick a suitable virtual
		 * address for the mmap() region such that it will not
		 * cause cache aliasing problems.
		 *
		 * Therefore, make sure the SHMLBA relevant bits of the
		 * hash value we use are equal to those in the original
		 * kernel virtual address.
		 */
		shift = bits;
		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
	}

	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 shift, add);
}
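
/*
 * Worked example (illustrative, not part of the original file): on a
 * platform where SHMLBA == 4 * PAGE_SIZE (some ARM/MIPS parts with
 * virtually indexed caches), bits = ilog2(4) + 1 = 3, so the low three
 * page-number bits of the kernel token are folded into the hash key:
 *
 *	add |= (user_token >> PAGE_SHIFT) & 0x7;
 *
 * Any vma->vm_pgoff later derived from that key shares those bits with the
 * original kernel virtual address, keeping the user and kernel mappings
 * cache-colour compatible.
 */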
/*
 * Core function to create a range of memory available for mapping by a
 * non-root process.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist. Adds MTRR's where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
			   unsigned int size, enum drm_map_type type,
			   enum drm_map_flags flags,
			   struct drm_map_list **maplist)
{
	struct drm_local_map *map;
	struct drm_map_list *list;
	unsigned long user_token;
	int ret;

	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * book keeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		kfree(map);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
		  (unsigned long long)map->offset, map->size, map->type);

	/* page-align _DRM_SHM maps. They are allocated here so there is no security
	 * hole created by that and it works around various broken drivers that use
	 * a non-aligned quantity to map the SAREA. --BenH
	 */
	if (map->type == _DRM_SHM)
		map->size = PAGE_ALIGN(map->size);

	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		kfree(map);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
		if (map->offset + (map->size - 1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			kfree(map);
			return -EINVAL;
		}
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}
			kfree(map);
			*maplist = list;
			return 0;
		}

		if (map->type == _DRM_FRAME_BUFFER ||
		    (map->flags & _DRM_WRITE_COMBINING)) {
			map->mtrr =
				arch_phys_wc_add(map->offset, map->size);
		}
		if (map->type == _DRM_REGISTERS) {
			if (map->flags & _DRM_WRITE_COMBINING)
				map->handle = ioremap_wc(map->offset,
							 map->size);
			else
				map->handle = ioremap(map->offset, map->size);
			if (!map->handle) {
				kfree(map);
				return -ENOMEM;
			}
		}
		break;
	case _DRM_SHM:
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}
			kfree(map);
			*maplist = list;
			return 0;
		}
		map->handle = vmalloc_user(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, order_base_2(map->size), map->handle);
		if (!map->handle) {
			kfree(map);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->master->lock.hw_lock != NULL) {
				vfree(map->handle);
				kfree(map);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->master->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP: {
		struct drm_agp_mem *entry;
		int valid = 0;

		if (!dev->agp) {
			kfree(map);
			return -EINVAL;
		}
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */

		/* This assumes the DRM is in total control of AGP space.
		 * It's not always the case as AGP can be in the control
		 * of user space (i.e. i810 driver). So this loop will get
		 * skipped and we double check that dev->agp->memory is
		 * actually set as well as being invalid before EPERM'ing.
		 */
		list_for_each_entry(entry, &dev->agp->memory, head) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!list_empty(&dev->agp->memory) && !valid) {
			kfree(map);
			return -EPERM;
		}
		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
			  (unsigned long long)map->offset, map->size);
		break;
	}
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			kfree(map);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G.
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first.
		 */
		map->handle = dma_alloc_coherent(dev->dev,
						 map->size,
						 &map->offset,
						 GFP_KERNEL);
		if (!map->handle) {
			kfree(map);
			return -ENOMEM;
		}
		break;
	default:
		kfree(map);
		return -EINVAL;
	}

	list = kzalloc(sizeof(*list), GFP_KERNEL);
	if (!list) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		return -EINVAL;
	}
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist);

	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
		map->offset;
	ret = drm_map_handle(dev, &list->hash, user_token, 0,
			     (map->type == _DRM_SHM));
	if (ret) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		kfree(list);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	list->user_token = list->hash.key << PAGE_SHIFT;
	mutex_unlock(&dev->struct_mutex);

	if (!(map->flags & _DRM_DRIVER))
		list->master = dev->master;
	*maplist = list;
	return 0;
}
int drm_legacy_addmap(struct drm_device *dev, resource_size_t offset,
		      unsigned int size, enum drm_map_type type,
		      enum drm_map_flags flags, struct drm_local_map **map_ptr)
{
	struct drm_map_list *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}
EXPORT_SYMBOL(drm_legacy_addmap);
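
/*
 * Usage sketch (hypothetical driver code, not part of this file): a legacy
 * driver would typically register its MMIO range at load time, with "pdev"
 * being its struct pci_dev:
 *
 *	struct drm_local_map *regs;
 *	int ret;
 *
 *	ret = drm_legacy_addmap(dev, pci_resource_start(pdev, 0),
 *				pci_resource_len(pdev, 0),
 *				_DRM_REGISTERS, _DRM_READ_ONLY, &regs);
 *	if (ret)
 *		return ret;
 *
 * The map is assigned a 32-bit user token by drm_addmap_core() and can be
 * looked up again later with drm_legacy_findmap().
 */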
struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
					 unsigned int token)
{
	struct drm_map_list *_entry;

	list_for_each_entry(_entry, &dev->maplist, head)
		if (_entry->user_token == token)
			return _entry->map;
	return NULL;
}
EXPORT_SYMBOL(drm_legacy_findmap);
/*
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *maplist;
	int err;

	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
		return -EPERM;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	err = drm_addmap_core(dev, map->offset, map->size, map->type,
			      map->flags, &maplist);
	if (err)
		return err;

	/* avoid a warning on 64-bit: this cast isn't very nice, but the
	 * API was fixed long ago, so it's too late to change it now.
	 */
	map->handle = (void *)(unsigned long)maplist->user_token;

	/*
	 * It appears that there are no users of this value whatsoever --
	 * drmAddMap just discards it.  Let's not encourage its use.
	 * (Keeping drm_addmap_core's returned mtrr value would be wrong --
	 * it's not a real mtrr index anymore.)
	 */
	map->mtrr = -1;

	return 0;
}
/*
 * Get mapping information.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_map structure.
 *
 * \return zero on success or a negative number on failure.
 *
 * Searches for the mapping with the specified offset and copies its
 * information into userspace.
 */
int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *r_list = NULL;
	struct list_head *list;
	int idx;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	idx = map->offset;
	if (idx < 0)
		return -EINVAL;

	i = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each(list, &dev->maplist) {
		if (i == idx) {
			r_list = list_entry(list, struct drm_map_list, head);
			break;
		}
		i++;
	}
	if (!r_list || !r_list->map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	map->offset = r_list->map->offset;
	map->size = r_list->map->size;
	map->type = r_list->map->type;
	map->flags = r_list->map->flags;
	map->handle = (void *)(unsigned long) r_list->user_token;
	map->mtrr = arch_phys_wc_index(r_list->map->mtrr);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
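
/*
 * Note (summary, not original text): despite the field name, userspace
 * passes a list *index* in drm_map::offset here; libdrm's drmGetMap()
 * walks indices 0, 1, 2, ... until the ioctl fails, rebuilding the map
 * list on the client side.
 */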
/*
 * Remove a map private from the list and deallocate resources if the
 * mapping isn't in use.
 *
 * Searches the map on drm_device::maplist, removes it from the list, checks
 * if it's being used, and frees any associated resources (such as MTRRs)
 * if it's not.
 *
 * \sa drm_legacy_addmap
 */
int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	int found = 0;
	struct drm_master *master;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			master = r_list->master;
			list_del(&r_list->head);
			drm_ht_remove_key(&dev->map_hash,
					  r_list->user_token >> PAGE_SHIFT);
			kfree(r_list);
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	switch (map->type) {
	case _DRM_REGISTERS:
		iounmap(map->handle);
		fallthrough;
	case _DRM_FRAME_BUFFER:
		arch_phys_wc_del(map->mtrr);
		break;
	case _DRM_SHM:
		vfree(map->handle);
		if (master) {
			if (dev->sigdata.lock == master->lock.hw_lock)
				dev->sigdata.lock = NULL;
			master->lock.hw_lock = NULL;	/* SHM removed */
			master->lock.file_priv = NULL;
			wake_up_interruptible_all(&master->lock.lock_queue);
		}
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dma_free_coherent(dev->dev,
				  map->size,
				  map->handle,
				  map->offset);
		break;
	}
	kfree(map);

	return 0;
}
EXPORT_SYMBOL(drm_legacy_rmmap_locked);
void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return;

	mutex_lock(&dev->struct_mutex);
	drm_legacy_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_legacy_rmmap);
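
/*
 * Usage sketch (hypothetical, for illustration only): a driver tearing
 * down a map it created would call the unlocked wrapper, which takes
 * struct_mutex internally:
 *
 *	drm_legacy_rmmap(dev, regs);
 *
 * Code that already holds dev->struct_mutex must call
 * drm_legacy_rmmap_locked() directly to avoid a self-deadlock.
 */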
void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master)
{
	struct drm_map_list *r_list, *list_temp;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
		if (r_list->master == master)
			drm_legacy_rmmap_locked(dev, r_list->map);
	mutex_unlock(&dev->struct_mutex);
}
void drm_legacy_rmmaps(struct drm_device *dev)
{
	struct drm_map_list *r_list, *list_temp;

	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
		drm_legacy_rmmap(dev, r_list->map);
}
/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* The list has wrapped around to the head pointer, or it's empty;
	 * either way, we didn't find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = drm_legacy_rmmap_locked(dev, map);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/*
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device *dev,
				  struct drm_buf_entry *entry)
{
	drm_dma_handle_t *dmah;
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				dmah = entry->seglist[i];
				dma_free_coherent(dev->dev,
						  dmah->size,
						  dmah->vaddr,
						  dmah->busaddr);
				kfree(dmah);
			}
		}
		kfree(entry->seglist);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			kfree(entry->buflist[i].dev_private);
		}
		kfree(entry->buflist);

		entry->buf_count = 0;
	}
}
#if IS_ENABLED(CONFIG_AGP)
/*
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks, creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_legacy_addbufs_agp(struct drm_device *dev,
			   struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_agp_mem *agp_entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i, valid;
	struct drm_buf **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	/* Make sure buffers are located in AGP memory that we own */
	valid = 0;
	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!list_empty(&dev->agp->memory) && !valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}
	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_legacy_addbufs_agp);
#endif /* CONFIG_AGP */
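
/*
 * Worked example of the size/order arithmetic shared by the addbufs
 * variants (illustrative numbers, assuming PAGE_SHIFT == 12, i.e. 4 KiB
 * pages): for request->size = 65536,
 *
 *	order      = order_base_2(65536) = 16
 *	size       = 1 << 16             = 65536
 *	page_order = 16 - 12             = 4
 *	total      = PAGE_SIZE << 4      = 65536
 *
 * so each buffer fills exactly one 16-page allocation unit. A request size
 * of 20000 rounds up to order 15 (size 32768), which is why the rounded
 * size is written back into request->size before returning to userspace.
 */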
int drm_legacy_addbufs_pci(struct drm_device *dev,
			   struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	struct drm_buf_entry *entry;
	drm_dma_handle_t *dmah;
	struct drm_buf *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
		  request->count, request->size, size, order);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->seglist = kcalloc(count, sizeof(*entry->seglist), GFP_KERNEL);
	if (!entry->seglist) {
		kfree(entry->buflist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded.
	 */
	temp_pagelist = kmalloc_array(dma->page_count + (count << page_order),
				      sizeof(*dma->pagelist),
				      GFP_KERNEL);
	if (!temp_pagelist) {
		kfree(entry->buflist);
		kfree(entry->seglist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {
		dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			kfree(temp_pagelist);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		dmah->size = total;
		dmah->vaddr = dma_alloc_coherent(dev->dev,
						 dmah->size,
						 &dmah->busaddr,
						 GFP_KERNEL);
		if (!dmah->vaddr) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			kfree(dmah);
			kfree(temp_pagelist);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = kzalloc(buf->dev_priv_size,
						   GFP_KERNEL);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				kfree(temp_pagelist);
				mutex_unlock(&dev->struct_mutex);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		kfree(temp_pagelist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		kfree(dma->pagelist);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	if (request->flags & _DRM_PCI_BUFFER_RO)
		dma->flags = _DRM_DMA_USE_PCI_RO;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_legacy_addbufs_pci);
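
/*
 * Design note (summary of the logic above): PCI addbufs allocates at two
 * levels. Each pass of the outer loop allocates one DMA-coherent *segment*
 * of "total" bytes, then the inner loop carves as many "alignment"-spaced
 * *buffers* out of that segment as fit, so seg_count and buf_count advance
 * independently. drm_cleanup_buf_error() depends on both counts being set
 * before any of the error exits above.
 */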
static int drm_legacy_addbufs_sg(struct drm_device *dev,
				 struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
/*
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * Depending on the memory type specified in drm_buf_desc::flags and the
 * build options, the call is dispatched to drm_legacy_addbufs_agp(),
 * drm_legacy_addbufs_sg() or drm_legacy_addbufs_pci() for AGP,
 * scatter-gather or consistent PCI memory respectively.
 */
int drm_legacy_addbufs(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

#if IS_ENABLED(CONFIG_AGP)
	if (request->flags & _DRM_AGP_BUFFER)
		ret = drm_legacy_addbufs_agp(dev, request);
	else
#endif
	if (request->flags & _DRM_SG_BUFFER)
		ret = drm_legacy_addbufs_sg(dev, request);
	else if (request->flags & _DRM_FB_BUFFER)
		ret = -EINVAL;
	else
		ret = drm_legacy_addbufs_pci(dev, request);

	return ret;
}
/*
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::buf_lock
 * lock, preventing allocation of more buffers after this call. Information
 * about each requested buffer is then copied into user space.
 */
int __drm_legacy_infobufs(struct drm_device *dev,
			  void *data, int *p,
			  int (*f)(void *, int, struct drm_buf_entry *))
{
	struct drm_device_dma *dma = dev->dma;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->buf_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (*p >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			struct drm_buf_entry *from = &dma->bufs[i];

			if (from->buf_count) {
				if (f(data, count, from) < 0)
					return -EFAULT;
				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].low_mark,
					  dma->bufs[i].high_mark);
				++count;
			}
		}
	}
	*p = count;

	return 0;
}

static int copy_one_buf(void *data, int count, struct drm_buf_entry *from)
{
	struct drm_buf_info *request = data;
	struct drm_buf_desc __user *to = &request->list[count];
	struct drm_buf_desc v = {.count = from->buf_count,
				 .size = from->buf_size,
				 .low_mark = from->low_mark,
				 .high_mark = from->high_mark};

	if (copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags)))
		return -EFAULT;
	return 0;
}

int drm_legacy_infobufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_buf_info *request = data;

	return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf);
}
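
/*
 * The indirection through the "f" callback exists so the 32-bit compat
 * ioctl path can reuse __drm_legacy_infobufs() with its own copy routine
 * that writes the smaller compat structure layout; copy_one_buf() above is
 * simply the native implementation (the compat variant lives in
 * drm_ioc32.c).
 */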
/*
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_legacy_markbufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;
	struct drm_buf_entry *entry;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d, %d, %d\n",
		  request->size, request->low_mark, request->high_mark);
	order = order_base_2(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
		return -EINVAL;
	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
		return -EINVAL;

	entry->low_mark = request->low_mark;
	entry->high_mark = request->high_mark;

	return 0;
}
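
/*
 * Usage sketch (hypothetical, for illustration): a client that wanted the
 * DMA code to keep between 16 and 32 of its 64 KiB buffers available would
 * issue:
 *
 *	struct drm_buf_desc desc = {
 *		.size      = 65536,
 *		.low_mark  = 16,
 *		.high_mark = 32,
 *	};
 *	ioctl(fd, DRM_IOCTL_MARK_BUFS, &desc);
 *
 * As the note above says, essentially nothing ever used this.
 */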
/*
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_legacy_freebufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	struct drm_buf *buf;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d\n", request->count);
	for (i = 0; i < request->count; i++) {
		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		idx = array_index_nospec(idx, dma->buf_count);
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  task_pid_nr(current));
			return -EINVAL;
		}
		drm_legacy_free_buffer(dev, buf);
	}

	return 0;
}
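
/*
 * The array_index_nospec() above is the standard Spectre-v1 hardening
 * pattern: after the bounds check, the index is clamped again behind a
 * speculation barrier so that a mispredicted branch cannot be used to read
 * dma->buflist out of bounds:
 *
 *	if (idx < 0 || idx >= dma->buf_count)
 *		return -EINVAL;
 *	idx = array_index_nospec(idx, dma->buf_count);
 *	buf = dma->buflist[idx];
 */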
/*
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
 * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
int __drm_legacy_mapbufs(struct drm_device *dev, void *data, int *p,
			 void __user **v,
			 int (*f)(void *, int, unsigned long,
				  struct drm_buf *),
			 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int retcode = 0;
	unsigned long virtual;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->buf_lock);

	if (*p >= dma->buf_count) {
		if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))) {
			struct drm_local_map *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}
			virtual = vm_mmap(file_priv->filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED,
					  token);
		} else {
			virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
		}
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		*v = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (f(data, i, virtual, dma->buflist[i]) < 0) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
      done:
	*p = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", *p, retcode);

	return retcode;
}

static int map_one_buf(void *data, int idx, unsigned long virtual,
		       struct drm_buf *buf)
{
	struct drm_buf_map *request = data;
	unsigned long address = virtual + buf->offset;	/* *** */

	if (copy_to_user(&request->list[idx].idx, &buf->idx,
			 sizeof(request->list[0].idx)))
		return -EFAULT;
	if (copy_to_user(&request->list[idx].total, &buf->total,
			 sizeof(request->list[0].total)))
		return -EFAULT;
	if (clear_user(&request->list[idx].used, sizeof(int)))
		return -EFAULT;
	if (copy_to_user(&request->list[idx].address, &address,
			 sizeof(address)))
		return -EFAULT;

	return 0;
}

int drm_legacy_mapbufs(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_buf_map *request = data;

	return __drm_legacy_mapbufs(dev, data, &request->count,
				    &request->virtual, map_one_buf,
				    file_priv);
}
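
/*
 * Userspace flow (summary, not original text): libdrm's drmMapBufs() calls
 * this ioctl once; the kernel mmap()s the whole buffer region into the
 * client and then reports, per buffer, its index, size and user address
 * (virtual + buf->offset, computed in map_one_buf() above), so the client
 * never issues one mmap() per buffer.
 */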
int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (dev->driver->dma_ioctl)
		return dev->driver->dma_ioctl(dev, data, file_priv);
	else
		return -EINVAL;
}
struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		if (entry->map && entry->map->type == _DRM_SHM &&
		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
			return entry->map;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(drm_legacy_getsarea);
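
/*
 * Usage sketch (hypothetical driver code): legacy drivers fetch the SAREA
 * once the X server has created it, typically from their firstopen path:
 *
 *	struct drm_local_map *sarea = drm_legacy_getsarea(dev);
 *
 *	if (!sarea)
 *		return -EINVAL;
 *	dev_priv->sarea = sarea;
 *
 * Only a _DRM_SHM map carrying _DRM_CONTAINS_LOCK qualifies, i.e. the
 * mapping that also holds the hardware lock.
 */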