/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2012-2014 Intel Corporation
 */

#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>

#include "i915_drv.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
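/*
 * An i915_mm_struct wraps the mm_struct of the process that created a
 * userptr object. It is reference counted and shared between all userptr
 * objects backed by the same mm, found via the dev_priv->mm_structs hash
 * keyed on the mm_struct pointer.
 */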
struct i915_mm_struct {
	struct mm_struct *mm;
	struct drm_i915_private *i915;
	struct i915_mmu_notifier *mn;
	struct hlist_node node;
	struct kref kref;
	struct work_struct work;
};
#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

struct i915_mmu_notifier {
	spinlock_t lock;
	struct hlist_node node;
	struct mmu_notifier mn;
	struct rb_root_cached objects;
	struct i915_mm_struct *mm;
};
struct i915_mmu_object {
	struct i915_mmu_notifier *mn;
	struct drm_i915_gem_object *obj;
	struct interval_tree_node it;
};
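/*
 * Each object's backing range is tracked in the notifier's interval tree
 * so that an invalidation of any part of that range can find it. The
 * helpers below insert and remove an object under mn->lock.
 */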
static void add_object(struct i915_mmu_object *mo)
{
	GEM_BUG_ON(!RB_EMPTY_NODE(&mo->it.rb));
	interval_tree_insert(&mo->it, &mo->mn->objects);
}
static void del_object(struct i915_mmu_object *mo)
{
	if (RB_EMPTY_NODE(&mo->it.rb))
		return;

	interval_tree_remove(&mo->it, &mo->mn->objects);
	RB_CLEAR_NODE(&mo->it.rb);
}
static void
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value)
{
	struct i915_mmu_object *mo = obj->userptr.mmu_object;

	/*
	 * During mm_invalidate_range we need to cancel any userptr that
	 * overlaps the range being invalidated. Doing so requires the
	 * struct_mutex, and that risks recursion. In order to cause
	 * recursion, the user must alias the userptr address space with
	 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
	 * to invalidate that mmapping, mm_invalidate_range is called with
	 * the userptr address *and* the struct_mutex held. To prevent that
	 * we set a flag under the i915_mmu_notifier spinlock to indicate
	 * whether this object is valid.
	 */
	if (!mo)
		return;

	spin_lock(&mo->mn->lock);
	if (value)
		add_object(mo);
	else
		del_object(mo);
	spin_unlock(&mo->mn->lock);
}
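/*
 * mmu_notifier callback: part of the monitored address space is about to
 * be invalidated, so every userptr object overlapping the range must be
 * unbound and have its pages released before the invalidation proceeds.
 */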
static int
userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
				  const struct mmu_notifier_range *range)
{
	struct i915_mmu_notifier *mn =
		container_of(_mn, struct i915_mmu_notifier, mn);
	struct interval_tree_node *it;
	unsigned long end;
	int ret = 0;

	if (RB_EMPTY_ROOT(&mn->objects.rb_root))
		return 0;

	/* interval ranges are inclusive, but invalidate range is exclusive */
	end = range->end - 1;

	spin_lock(&mn->lock);
	it = interval_tree_iter_first(&mn->objects, range->start, end);
	while (it) {
		struct drm_i915_gem_object *obj;

		if (!mmu_notifier_range_blockable(range)) {
			ret = -EAGAIN;
			break;
		}

		/*
		 * The mmu_object is released late when destroying the
		 * GEM object so it is entirely possible to gain a
		 * reference on an object in the process of being freed
		 * since our serialisation is via the spinlock and not
		 * the struct_mutex - and consequently use it after it
		 * is freed and then double free it. To prevent that
		 * use-after-free we only acquire a reference on the
		 * object if it is not in the process of being destroyed.
		 */
		obj = container_of(it, struct i915_mmu_object, it)->obj;
		if (!kref_get_unless_zero(&obj->base.refcount)) {
			it = interval_tree_iter_next(it, range->start, end);
			continue;
		}
		spin_unlock(&mn->lock);

		ret = i915_gem_object_unbind(obj,
					     I915_GEM_OBJECT_UNBIND_ACTIVE |
					     I915_GEM_OBJECT_UNBIND_BARRIER);
		if (ret == 0)
			ret = __i915_gem_object_put_pages(obj);
		i915_gem_object_put(obj);
		if (ret)
			return ret;

		spin_lock(&mn->lock);

		/*
		 * As we do not (yet) protect the mmu from concurrent insertion
		 * over this range, there is no guarantee that this search will
		 * terminate given a pathological workload.
		 */
		it = interval_tree_iter_first(&mn->objects, range->start, end);
	}
	spin_unlock(&mn->lock);

	return ret;
}
static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
	.invalidate_range_start = userptr_mn_invalidate_range_start,
};
static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct i915_mm_struct *mm)
{
	struct i915_mmu_notifier *mn;

	mn = kmalloc(sizeof(*mn), GFP_KERNEL);
	if (mn == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mn->lock);
	mn->mn.ops = &i915_gem_userptr_notifier;
	mn->objects = RB_ROOT_CACHED;
	mn->mm = mm;

	return mn;
}
static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
	struct i915_mmu_object *mo;

	mo = fetch_and_zero(&obj->userptr.mmu_object);
	if (!mo)
		return;

	spin_lock(&mo->mn->lock);
	del_object(mo);
	spin_unlock(&mo->mn->lock);
	kfree(mo);
}
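/*
 * Find the i915_mmu_notifier for this mm, creating and registering one if
 * it does not yet exist. Two threads may race to register; whichever loses
 * frees its copy and both return the notifier that was installed.
 */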
static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
	struct i915_mmu_notifier *mn;
	int err = 0;

	mn = mm->mn;
	if (mn)
		return mn;

	mn = i915_mmu_notifier_create(mm);
	if (IS_ERR(mn))
		err = PTR_ERR(mn);

	down_write(&mm->mm->mmap_sem);
	mutex_lock(&mm->i915->mm_lock);
	if (mm->mn == NULL && !err) {
		/* Protected by mmap_sem (write-lock) */
		err = __mmu_notifier_register(&mn->mn, mm->mm);
		if (!err) {
			/* Protected by mm_lock */
			mm->mn = fetch_and_zero(&mn);
		}
	} else if (mm->mn) {
		/*
		 * Someone else raced and successfully installed the mmu
		 * notifier, we can cancel our own errors.
		 */
		err = 0;
	}
	mutex_unlock(&mm->i915->mm_lock);
	up_write(&mm->mm->mmap_sem);

	if (mn && !IS_ERR(mn))
		kfree(mn);

	return err ? ERR_PTR(err) : mm->mn;
}
static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	struct i915_mmu_notifier *mn;
	struct i915_mmu_object *mo;

	if (flags & I915_USERPTR_UNSYNCHRONIZED)
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

	if (WARN_ON(obj->userptr.mm == NULL))
		return -EINVAL;

	mn = i915_mmu_notifier_find(obj->userptr.mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);

	mo = kzalloc(sizeof(*mo), GFP_KERNEL);
	if (!mo)
		return -ENOMEM;

	mo->mn = mn;
	mo->obj = obj;
	mo->it.start = obj->userptr.ptr;
	mo->it.last = obj->userptr.ptr + obj->base.size - 1;
	RB_CLEAR_NODE(&mo->it.rb);

	obj->userptr.mmu_object = mo;
	return 0;
}
static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
	if (mn == NULL)
		return;

	mmu_notifier_unregister(&mn->mn, mm);
	kfree(mn);
}
#else

static void
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value)
{
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
		return -ENODEV;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
}

#endif
static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
	struct i915_mm_struct *mm;

	/* Protected by dev_priv->mm_lock */
	hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
		if (mm->mm == real)
			return mm;

	return NULL;
}
static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_mm_struct *mm;
	int ret = 0;

	/* During release of the GEM object we hold the struct_mutex. This
	 * precludes us from calling mmput() at that time as that may be
	 * the last reference and so call exit_mmap(). exit_mmap() will
	 * attempt to reap the vma, and if we were holding a GTT mmap
	 * would then call drm_gem_vm_close() and attempt to reacquire
	 * the struct mutex. So in order to avoid that recursion, we have
	 * to defer releasing the mm reference until after we drop the
	 * struct_mutex, i.e. we need to schedule a worker to do the
	 * clean-up.
	 */
	mutex_lock(&dev_priv->mm_lock);
	mm = __i915_mm_struct_find(dev_priv, current->mm);
	if (mm == NULL) {
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (mm == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		kref_init(&mm->kref);
		mm->i915 = to_i915(obj->base.dev);

		mm->mm = current->mm;
		mmgrab(current->mm);

		mm->mn = NULL;

		/* Protected by dev_priv->mm_lock */
		hash_add(dev_priv->mm_structs,
			 &mm->node, (unsigned long)mm->mm);
	} else {
		kref_get(&mm->kref);
	}

	obj->userptr.mm = mm;
out:
	mutex_unlock(&dev_priv->mm_lock);
	return ret;
}
static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
	struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
	i915_mmu_notifier_free(mm->mn, mm->mm);
	mmdrop(mm->mm);
	kfree(mm);
}

static void
__i915_mm_struct_free(struct kref *kref)
{
	struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

	/* Protected by dev_priv->mm_lock */
	hash_del(&mm->node);
	mutex_unlock(&mm->i915->mm_lock);

	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
	queue_work(mm->i915->mm.userptr_wq, &mm->work);
}

static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mm == NULL)
		return;

	kref_put_mutex(&obj->userptr.mm->kref,
		       __i915_mm_struct_free,
		       &to_i915(obj->base.dev)->mm_lock);
	obj->userptr.mm = NULL;
}
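/*
 * A get_pages_work bundles the object with the task whose mm supplies the
 * pages, so that get_user_pages can run from the userptr workqueue without
 * holding the object's locks.
 */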
struct get_pages_work {
	struct work_struct work;
	struct drm_i915_gem_object *obj;
	struct task_struct *task;
};
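/*
 * Build an sg_table for the pinned pages and install it on the object. If
 * the initial segment size cannot be mapped through the GTT, retry with
 * PAGE_SIZE segments before giving up.
 */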
static struct sg_table *
__i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
			       struct page **pvec, unsigned long num_pages)
{
	unsigned int max_segment = i915_sg_segment_size();
	struct sg_table *st;
	unsigned int sg_page_sizes;
	int ret;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

alloc_table:
	ret = __sg_alloc_table_from_pages(st, pvec, num_pages,
					  0, num_pages << PAGE_SHIFT,
					  max_segment, GFP_KERNEL);
	if (ret) {
		kfree(st);
		return ERR_PTR(ret);
	}

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		sg_free_table(st);

		if (max_segment > PAGE_SIZE) {
			max_segment = PAGE_SIZE;
			goto alloc_table;
		}

		kfree(st);
		return ERR_PTR(ret);
	}

	sg_page_sizes = i915_sg_page_sizes(st->sgl);

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return st;
}
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct drm_i915_gem_object *obj = work->obj;
	const unsigned long npages = obj->base.size >> PAGE_SHIFT;
	unsigned long pinned;
	struct page **pvec;
	int ret;

	ret = -ENOMEM;
	pinned = 0;
	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pvec != NULL) {
		struct mm_struct *mm = obj->userptr.mm->mm;
		unsigned int flags = 0;
		int locked = 0;

		if (!i915_gem_object_is_readonly(obj))
			flags |= FOLL_WRITE;

		ret = -EFAULT;
		if (mmget_not_zero(mm)) {
			while (pinned < npages) {
				if (!locked) {
					down_read(&mm->mmap_sem);
					locked = 1;
				}
				ret = get_user_pages_remote
					(work->task, mm,
					 obj->userptr.ptr + pinned * PAGE_SIZE,
					 npages - pinned,
					 flags,
					 pvec + pinned, NULL, &locked);
				if (ret < 0)
					break;

				pinned += ret;
			}
			if (locked)
				up_read(&mm->mmap_sem);
			mmput(mm);
		}
	}

	mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
	if (obj->userptr.work == &work->work) {
		struct sg_table *pages = ERR_PTR(ret);

		if (pinned == npages) {
			pages = __i915_gem_userptr_alloc_pages(obj, pvec,
							       npages);
			if (!IS_ERR(pages)) {
				pinned = 0;
				pages = NULL;
			}
		}

		obj->userptr.work = ERR_CAST(pages);
		if (IS_ERR(pages))
			__i915_gem_userptr_set_active(obj, false);
	}
	mutex_unlock(&obj->mm.lock);

	release_pages(pvec, pinned);
	kvfree(pvec);

	i915_gem_object_put(obj);
	put_task_struct(work->task);
	kfree(work);
}
static struct sg_table *
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
{
	struct get_pages_work *work;

	/* Spawn a worker so that we can acquire the
	 * user pages without holding our mutex. Access
	 * to the user pages requires mmap_sem, and we have
	 * a strict lock ordering of mmap_sem, struct_mutex -
	 * we already hold struct_mutex here and so cannot
	 * call gup without encountering a lock inversion.
	 *
	 * Userspace will keep on repeating the operation
	 * (thanks to EAGAIN) until either we hit the fast
	 * path or the worker completes. If the worker is
	 * cancelled or superseded, the task is still run
	 * but the results ignored. (This leads to
	 * complications that we may have a stray object
	 * refcount that we need to be wary of when
	 * checking for existing objects during creation.)
	 * If the worker encounters an error, it reports
	 * that error back to this function through
	 * obj->userptr.work = ERR_PTR.
	 */
	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return ERR_PTR(-ENOMEM);

	obj->userptr.work = &work->work;

	work->obj = i915_gem_object_get(obj);

	work->task = current;
	get_task_struct(work->task);

	INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
	queue_work(to_i915(obj->base.dev)->mm.userptr_wq, &work->work);

	return ERR_PTR(-EAGAIN);
}
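/*
 * i915_gem_userptr_get_pages() first reports any error left behind by a
 * previous worker, then tries a fast gup when called from the process that
 * owns the pages, and otherwise defers to the worker above.
 */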
static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
	struct mm_struct *mm = obj->userptr.mm->mm;
	struct page **pvec;
	struct sg_table *pages;
	bool active;
	int pinned;

	/* If userspace should engineer that these pages are replaced in
	 * the vma between us binding this page into the GTT and completion
	 * of rendering... Their loss. If they change the mapping of their
	 * pages they need to create a new bo to point to the new vma.
	 *
	 * However, that still leaves open the possibility of the vma
	 * being copied upon fork. Which falls under the same userspace
	 * synchronisation issue as a regular bo, except that this time
	 * the process may not be expecting that a particular piece of
	 * memory is tied to the GPU.
	 *
	 * Fortunately, we can hook into the mmu_notifier in order to
	 * discard the page references prior to anything nasty happening
	 * to the vma (discard or cloning) which should prevent the more
	 * egregious cases from causing harm.
	 */

	if (obj->userptr.work) {
		/* active flag should still be held for the pending work */
		if (IS_ERR(obj->userptr.work))
			return PTR_ERR(obj->userptr.work);
		else
			return -EAGAIN;
	}

	pvec = NULL;
	pinned = 0;

	if (mm == current->mm) {
		pvec = kvmalloc_array(num_pages, sizeof(struct page *),
				      GFP_KERNEL |
				      __GFP_NORETRY |
				      __GFP_NOWARN);
		/*
		 * Using __get_user_pages_fast() with a read-only
		 * access is questionable. A read-only page may be
		 * COW-broken, and then this might end up giving
		 * the wrong side of the COW..
		 *
		 * We may or may not care.
		 */
		if (pvec) /* defer to worker if malloc fails */
			pinned = __get_user_pages_fast(obj->userptr.ptr,
						       num_pages,
						       !i915_gem_object_is_readonly(obj),
						       pvec);
	}

	active = false;
	if (pinned < 0) {
		pages = ERR_PTR(pinned);
		pinned = 0;
	} else if (pinned < num_pages) {
		pages = __i915_gem_userptr_get_pages_schedule(obj);
		active = pages == ERR_PTR(-EAGAIN);
	} else {
		pages = __i915_gem_userptr_alloc_pages(obj, pvec, num_pages);
		active = !IS_ERR(pages);
	}
	if (active)
		__i915_gem_userptr_set_active(obj, true);

	if (IS_ERR(pages))
		release_pages(pvec, pinned);
	kvfree(pvec);

	return PTR_ERR_OR_ZERO(pages);
}
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;

	/* Cancel any inflight work and force them to restart their gup */
	obj->userptr.work = NULL;
	__i915_gem_userptr_set_active(obj, false);
	if (!pages)
		return;

	__i915_gem_object_release_shmem(obj, pages, true);
	i915_gem_gtt_finish_pages(obj, pages);

	/*
	 * We always mark objects as dirty when they are used by the GPU,
	 * just in case. However, if we set the vma as being read-only we know
	 * that the object will never have been written to.
	 */
	if (i915_gem_object_is_readonly(obj))
		obj->mm.dirty = false;

	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty && trylock_page(page)) {
			/*
			 * As this may not be anonymous memory (e.g. shmem)
			 * but exist on a real mapping, we have to lock
			 * the page in order to dirty it -- holding
			 * the page reference is not sufficient to
			 * prevent the inode from being truncated.
			 * Play safe and take the lock.
			 *
			 * However...!
			 *
			 * The mmu-notifier can be invalidated for a
			 * migrate_page, that is already holding the lock
			 * on the page. Such a try_to_unmap() will result
			 * in us calling put_pages() and so recursively try
			 * to lock the page. We avoid that deadlock with
			 * a trylock_page() and in exchange we risk missing
			 * some page dirtying.
			 */
			set_page_dirty(page);
			unlock_page(page);
		}

		mark_page_accessed(page);
		put_page(page);
	}
	obj->mm.dirty = false;

	sg_free_table(pages);
	kfree(pages);
}
static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	i915_gem_userptr_release__mmu_notifier(obj);
	i915_gem_userptr_release__mm_struct(obj);
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mmu_object)
		return 0;

	return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE |
		 I915_GEM_OBJECT_NO_MMAP |
		 I915_GEM_OBJECT_ASYNC_CANCEL,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.release = i915_gem_userptr_release,
};
/*
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note, that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
int
i915_gem_userptr_ioctl(struct drm_device *dev,
		       void *data,
		       struct drm_file *file)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
		/* We cannot support coherent userptr objects on hw without
		 * LLC and broken snooping.
		 */
		return -ENODEV;
	}

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED))
		return -EINVAL;

	/*
	 * XXX: There is a prevalence of the assumption that we fit the
	 * object's page count inside a 32bit _signed_ variable. Let's document
	 * this and catch if we ever need to fix it. In the meantime, if you do
	 * spot such a local variable, please consider fixing!
	 *
	 * Aside from our own locals (for which we have no excuse!):
	 * - sg_table embeds unsigned int for num_pages
	 * - get_user_pages*() mixes ints with longs
	 */
	if (args->user_size >> PAGE_SHIFT > INT_MAX)
		return -E2BIG;

	if (overflows_type(args->user_size, obj->base.size))
		return -E2BIG;

	if (!args->user_size)
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok((char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/*
		 * On almost all of the older hw, we cannot tell the GPU that
		 * a page is readonly.
		 */
		if (!dev_priv->gt.vm->has_read_only)
			return -ENODEV;
	}

	obj = i915_gem_object_alloc();
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class);
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	obj->userptr.ptr = args->user_ptr;
	if (args->flags & I915_USERPTR_READ_ONLY)
		i915_gem_object_set_readonly(obj);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mm_struct(obj);
	if (ret == 0)
		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}
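/*
 * Illustrative userspace usage (not part of this driver; a minimal sketch
 * assuming a valid DRM fd for an i915 device and libdrm's drmIoctl()):
 *
 *	struct drm_i915_gem_userptr arg = {
 *		.user_ptr = (uintptr_t)ptr,	// page-aligned user pointer
 *		.user_size = size,		// page-aligned length in bytes
 *		.flags = 0,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0)
 *		use_gem_handle(arg.handle);	// hypothetical consumer
 *
 * On success, arg.handle names a GEM object whose backing store is the
 * supplied user memory, subject to the restrictions documented above.
 */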
int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
	mutex_init(&dev_priv->mm_lock);
	hash_init(dev_priv->mm_structs);

	dev_priv->mm.userptr_wq =
		alloc_workqueue("i915-userptr-acquire",
				WQ_HIGHPRI | WQ_UNBOUND,
				0);
	if (!dev_priv->mm.userptr_wq)
		return -ENOMEM;

	return 0;
}

void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->mm.userptr_wq);
}