/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2012-2014 Intel Corporation
 */

#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>

#include <drm/i915_drm.h>

#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "intel_drv.h"

struct i915_mm_struct {
	struct mm_struct *mm;
	struct drm_i915_private *i915;
	struct i915_mmu_notifier *mn;
	struct hlist_node node;
	struct kref kref;
	struct work_struct work;
};

#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

struct i915_mmu_notifier {
	spinlock_t lock;
	struct hlist_node node;
	struct mmu_notifier mn;
	struct rb_root_cached objects;
	struct i915_mm_struct *mm;
};

struct i915_mmu_object {
	struct i915_mmu_notifier *mn;
	struct drm_i915_gem_object *obj;
	struct interval_tree_node it;
};

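/*
 * Editor's overview of the bookkeeping above: each client mm_struct that
 * owns userptr objects is wrapped by one i915_mm_struct, which in turn
 * owns at most one i915_mmu_notifier registered with the core mm. Every
 * userptr GEM object is tracked by an i915_mmu_object, a node in that
 * notifier's interval tree keyed by the object's user address range, so
 * an invalidation of any CPU range can be mapped back to the overlapping
 * GEM objects.
 */
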
static void add_object(struct i915_mmu_object *mo)
{
	GEM_BUG_ON(!RB_EMPTY_NODE(&mo->it.rb));
	interval_tree_insert(&mo->it, &mo->mn->objects);
}

static void del_object(struct i915_mmu_object *mo)
{
	if (RB_EMPTY_NODE(&mo->it.rb))
		return;

	interval_tree_remove(&mo->it, &mo->mn->objects);
	RB_CLEAR_NODE(&mo->it.rb);
}

static void
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value)
{
	struct i915_mmu_object *mo = obj->userptr.mmu_object;

	/*
	 * During mm_invalidate_range we need to cancel any userptr that
	 * overlaps the range being invalidated. Doing so requires the
	 * struct_mutex, and that risks recursion. In order to cause
	 * recursion, the user must alias the userptr address space with
	 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
	 * to invalidate that mmapping, mm_invalidate_range is called with
	 * the userptr address *and* the struct_mutex held. To prevent that
	 * we set a flag under the i915_mmu_notifier spinlock to indicate
	 * whether this object is valid.
	 */
	if (!mo)
		return;

	spin_lock(&mo->mn->lock);
	if (value)
		add_object(mo);
	else
		del_object(mo);
	spin_unlock(&mo->mn->lock);
}

static int
userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
				  const struct mmu_notifier_range *range)
{
	struct i915_mmu_notifier *mn =
		container_of(_mn, struct i915_mmu_notifier, mn);
	struct interval_tree_node *it;
	struct mutex *unlock = NULL;
	unsigned long end;
	int ret = 0;

	if (RB_EMPTY_ROOT(&mn->objects.rb_root))
		return 0;

	/* interval ranges are inclusive, but invalidate range is exclusive */
	end = range->end - 1;

	spin_lock(&mn->lock);
	it = interval_tree_iter_first(&mn->objects, range->start, end);
	while (it) {
		struct drm_i915_gem_object *obj;

		if (!mmu_notifier_range_blockable(range)) {
			ret = -EAGAIN;
			break;
		}

		/*
		 * The mmu_object is released late when destroying the
		 * GEM object so it is entirely possible to gain a
		 * reference on an object in the process of being freed
		 * since our serialisation is via the spinlock and not
		 * the struct_mutex - and consequently use it after it
		 * is freed and then double free it. To prevent that
		 * use-after-free we only acquire a reference on the
		 * object if it is not in the process of being destroyed.
		 */
		obj = container_of(it, struct i915_mmu_object, it)->obj;
		if (!kref_get_unless_zero(&obj->base.refcount)) {
			it = interval_tree_iter_next(it, range->start, end);
			continue;
		}
		spin_unlock(&mn->lock);

		if (!unlock) {
			unlock = &mn->mm->i915->drm.struct_mutex;

			switch (mutex_trylock_recursive(unlock)) {
			default:
			case MUTEX_TRYLOCK_FAILED:
				if (mutex_lock_killable_nested(unlock, I915_MM_SHRINKER)) {
					i915_gem_object_put(obj);
					return -EINTR;
				}
				/* fall through */
			case MUTEX_TRYLOCK_SUCCESS:
				break;

			case MUTEX_TRYLOCK_RECURSIVE:
				unlock = ERR_PTR(-EEXIST);
				break;
			}
		}

		ret = i915_gem_object_unbind(obj);
		if (ret == 0)
			ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
		i915_gem_object_put(obj);
		if (ret)
			goto unlock;

		spin_lock(&mn->lock);

		/*
		 * As we do not (yet) protect the mmu from concurrent insertion
		 * over this range, there is no guarantee that this search will
		 * terminate given a pathological workload.
		 */
		it = interval_tree_iter_first(&mn->objects, range->start, end);
	}
	spin_unlock(&mn->lock);

unlock:
	if (!IS_ERR_OR_NULL(unlock))
		mutex_unlock(unlock);

	return ret;
}

static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
	.invalidate_range_start = userptr_mn_invalidate_range_start,
};

static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct i915_mm_struct *mm)
{
	struct i915_mmu_notifier *mn;

	mn = kmalloc(sizeof(*mn), GFP_KERNEL);
	if (mn == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mn->lock);
	mn->mn.ops = &i915_gem_userptr_notifier;
	mn->objects = RB_ROOT_CACHED;
	mn->mm = mm;

	return mn;
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
	struct i915_mmu_object *mo;

	mo = fetch_and_zero(&obj->userptr.mmu_object);
	if (!mo)
		return;

	spin_lock(&mo->mn->lock);
	del_object(mo);
	spin_unlock(&mo->mn->lock);

	kfree(mo);
}

static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
	struct i915_mmu_notifier *mn;
	int err = 0;

	mn = mm->mn;
	if (mn)
		return mn;

	mn = i915_mmu_notifier_create(mm);
	if (IS_ERR(mn))
		err = PTR_ERR(mn);

	down_write(&mm->mm->mmap_sem);
	mutex_lock(&mm->i915->mm_lock);
	if (mm->mn == NULL && !err) {
		/* Protected by mmap_sem (write-lock) */
		err = __mmu_notifier_register(&mn->mn, mm->mm);
		if (!err) {
			/* Protected by mm_lock */
			mm->mn = fetch_and_zero(&mn);
		}
	} else if (mm->mn) {
		/*
		 * Someone else raced and successfully installed the mmu
		 * notifier, we can cancel our own errors.
		 */
		err = 0;
	}
	mutex_unlock(&mm->i915->mm_lock);
	up_write(&mm->mm->mmap_sem);

	if (mn && !IS_ERR(mn))
		kfree(mn);

	return err ? ERR_PTR(err) : mm->mn;
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	struct i915_mmu_notifier *mn;
	struct i915_mmu_object *mo;

	if (flags & I915_USERPTR_UNSYNCHRONIZED)
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

	if (WARN_ON(obj->userptr.mm == NULL))
		return -EINVAL;

	mn = i915_mmu_notifier_find(obj->userptr.mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);

	mo = kzalloc(sizeof(*mo), GFP_KERNEL);
	if (!mo)
		return -ENOMEM;

	mo->mn = mn;
	mo->obj = obj;
	mo->it.start = obj->userptr.ptr;
	mo->it.last = obj->userptr.ptr + obj->base.size - 1;
	RB_CLEAR_NODE(&mo->it.rb);

	obj->userptr.mmu_object = mo;
	return 0;
}

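/*
 * Worked example for the interval bounds set up above: interval trees use
 * inclusive endpoints, so a userptr with ptr = 0x100000 and size = 0x4000
 * (four 4KiB pages) is tracked as [0x100000, 0x103fff]. An invalidation
 * of the half-open CPU range [0x103000, 0x104000) is converted to the
 * inclusive query [0x103000, 0x103fff] in the notifier and correctly
 * finds this object.
 */
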
static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
	if (mn == NULL)
		return;

	mmu_notifier_unregister(&mn->mn, mm);
	kfree(mn);
}

#else

static void
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value)
{
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
		return -ENODEV;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
}

#endif

static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
	struct i915_mm_struct *mm;

	/* Protected by dev_priv->mm_lock */
	hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
		if (mm->mm == real)
			return mm;

	return NULL;
}

static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_mm_struct *mm;
	int ret = 0;

	/* During release of the GEM object we hold the struct_mutex. This
	 * precludes us from calling mmput() at that time as that may be
	 * the last reference and so call exit_mmap(). exit_mmap() will
	 * attempt to reap the vma, and if we were holding a GTT mmap
	 * would then call drm_gem_vm_close() and attempt to reacquire
	 * the struct_mutex. So in order to avoid that recursion, we have
	 * to defer releasing the mm reference until after we drop the
	 * struct_mutex, i.e. we need to schedule a worker to do the clean
	 * up.
	 */
	mutex_lock(&dev_priv->mm_lock);
	mm = __i915_mm_struct_find(dev_priv, current->mm);
	if (mm == NULL) {
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (mm == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		kref_init(&mm->kref);
		mm->i915 = to_i915(obj->base.dev);

		mm->mm = current->mm;
		mmgrab(current->mm);

		mm->mn = NULL;

		/* Protected by dev_priv->mm_lock */
		hash_add(dev_priv->mm_structs,
			 &mm->node, (unsigned long)mm->mm);
	} else
		kref_get(&mm->kref);

	obj->userptr.mm = mm;
out:
	mutex_unlock(&dev_priv->mm_lock);
	return ret;
}

static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
	struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
	i915_mmu_notifier_free(mm->mn, mm->mm);
	mmdrop(mm->mm);
	kfree(mm);
}

static void
__i915_mm_struct_free(struct kref *kref)
{
	struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

	/* Protected by dev_priv->mm_lock */
	hash_del(&mm->node);
	mutex_unlock(&mm->i915->mm_lock);

	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
	queue_work(mm->i915->mm.userptr_wq, &mm->work);
}

static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mm == NULL)
		return;

	kref_put_mutex(&obj->userptr.mm->kref,
		       __i915_mm_struct_free,
		       &to_i915(obj->base.dev)->mm_lock);
	obj->userptr.mm = NULL;
}

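/*
 * Note the pairing above: kref_put_mutex() only takes dev_priv->mm_lock
 * when the refcount is about to drop to zero, and __i915_mm_struct_free()
 * is then invoked with that mutex held - it unhashes the entry, drops the
 * lock, and defers i915_mmu_notifier_free() plus the final mmdrop() to a
 * worker, for the recursion reasons described in
 * i915_gem_userptr_init__mm_struct().
 */
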
struct get_pages_work {
	struct work_struct work;
	struct drm_i915_gem_object *obj;
	struct task_struct *task;
};

static struct sg_table *
__i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
			       struct page **pvec, int num_pages)
{
	unsigned int max_segment = i915_sg_segment_size();
	struct sg_table *st;
	unsigned int sg_page_sizes;
	int ret;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

alloc_table:
	ret = __sg_alloc_table_from_pages(st, pvec, num_pages,
					  0, num_pages << PAGE_SHIFT,
					  max_segment,
					  GFP_KERNEL);
	if (ret) {
		kfree(st);
		return ERR_PTR(ret);
	}

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		sg_free_table(st);

		/* Retry with single-page sg segments if the DMA remapper
		 * could not cope with the coalesced entries. */
		if (max_segment > PAGE_SIZE) {
			max_segment = PAGE_SIZE;
			goto alloc_table;
		}

		kfree(st);
		return ERR_PTR(ret);
	}

	sg_page_sizes = i915_sg_page_sizes(st->sgl);

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return st;
}

static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct drm_i915_gem_object *obj = work->obj;
	const int npages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	ret = -ENOMEM;
	pinned = 0;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pvec != NULL) {
		struct mm_struct *mm = obj->userptr.mm->mm;
		unsigned int flags = 0;

		if (!i915_gem_object_is_readonly(obj))
			flags |= FOLL_WRITE;

		ret = -EFAULT;
		if (mmget_not_zero(mm)) {
			down_read(&mm->mmap_sem);
			while (pinned < npages) {
				ret = get_user_pages_remote
					(work->task, mm,
					 obj->userptr.ptr + pinned * PAGE_SIZE,
					 npages - pinned,
					 flags,
					 pvec + pinned, NULL, NULL);
				if (ret < 0)
					break;

				pinned += ret;
			}
			up_read(&mm->mmap_sem);
			mmput(mm);
		}
	}

	mutex_lock(&obj->mm.lock);
	if (obj->userptr.work == &work->work) {
		struct sg_table *pages = ERR_PTR(ret);

		if (pinned == npages) {
			pages = __i915_gem_userptr_alloc_pages(obj, pvec,
							       npages);
			if (!IS_ERR(pages)) {
				pinned = 0;
				pages = NULL;
			}
		}

		obj->userptr.work = ERR_CAST(pages);
		if (IS_ERR(pages))
			__i915_gem_userptr_set_active(obj, false);
	}
	mutex_unlock(&obj->mm.lock);

	release_pages(pvec, pinned);
	kvfree(pvec);

	i915_gem_object_put(obj);
	put_task_struct(work->task);
	kfree(work);
}

static struct sg_table *
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
{
	struct get_pages_work *work;

	/* Spawn a worker so that we can acquire the
	 * user pages without holding our mutex. Access
	 * to the user pages requires mmap_sem, and we have
	 * a strict lock ordering of mmap_sem, struct_mutex -
	 * we already hold struct_mutex here and so cannot
	 * call gup without encountering a lock inversion.
	 *
	 * Userspace will keep on repeating the operation
	 * (thanks to EAGAIN) until either we hit the fast
	 * path or the worker completes. If the worker is
	 * cancelled or superseded, the task is still run
	 * but the results ignored. (This leads to
	 * complications that we may have a stray object
	 * refcount that we need to be wary of when
	 * checking for existing objects during creation.)
	 * If the worker encounters an error, it reports
	 * that error back to this function through
	 * obj->userptr.work = ERR_PTR.
	 */
	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return ERR_PTR(-ENOMEM);

	obj->userptr.work = &work->work;

	work->obj = i915_gem_object_get(obj);

	work->task = current;
	get_task_struct(work->task);

	INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
	queue_work(to_i915(obj->base.dev)->mm.userptr_wq, &work->work);

	return ERR_PTR(-EAGAIN);
}

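/*
 * Illustrative note (not part of the driver): the EAGAIN protocol above
 * relies on the caller reissuing whichever ioctl needed the pages, which
 * is roughly what libdrm's drmIoctl() wrapper does:
 *
 *	do {
 *		ret = ioctl(fd, request, arg);
 *	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));
 *
 * The first pass through the slow path queues the worker and fails with
 * -EAGAIN; a later pass finds obj->userptr.work either cleared (success)
 * or holding an ERR_PTR to report.
 */
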
static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct mm_struct *mm = obj->userptr.mm->mm;
	struct page **pvec;
	struct sg_table *pages;
	bool active;
	int pinned;

	/* If userspace should engineer that these pages are replaced in
	 * the vma between us binding this page into the GTT and completion
	 * of rendering... Their loss. If they change the mapping of their
	 * pages they need to create a new bo to point to the new vma.
	 *
	 * However, that still leaves open the possibility of the vma
	 * being copied upon fork. Which falls under the same userspace
	 * synchronisation issue as a regular bo, except that this time
	 * the process may not be expecting that a particular piece of
	 * memory is tied to the GPU.
	 *
	 * Fortunately, we can hook into the mmu_notifier in order to
	 * discard the page references prior to anything nasty happening
	 * to the vma (discard or cloning) which should prevent the more
	 * egregious cases from causing harm.
	 */

	if (obj->userptr.work) {
		/* active flag should still be held for the pending work */
		if (IS_ERR(obj->userptr.work))
			return PTR_ERR(obj->userptr.work);
		else
			return -EAGAIN;
	}

	pvec = NULL;
	pinned = 0;

	if (mm == current->mm) {
		pvec = kvmalloc_array(num_pages, sizeof(struct page *),
				      GFP_KERNEL |
				      __GFP_NORETRY |
				      __GFP_NOWARN);
		if (pvec) /* defer to worker if malloc fails */
			pinned = __get_user_pages_fast(obj->userptr.ptr,
						       num_pages,
						       !i915_gem_object_is_readonly(obj),
						       pvec);
	}

	active = false;
	if (pinned < 0) {
		pages = ERR_PTR(pinned);
		pinned = 0;
	} else if (pinned < num_pages) {
		pages = __i915_gem_userptr_get_pages_schedule(obj);
		active = pages == ERR_PTR(-EAGAIN);
	} else {
		pages = __i915_gem_userptr_alloc_pages(obj, pvec, num_pages);
		active = !IS_ERR(pages);
	}
	if (active)
		__i915_gem_userptr_set_active(obj, true);

	if (IS_ERR(pages))
		release_pages(pvec, pinned);
	kvfree(pvec);

	return PTR_ERR_OR_ZERO(pages);
}

static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;

	/* Cancel any inflight work and force them to restart their gup */
	obj->userptr.work = NULL;
	__i915_gem_userptr_set_active(obj, false);
	if (!pages)
		return;

	__i915_gem_object_release_shmem(obj, pages, true);
	i915_gem_gtt_finish_pages(obj, pages);

	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty)
			/*
			 * As this may not be anonymous memory (e.g. shmem)
			 * but exist on a real mapping, we have to lock
			 * the page in order to dirty it -- holding
			 * the page reference is not sufficient to
			 * prevent the inode from being truncated.
			 * Play safe and take the lock.
			 */
			set_page_dirty_lock(page);

		mark_page_accessed(page);
		put_page(page);
	}
	obj->mm.dirty = false;

	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	i915_gem_userptr_release__mmu_notifier(obj);
	i915_gem_userptr_release__mm_struct(obj);
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mmu_object)
		return 0;

	return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE |
		 I915_GEM_OBJECT_ASYNC_CANCEL,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.release = i915_gem_userptr_release,
};

/*
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note, that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
int
i915_gem_userptr_ioctl(struct drm_device *dev,
		       void *data,
		       struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
		/* We cannot support coherent userptr objects on hw without
		 * LLC and broken snooping.
		 */
		return -ENODEV;
	}

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED))
		return -EINVAL;

	if (!args->user_size)
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok((char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		struct i915_address_space *vm;

		/*
		 * On almost all of the older hw, we cannot tell the GPU that
		 * a page is readonly.
		 */
		vm = dev_priv->kernel_context->vm;
		if (!vm || !vm->has_read_only)
			return -ENODEV;
	}

	obj = i915_gem_object_alloc();
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops);
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	obj->userptr.ptr = args->user_ptr;
	if (args->flags & I915_USERPTR_READ_ONLY)
		i915_gem_object_set_readonly(obj);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mm_struct(obj);
	if (ret == 0)
		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}

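/*
 * Illustrative userspace usage (a minimal sketch, not part of the driver):
 * wrap a page-aligned allocation in a GEM handle via this ioctl. Error
 * handling is elided; DRM_IOCTL_I915_GEM_USERPTR and struct
 * drm_i915_gem_userptr come from the uapi header <drm/i915_drm.h>.
 *
 *	struct drm_i915_gem_userptr arg = { 0 };
 *	void *ptr;
 *
 *	posix_memalign(&ptr, 4096, SIZE);	// page-aligned allocation
 *	arg.user_ptr = (uintptr_t)ptr;
 *	arg.user_size = SIZE;			// multiple of the page size
 *	arg.flags = 0;				// or I915_USERPTR_READ_ONLY
 *	ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg);
 *	// arg.handle now names the buffer; free(ptr) only after the
 *	// handle is closed and the GPU has relinquished the pages.
 */
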
int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
	mutex_init(&dev_priv->mm_lock);
	hash_init(dev_priv->mm_structs);

	dev_priv->mm.userptr_wq =
		alloc_workqueue("i915-userptr-acquire",
				WQ_HIGHPRI | WQ_UNBOUND,
				0);
	if (!dev_priv->mm.userptr_wq)
		return -ENOMEM;

	return 0;
}

void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->mm.userptr_wq);
}