/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2012-2014 Intel Corporation
 *
 * Based on amdgpu_mn, which bears the following notice:
 *
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */
#include <linux/mmu_context.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>

#include "i915_drv.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#ifdef CONFIG_MMU_NOTIFIER

/**
 * i915_gem_userptr_invalidate - callback to notify about mm change
 *
 * @mni: the range (mm) is about to update
 * @range: details on the invalidation
 * @cur_seq: Value to pass to mmu_interval_set_seq()
 *
 * Mark the interval as invalid by recording the new sequence number. Users
 * of the backing pages observe the change via mmu_interval_read_retry()
 * and must then drop and re-acquire the pages.
 */
static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
					const struct mmu_notifier_range *range,
					unsigned long cur_seq)
{
	mmu_interval_set_seq(mni, cur_seq);

	return true;
}
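/*
 * Note that the callback does not unmap or block anything by itself; it
 * only bumps the interval's sequence number. Users of the pinned pages
 * detect a racing invalidation through the usual mmu_interval_notifier
 * pattern (illustrative sketch, not verbatim driver code):
 *
 *	seq = mmu_interval_read_begin(&obj->userptr.notifier);
 *	... pin the user pages and build the object's sg_table ...
 *	if (mmu_interval_read_retry(&obj->userptr.notifier, seq))
 *		... drop the pages and try again ...
 *
 * This is the scheme implemented by submit_init()/submit_done() below.
 */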
static const struct mmu_interval_notifier_ops i915_gem_userptr_notifier_ops = {
	.invalidate = i915_gem_userptr_invalidate,
};

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj)
{
	return mmu_interval_notifier_insert(&obj->userptr.notifier, current->mm,
					    obj->userptr.ptr, obj->base.size,
					    &i915_gem_userptr_notifier_ops);
}
static void i915_gem_object_userptr_drop_ref(struct drm_i915_gem_object *obj)
{
	struct page **pvec = NULL;

	assert_object_held_shared(obj);

	if (!--obj->userptr.page_ref) {
		pvec = obj->userptr.pvec;
		obj->userptr.pvec = NULL;
	}
	GEM_BUG_ON(obj->userptr.page_ref < 0);

	if (pvec) {
		const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;

		unpin_user_pages(pvec, num_pages);
		kvfree(pvec);
	}
}
static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	unsigned int max_segment = i915_sg_segment_size(obj->base.dev->dev);
	struct sg_table *st;
	struct page **pvec;
	unsigned int num_pages; /* limited by sg_alloc_table_from_pages_segment */
	int ret;

	if (overflows_type(obj->base.size >> PAGE_SHIFT, num_pages))
		return -E2BIG;

	num_pages = obj->base.size >> PAGE_SHIFT;
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	if (!obj->userptr.page_ref) {
		/* No pages pinned yet: the caller must go through submit_init() first */
		ret = -EAGAIN;
		goto err_free;
	}

	obj->userptr.page_ref++;
	pvec = obj->userptr.pvec;

alloc_table:
	ret = sg_alloc_table_from_pages_segment(st, pvec, num_pages, 0,
						num_pages << PAGE_SHIFT,
						max_segment, GFP_KERNEL);
	if (ret)
		goto err;

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		sg_free_table(st);

		/* Retry with single-page segments before giving up */
		if (max_segment > PAGE_SIZE) {
			max_segment = PAGE_SIZE;
			goto alloc_table;
		}

		goto err;
	}

	WARN_ON_ONCE(!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE));
	if (i915_gem_object_can_bypass_llc(obj))
		obj->cache_dirty = true;

	__i915_gem_object_set_pages(obj, st);

	return 0;

err:
	i915_gem_object_userptr_drop_ref(obj);
err_free:
	kfree(st);
	return ret;
}
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;

	if (!pages)
		return;

	__i915_gem_object_release_shmem(obj, pages, true);
	i915_gem_gtt_finish_pages(obj, pages);

	/*
	 * We always mark objects as dirty when they are used by the GPU,
	 * just in case. However, if we set the vma as being read-only we know
	 * that the object will never have been written to.
	 */
	if (i915_gem_object_is_readonly(obj))
		obj->mm.dirty = false;

	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty && trylock_page(page)) {
			/*
			 * As this may not be anonymous memory (e.g. shmem)
			 * but exist on a real mapping, we have to lock
			 * the page in order to dirty it -- holding
			 * the page reference is not sufficient to
			 * prevent the inode from being truncated.
			 * Play safe and take the lock.
			 *
			 * However...!
			 *
			 * The mmu-notifier can be invalidated for a
			 * migrate_folio, which is already holding the lock
			 * on the folio. Such a try_to_unmap() will result
			 * in us calling put_pages() and so recursively try
			 * to lock the page. We avoid that deadlock with
			 * a trylock_page() and in exchange we risk missing
			 * some page dirtying.
			 */
			set_page_dirty(page);
			unlock_page(page);
		}

		mark_page_accessed(page);
	}
	obj->mm.dirty = false;

	sg_free_table(pages);
	kfree(pages);

	i915_gem_object_userptr_drop_ref(obj);
}
static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	int err;

	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err)
		return err;

	if (GEM_WARN_ON(i915_gem_object_has_pinned_pages(obj)))
		return -EBUSY;

	assert_object_held(obj);

	pages = __i915_gem_object_unset_pages(obj);
	if (!IS_ERR_OR_NULL(pages))
		i915_gem_userptr_put_pages(obj, pages);

	return err;
}
int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj)
{
	const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	unsigned int gup_flags = 0;
	unsigned long notifier_seq;
	int pinned, ret;

	if (obj->userptr.notifier.mm != current->mm)
		return -EFAULT;

	notifier_seq = mmu_interval_read_begin(&obj->userptr.notifier);

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		return ret;

	if (notifier_seq == obj->userptr.notifier_seq && obj->userptr.pvec) {
		i915_gem_object_unlock(obj);
		return 0;
	}

	ret = i915_gem_object_userptr_unbind(obj);
	i915_gem_object_unlock(obj);
	if (ret)
		return ret;

	pvec = kvmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	if (!i915_gem_object_is_readonly(obj))
		gup_flags |= FOLL_WRITE;

	pinned = 0;
	while (pinned < num_pages) {
		ret = pin_user_pages_fast(obj->userptr.ptr + pinned * PAGE_SIZE,
					  num_pages - pinned, gup_flags,
					  &pvec[pinned]);
		if (ret < 0)
			goto out;

		pinned += ret;
	}

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		goto out;

	if (mmu_interval_read_retry(&obj->userptr.notifier,
		!obj->userptr.page_ref ? notifier_seq :
		obj->userptr.notifier_seq)) {
		ret = -EAGAIN;
		goto out_unlock;
	}

	/*
	 * Install the pinned pages under a temporary page_ref so that
	 * get_pages() can consume them, then drop that reference again
	 * once get_pages() holds its own.
	 */
	if (!obj->userptr.page_ref++) {
		obj->userptr.pvec = pvec;
		obj->userptr.notifier_seq = notifier_seq;
		pvec = NULL;
		ret = ____i915_gem_object_get_pages(obj);
	}

	obj->userptr.page_ref--;

out_unlock:
	i915_gem_object_unlock(obj);

out:
	if (pvec) {
		unpin_user_pages(pvec, pinned);
		kvfree(pvec);
	}

	return ret;
}
int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj)
{
	if (mmu_interval_read_retry(&obj->userptr.notifier,
				    obj->userptr.notifier_seq)) {
		/* We collided with the mmu notifier, need to retry */
		return -EAGAIN;
	}

	return 0;
}
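/*
 * Taken together, submit_init() and submit_done() bracket request
 * construction. An illustrative (not verbatim) caller pattern, along the
 * lines of what the execbuf code does:
 *
 *	do {
 *		err = i915_gem_object_userptr_submit_init(obj);
 *		if (err)
 *			break;
 *
 *		... lock the object, build and queue the request ...
 *
 *		err = i915_gem_object_userptr_submit_done(obj);
 *	} while (err == -EAGAIN);
 *
 * A racing invalidation between the two calls simply forces another trip
 * around the loop with freshly pinned pages.
 */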
int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj)
{
	int err;

	err = i915_gem_object_userptr_submit_init(obj);
	if (err)
		return err;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (!err) {
		/*
		 * Since we only check validity, not use the pages,
		 * it doesn't matter if we collide with the mmu notifier,
		 * and -EAGAIN handling is not required.
		 */
		err = i915_gem_object_pin_pages(obj);
		if (!err)
			i915_gem_object_unpin_pages(obj);

		i915_gem_object_unlock(obj);
	}

	return err;
}
static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	GEM_WARN_ON(obj->userptr.page_ref);

	if (!obj->userptr.notifier.mm)
		return;

	mmu_interval_notifier_remove(&obj->userptr.notifier);
	obj->userptr.notifier.mm = NULL;
}
static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	drm_dbg(obj->base.dev, "Exporting userptr no longer allowed\n");

	return -EINVAL;
}

static int
i915_gem_userptr_pwrite(struct drm_i915_gem_object *obj,
			const struct drm_i915_gem_pwrite *args)
{
	drm_dbg(obj->base.dev, "pwrite to userptr no longer allowed\n");

	return -EINVAL;
}

static int
i915_gem_userptr_pread(struct drm_i915_gem_object *obj,
		       const struct drm_i915_gem_pread *args)
{
	drm_dbg(obj->base.dev, "pread from userptr no longer allowed\n");

	return -EINVAL;
}
static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.name = "i915_gem_object_userptr",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE |
		 I915_GEM_OBJECT_NO_MMAP |
		 I915_GEM_OBJECT_IS_PROXY,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.pwrite = i915_gem_userptr_pwrite,
	.pread = i915_gem_userptr_pread,
	.release = i915_gem_userptr_release,
};

#endif
static int
probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
{
	VMA_ITERATOR(vmi, mm, addr);
	struct vm_area_struct *vma;
	unsigned long end = addr + len;

	mmap_read_lock(mm);
	for_each_vma_range(vmi, vma, end) {
		/* Check for holes, note that we also update the addr below */
		if (vma->vm_start > addr)
			break;

		/* Only real struct-page backed memory is acceptable */
		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
			break;

		addr = vma->vm_end;
	}
	mmap_read_unlock(mm);

	if (vma || addr < end)
		return -EFAULT;
	return 0;
}
/*
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
int
i915_gem_userptr_ioctl(struct drm_device *dev,
		       void *data,
		       struct drm_file *file)
{
	static struct lock_class_key __maybe_unused lock_class;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object __maybe_unused *obj;
	int __maybe_unused ret;
	u32 __maybe_unused handle;

	if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
		/* We cannot support coherent userptr objects on hw without
		 * LLC and with broken snooping.
		 */
		return -ENODEV;
	}

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED |
			    I915_USERPTR_PROBE))
		return -EINVAL;

	if (i915_gem_object_size_2big(args->user_size))
		return -E2BIG;

	if (!args->user_size)
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok((char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_UNSYNCHRONIZED)
		return -ENODEV;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/*
		 * On almost all of the older hw, we cannot tell the GPU that
		 * a page is readonly.
		 */
		if (!to_gt(dev_priv)->vm->has_read_only)
			return -ENODEV;
	}

	if (args->flags & I915_USERPTR_PROBE) {
		/*
		 * Check that the range pointed to represents real struct
		 * pages and not iomappings (at this moment in time!)
		 */
		ret = probe_range(current->mm, args->user_ptr, args->user_size);
		if (ret)
			return ret;
	}

#ifdef CONFIG_MMU_NOTIFIER
	obj = i915_gem_object_alloc();
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class,
			     I915_BO_ALLOC_USER);
	obj->mem_flags = I915_BO_FLAG_STRUCT_PAGE;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.notifier_seq = ULONG_MAX;
	if (args->flags & I915_USERPTR_READ_ONLY)
		i915_gem_object_set_readonly(obj);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mmu_notifier(obj);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
#else
	return -ENODEV;
#endif
}
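/*
 * Userspace usage, for reference (illustrative sketch only; error handling
 * omitted, fd is assumed to be an open i915 render node, and ptr/size a
 * page-aligned allocation):
 *
 *	struct drm_i915_gem_userptr arg = {
 *		.user_ptr = (uintptr_t)ptr,
 *		.user_size = size,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0)
 *		use_gem_handle(arg.handle);	(hypothetical consumer)
 *
 * On success, arg.handle names a GEM object backed directly by
 * ptr[0 .. size), usable in execbuffer like any other handle, subject to
 * the restrictions documented above.
 */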