/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_svm.h"
#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_dmem.h"

#include <nvif/notify.h>
#include <nvif/object.h>
#include <nvif/vmm.h>

#include <nvif/class.h>
#include <nvif/clb069.h>
#include <nvif/ifc00d.h>

#include <linux/sched/mm.h>
#include <linux/sort.h>
#include <linux/hmm.h>
#include <linux/rmap.h>
struct nouveau_svm {
	struct nouveau_drm *drm;
	struct mutex mutex;
	struct list_head inst;

	struct nouveau_svm_fault_buffer {
		int id;
		struct nvif_object object;
		u32 entries;
		u32 getaddr;
		u32 putaddr;
		u32 get;
		u32 put;
		struct nvif_notify notify;

		struct nouveau_svm_fault {
			u64 inst;
			u64 addr;
			u64 time;
			u32 engine;
			u8  gpc;
			u8  hub;
			u8  access;
			u8  client;
			u8  fault;
			struct nouveau_svmm *svmm;
		} **fault;
		int fault_nr;
	} buffer[1];
};
#define FAULT_ACCESS_READ 0
#define FAULT_ACCESS_WRITE 1
#define FAULT_ACCESS_ATOMIC 2
#define FAULT_ACCESS_PREFETCH 3

#define SVM_DBG(s,f,a...) NV_DEBUG((s)->drm, "svm: "f"\n", ##a)
#define SVM_ERR(s,f,a...) NV_WARN((s)->drm, "svm: "f"\n", ##a)
struct nouveau_pfnmap_args {
	struct nvif_ioctl_v0 i;
	struct nvif_ioctl_mthd_v0 m;
	struct nvif_vmm_pfnmap_v0 p;
};

struct nouveau_ivmm {
	struct nouveau_svmm *svmm;
	u64 inst;
	struct list_head head;
};
static struct nouveau_ivmm *
nouveau_ivmm_find(struct nouveau_svm *svm, u64 inst)
{
	struct nouveau_ivmm *ivmm;
	list_for_each_entry(ivmm, &svm->inst, head) {
		if (ivmm->inst == inst)
			return ivmm;
	}
	return NULL;
}
#define SVMM_DBG(s,f,a...)                                                     \
	NV_DEBUG((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)
#define SVMM_ERR(s,f,a...)                                                     \
	NV_WARN((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)
int
nouveau_svmm_bind(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_nouveau_svm_bind *args = data;
	unsigned target, cmd, priority;
	unsigned long addr, end;
	struct mm_struct *mm;

	args->va_start &= PAGE_MASK;
	args->va_end = ALIGN(args->va_end, PAGE_SIZE);

	/* Sanity check arguments */
	if (args->reserved0 || args->reserved1)
		return -EINVAL;
	if (args->header & (~NOUVEAU_SVM_BIND_VALID_MASK))
		return -EINVAL;
	if (args->va_start >= args->va_end)
		return -EINVAL;

	cmd = args->header >> NOUVEAU_SVM_BIND_COMMAND_SHIFT;
	cmd &= NOUVEAU_SVM_BIND_COMMAND_MASK;
	switch (cmd) {
	case NOUVEAU_SVM_BIND_COMMAND__MIGRATE:
		break;
	default:
		return -EINVAL;
	}

	priority = args->header >> NOUVEAU_SVM_BIND_PRIORITY_SHIFT;
	priority &= NOUVEAU_SVM_BIND_PRIORITY_MASK;

	/* FIXME: support CPU targets, i.e. all target values < GPU_VRAM */
	target = args->header >> NOUVEAU_SVM_BIND_TARGET_SHIFT;
	target &= NOUVEAU_SVM_BIND_TARGET_MASK;
	switch (target) {
	case NOUVEAU_SVM_BIND_TARGET__GPU_VRAM:
		break;
	default:
		return -EINVAL;
	}

	/*
	 * FIXME: For now refuse a non-zero stride; we need to teach the
	 * migrate kernel function to handle strides to avoid creating a
	 * mess within each device driver.
	 */
	if (args->stride)
		return -EINVAL;

	/*
	 * OK, we are asked to do something sane. For now we only support
	 * migrate commands, but we will add things like memory policy
	 * (what to do on page fault) and maybe some other commands.
	 */

	mm = get_task_mm(current);
	mmap_read_lock(mm);

	if (!cli->svm.svmm) {
		mmap_read_unlock(mm);
		mmput(mm);
		return -EINVAL;
	}

	for (addr = args->va_start, end = args->va_end; addr < end;) {
		struct vm_area_struct *vma;
		unsigned long next;

		vma = find_vma_intersection(mm, addr, end);
		if (!vma)
			break;

		addr = max(addr, vma->vm_start);
		next = min(vma->vm_end, end);
		/* This is a best effort so we ignore errors */
		nouveau_dmem_migrate_vma(cli->drm, cli->svm.svmm, vma, addr,
					 next);
		addr = next;
	}

	/*
	 * FIXME: Return the number of pages we have migrated. Again, we
	 * need to update the migrate API to return that information so
	 * that we can report it to user space.
	 */

	mmap_read_unlock(mm);
	mmput(mm);

	return 0;
}
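/*
 * For reference, a sketch of how userspace is expected to pack the header
 * decoded above (illustrative only; the NOUVEAU_SVM_BIND_* definitions in
 * the uapi header are authoritative):
 *
 *	header = (NOUVEAU_SVM_BIND_COMMAND__MIGRATE <<
 *		  NOUVEAU_SVM_BIND_COMMAND_SHIFT) |
 *		 (priority << NOUVEAU_SVM_BIND_PRIORITY_SHIFT) |
 *		 (NOUVEAU_SVM_BIND_TARGET__GPU_VRAM <<
 *		  NOUVEAU_SVM_BIND_TARGET_SHIFT);
 */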
/* Unlink channel instance from SVMM. */
void
nouveau_svmm_part(struct nouveau_svmm *svmm, u64 inst)
{
	struct nouveau_ivmm *ivmm;
	if (svmm) {
		mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
		ivmm = nouveau_ivmm_find(svmm->vmm->cli->drm->svm, inst);
		if (ivmm) {
			list_del(&ivmm->head);
			kfree(ivmm);
		}
		mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
	}
}
/* Link channel instance to SVMM. */
int
nouveau_svmm_join(struct nouveau_svmm *svmm, u64 inst)
{
	struct nouveau_ivmm *ivmm;
	if (svmm) {
		if (!(ivmm = kmalloc(sizeof(*ivmm), GFP_KERNEL)))
			return -ENOMEM;
		ivmm->svmm = svmm;
		ivmm->inst = inst;

		mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
		list_add(&ivmm->head, &svmm->vmm->cli->drm->svm->inst);
		mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
	}
	return 0;
}
/* Invalidate SVMM address-range on GPU. */
void
nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
{
	if (limit > start) {
		bool super = svmm->vmm->vmm.object.client->super;
		svmm->vmm->vmm.object.client->super = true;
		nvif_object_mthd(&svmm->vmm->vmm.object, NVIF_VMM_V0_PFNCLR,
				 &(struct nvif_vmm_pfnclr_v0) {
					.addr = start,
					.size = limit - start,
				 }, sizeof(struct nvif_vmm_pfnclr_v0));
		svmm->vmm->vmm.object.client->super = super;
	}
}
static int
nouveau_svmm_invalidate_range_start(struct mmu_notifier *mn,
				    const struct mmu_notifier_range *update)
{
	struct nouveau_svmm *svmm =
		container_of(mn, struct nouveau_svmm, notifier);
	unsigned long start = update->start;
	unsigned long limit = update->end;

	if (!mmu_notifier_range_blockable(update))
		return -EAGAIN;

	SVMM_DBG(svmm, "invalidate %016lx-%016lx", start, limit);

	mutex_lock(&svmm->mutex);
	if (unlikely(!svmm->vmm))
		goto out;

	/*
	 * Ignore invalidation callbacks for device private pages since
	 * the invalidation is handled as part of the migration process.
	 */
	if (update->event == MMU_NOTIFY_MIGRATE &&
	    update->owner == svmm->vmm->cli->drm->dev)
		goto out;

	if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) {
		if (start < svmm->unmanaged.start) {
			nouveau_svmm_invalidate(svmm, start,
						svmm->unmanaged.limit);
		}
		start = svmm->unmanaged.limit;
	}

	nouveau_svmm_invalidate(svmm, start, limit);

out:
	mutex_unlock(&svmm->mutex);
	return 0;
}
static void nouveau_svmm_free_notifier(struct mmu_notifier *mn)
{
	kfree(container_of(mn, struct nouveau_svmm, notifier));
}

static const struct mmu_notifier_ops nouveau_mn_ops = {
	.invalidate_range_start = nouveau_svmm_invalidate_range_start,
	.free_notifier = nouveau_svmm_free_notifier,
};
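/*
 * Lifetime note: a registered svmm is freed via free_notifier() once the
 * last mmu_notifier reference is dropped (see mmu_notifier_put() in
 * nouveau_svmm_fini() below), so it is never kfree()'d directly.
 */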
void
nouveau_svmm_fini(struct nouveau_svmm **psvmm)
{
	struct nouveau_svmm *svmm = *psvmm;
	if (svmm) {
		mutex_lock(&svmm->mutex);
		svmm->vmm = NULL;
		mutex_unlock(&svmm->mutex);
		mmu_notifier_put(&svmm->notifier);
		*psvmm = NULL;
	}
}
int
nouveau_svmm_init(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_svmm *svmm;
	struct drm_nouveau_svm_init *args = data;
	int ret;

	/* We need to fail if svm is disabled */
	if (!cli->drm->svm)
		return -ENOSYS;

	/* Allocate tracking for SVM-enabled VMM. */
	if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL)))
		return -ENOMEM;
	svmm->vmm = &cli->svm;
	svmm->unmanaged.start = args->unmanaged_addr;
	svmm->unmanaged.limit = args->unmanaged_addr + args->unmanaged_size;
	mutex_init(&svmm->mutex);

	/* Check that SVM isn't already enabled for the client. */
	mutex_lock(&cli->mutex);
	if (cli->svm.cli) {
		ret = -EBUSY;
		goto out_free;
	}

	/* Allocate a new GPU VMM that can support SVM (managed by the
	 * client, with replayable faults enabled).
	 *
	 * All future channel/memory allocations will make use of this
	 * VMM instead of the standard one.
	 */
	ret = nvif_vmm_ctor(&cli->mmu, "svmVmm",
			    cli->vmm.vmm.object.oclass, true,
			    args->unmanaged_addr, args->unmanaged_size,
			    &(struct gp100_vmm_v0) {
				.fault_replay = true,
			    }, sizeof(struct gp100_vmm_v0), &cli->svm.vmm);
	if (ret)
		goto out_free;

	mmap_write_lock(current->mm);
	svmm->notifier.ops = &nouveau_mn_ops;
	ret = __mmu_notifier_register(&svmm->notifier, current->mm);
	if (ret)
		goto out_mm_unlock;
	/* Note, ownership of svmm transfers to mmu_notifier */

	cli->svm.svmm = svmm;
	cli->svm.cli = cli;
	mmap_write_unlock(current->mm);
	mutex_unlock(&cli->mutex);
	return 0;

out_mm_unlock:
	mmap_write_unlock(current->mm);
out_free:
	mutex_unlock(&cli->mutex);
	kfree(svmm);
	return ret;
}
/* Issue fault replay for GPU to retry accesses that faulted previously. */
static void
nouveau_svm_fault_replay(struct nouveau_svm *svm)
{
	SVM_DBG(svm, "replay");
	WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
				 GP100_VMM_VN_FAULT_REPLAY,
				 &(struct gp100_vmm_fault_replay_vn) {},
				 sizeof(struct gp100_vmm_fault_replay_vn)));
}
/* Cancel a replayable fault that could not be handled.
 *
 * Cancelling the fault will trigger recovery to reset the engine
 * and kill the offending channel (ie. GPU SIGSEGV).
 */
static void
nouveau_svm_fault_cancel(struct nouveau_svm *svm,
			 u64 inst, u8 hub, u8 gpc, u8 client)
{
	SVM_DBG(svm, "cancel %016llx %d %02x %02x", inst, hub, gpc, client);
	WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
				 GP100_VMM_VN_FAULT_CANCEL,
				 &(struct gp100_vmm_fault_cancel_v0) {
					.hub = hub,
					.gpc = gpc,
					.client = client,
					.inst = inst,
				 }, sizeof(struct gp100_vmm_fault_cancel_v0)));
}

static void
nouveau_svm_fault_cancel_fault(struct nouveau_svm *svm,
			       struct nouveau_svm_fault *fault)
{
	nouveau_svm_fault_cancel(svm, fault->inst,
				      fault->hub,
				      fault->gpc,
				      fault->client);
}
static int
nouveau_svm_fault_priority(u8 fault)
{
	switch (fault) {
	case FAULT_ACCESS_PREFETCH:
		return 0;
	case FAULT_ACCESS_READ:
		return 1;
	case FAULT_ACCESS_WRITE:
		return 2;
	case FAULT_ACCESS_ATOMIC:
		return 3;
	default:
		WARN_ON_ONCE(1);
		return -1;
	}
}
static int
nouveau_svm_fault_cmp(const void *a, const void *b)
{
	const struct nouveau_svm_fault *fa = *(struct nouveau_svm_fault **)a;
	const struct nouveau_svm_fault *fb = *(struct nouveau_svm_fault **)b;
	int ret;
	if ((ret = (s64)fa->inst - fb->inst))
		return ret;
	if ((ret = (s64)fa->addr - fb->addr))
		return ret;
	return nouveau_svm_fault_priority(fa->access) -
		nouveau_svm_fault_priority(fb->access);
}
static void
nouveau_svm_fault_cache(struct nouveau_svm *svm,
			struct nouveau_svm_fault_buffer *buffer, u32 offset)
{
	struct nvif_object *memory = &buffer->object;
	const u32 instlo = nvif_rd32(memory, offset + 0x00);
	const u32 insthi = nvif_rd32(memory, offset + 0x04);
	const u32 addrlo = nvif_rd32(memory, offset + 0x08);
	const u32 addrhi = nvif_rd32(memory, offset + 0x0c);
	const u32 timelo = nvif_rd32(memory, offset + 0x10);
	const u32 timehi = nvif_rd32(memory, offset + 0x14);
	const u32 engine = nvif_rd32(memory, offset + 0x18);
	const u32   info = nvif_rd32(memory, offset + 0x1c);
	const u64   inst = (u64)insthi << 32 | instlo;
	const u8     gpc = (info & 0x1f000000) >> 24;
	const u8     hub = (info & 0x00100000) >> 20;
	const u8  client = (info & 0x00007f00) >> 8;
	struct nouveau_svm_fault *fault;

	/* XXX: I think we're supposed to spin waiting. */
	if (WARN_ON(!(info & 0x80000000)))
		return;

	nvif_mask(memory, offset + 0x1c, 0x80000000, 0x00000000);

	if (!buffer->fault[buffer->fault_nr]) {
		fault = kmalloc(sizeof(*fault), GFP_KERNEL);
		if (WARN_ON(!fault)) {
			nouveau_svm_fault_cancel(svm, inst, hub, gpc, client);
			return;
		}
		buffer->fault[buffer->fault_nr] = fault;
	}

	fault = buffer->fault[buffer->fault_nr++];
	fault->inst   = inst;
	fault->addr   = (u64)addrhi << 32 | addrlo;
	fault->time   = (u64)timehi << 32 | timelo;
	fault->engine = engine;
	fault->gpc    = gpc;
	fault->hub    = hub;
	fault->access = (info & 0x000f0000) >> 16;
	fault->client = client;
	fault->fault  = (info & 0x0000001f);

	SVM_DBG(svm, "fault %016llx %016llx %02x",
		fault->inst, fault->addr, fault->access);
}
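/*
 * For reference, the 32-byte replayable fault buffer entry has the
 * following layout, as implied by the reads in nouveau_svm_fault_cache()
 * above:
 *
 *	0x00: instance pointer, low 32 bits
 *	0x04: instance pointer, high 32 bits
 *	0x08: fault address, low 32 bits
 *	0x0c: fault address, high 32 bits
 *	0x10: timestamp, low 32 bits
 *	0x14: timestamp, high 32 bits
 *	0x18: engine
 *	0x1c: info: valid (bit 31), gpc (28:24), hub (20), access (19:16),
 *	      client (14:8), fault type (4:0)
 */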
struct svm_notifier {
	struct mmu_interval_notifier notifier;
	struct nouveau_svmm *svmm;
};
static bool nouveau_svm_range_invalidate(struct mmu_interval_notifier *mni,
					 const struct mmu_notifier_range *range,
					 unsigned long cur_seq)
{
	struct svm_notifier *sn =
		container_of(mni, struct svm_notifier, notifier);

	if (range->event == MMU_NOTIFY_EXCLUSIVE &&
	    range->owner == sn->svmm->vmm->cli->drm->dev)
		return true;

	/*
	 * Serializes the update to mni->invalidate_seq done by the caller
	 * and prevents invalidation of the PTE from progressing while HW
	 * is being programmed. This is very hacky and only works because
	 * the normal notifier that does invalidation is always called
	 * after the range notifier.
	 */
	if (mmu_notifier_range_blockable(range))
		mutex_lock(&sn->svmm->mutex);
	else if (!mutex_trylock(&sn->svmm->mutex))
		return false;
	mmu_interval_set_seq(mni, cur_seq);
	mutex_unlock(&sn->svmm->mutex);
	return true;
}

static const struct mmu_interval_notifier_ops nouveau_svm_mni_ops = {
	.invalidate = nouveau_svm_range_invalidate,
};
static void nouveau_hmm_convert_pfn(struct nouveau_drm *drm,
				    struct hmm_range *range,
				    struct nouveau_pfnmap_args *args)
{
	struct page *page;

	/*
	 * The address prepared here is passed through nvif_object_ioctl()
	 * to an eventual DMA map in something like gp100_vmm_pgt_pfn()
	 *
	 * This is all just encoding the internal hmm representation into a
	 * different nouveau internal representation.
	 */
	if (!(range->hmm_pfns[0] & HMM_PFN_VALID)) {
		args->p.phys[0] = 0;
		return;
	}

	page = hmm_pfn_to_page(range->hmm_pfns[0]);
	/*
	 * Only map compound pages to the GPU if the CPU is also mapping the
	 * page as a compound page. Otherwise, the PTE protections might not be
	 * consistent (e.g., CPU only maps part of a compound page).
	 * Note that the underlying page might still be larger than the
	 * CPU mapping (e.g., a PUD sized compound page partially mapped with
	 * a PMD sized page table entry).
	 */
	if (hmm_pfn_to_map_order(range->hmm_pfns[0])) {
		unsigned long addr = args->p.addr;

		args->p.page = hmm_pfn_to_map_order(range->hmm_pfns[0]) +
				PAGE_SHIFT;
		args->p.size = 1UL << args->p.page;
		args->p.addr &= ~(args->p.size - 1);
		page -= (addr - args->p.addr) >> PAGE_SHIFT;
	}
	if (is_device_private_page(page))
		args->p.phys[0] = nouveau_dmem_page_addr(page) |
				NVIF_VMM_PFNMAP_V0_V |
				NVIF_VMM_PFNMAP_V0_VRAM;
	else
		args->p.phys[0] = page_to_phys(page) |
				NVIF_VMM_PFNMAP_V0_V |
				NVIF_VMM_PFNMAP_V0_HOST;
	if (range->hmm_pfns[0] & HMM_PFN_WRITE)
		args->p.phys[0] |= NVIF_VMM_PFNMAP_V0_W;
}
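/*
 * Worked example of the compound-page path above, assuming 4KiB base
 * pages: for a fault at 0x205000 backed by a PMD-mapped 2MiB THP,
 * map_order is 9, so p.page becomes 21, p.size becomes 2MiB, p.addr is
 * rounded down to 0x200000, and the page pointer is walked back five
 * small pages so that phys[0] describes the start of the whole mapping.
 */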
static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
			       struct nouveau_drm *drm,
			       struct nouveau_pfnmap_args *args, u32 size,
			       struct svm_notifier *notifier)
{
	unsigned long timeout =
		jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
	struct mm_struct *mm = svmm->notifier.mm;
	struct page *page;
	unsigned long start = args->p.addr;
	unsigned long notifier_seq;
	int ret = 0;

	ret = mmu_interval_notifier_insert(&notifier->notifier, mm,
					   args->p.addr, args->p.size,
					   &nouveau_svm_mni_ops);
	if (ret)
		return ret;

	while (true) {
		if (time_after(jiffies, timeout)) {
			ret = -EBUSY;
			goto out;
		}

		notifier_seq = mmu_interval_read_begin(&notifier->notifier);
		mmap_read_lock(mm);
		ret = make_device_exclusive_range(mm, start, start + PAGE_SIZE,
						  &page, drm->dev);
		mmap_read_unlock(mm);
		if (ret <= 0 || !page) {
			ret = -EINVAL;
			goto out;
		}

		mutex_lock(&svmm->mutex);
		if (!mmu_interval_read_retry(&notifier->notifier,
					     notifier_seq))
			break;
		mutex_unlock(&svmm->mutex);
	}

	/* Map the page on the GPU. */
	args->p.page = 12;
	args->p.size = PAGE_SIZE;
	args->p.addr = start;
	args->p.phys[0] = page_to_phys(page) |
		NVIF_VMM_PFNMAP_V0_V |
		NVIF_VMM_PFNMAP_V0_W |
		NVIF_VMM_PFNMAP_V0_A |
		NVIF_VMM_PFNMAP_V0_HOST;

	svmm->vmm->vmm.object.client->super = true;
	ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
	svmm->vmm->vmm.object.client->super = false;
	mutex_unlock(&svmm->mutex);

	unlock_page(page);
	put_page(page);

out:
	mmu_interval_notifier_remove(&notifier->notifier);
	return ret;
}
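/*
 * Note on the atomic path above: make_device_exclusive_range() replaces
 * the CPU PTE with a device-exclusive entry, so any subsequent CPU access
 * faults and raises the MMU_NOTIFY_EXCLUSIVE event that is deliberately
 * ignored by nouveau_svm_range_invalidate(). NVIF_VMM_PFNMAP_V0_A marks
 * the GPU mapping as atomic-capable.
 */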
static int nouveau_range_fault(struct nouveau_svmm *svmm,
			       struct nouveau_drm *drm,
			       struct nouveau_pfnmap_args *args, u32 size,
			       unsigned long hmm_flags,
			       struct svm_notifier *notifier)
{
	unsigned long timeout =
		jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
	/* Have HMM fault pages within the fault window to the GPU. */
	unsigned long hmm_pfns[1];
	struct hmm_range range = {
		.notifier = &notifier->notifier,
		.default_flags = hmm_flags,
		.hmm_pfns = hmm_pfns,
		.dev_private_owner = drm->dev,
	};
	struct mm_struct *mm = svmm->notifier.mm;
	int ret;

	ret = mmu_interval_notifier_insert(&notifier->notifier, mm,
					   args->p.addr, args->p.size,
					   &nouveau_svm_mni_ops);
	if (ret)
		return ret;

	range.start = notifier->notifier.interval_tree.start;
	range.end = notifier->notifier.interval_tree.last + 1;

	while (true) {
		if (time_after(jiffies, timeout)) {
			ret = -EBUSY;
			goto out;
		}

		range.notifier_seq = mmu_interval_read_begin(range.notifier);
		mmap_read_lock(mm);
		ret = hmm_range_fault(&range);
		mmap_read_unlock(mm);
		if (ret) {
			if (ret == -EBUSY)
				continue;
			goto out;
		}

		mutex_lock(&svmm->mutex);
		if (mmu_interval_read_retry(range.notifier,
					    range.notifier_seq)) {
			mutex_unlock(&svmm->mutex);
			continue;
		}
		break;
	}

	nouveau_hmm_convert_pfn(drm, &range, args);

	svmm->vmm->vmm.object.client->super = true;
	ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
	svmm->vmm->vmm.object.client->super = false;
	mutex_unlock(&svmm->mutex);

out:
	mmu_interval_notifier_remove(&notifier->notifier);

	return ret;
}
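/*
 * Both fault paths above follow the standard mmu_interval_notifier retry
 * protocol, which behaves like a sequence lock (a sketch; see
 * Documentation/vm/hmm.rst for the canonical form):
 *
 *	seq = mmu_interval_read_begin(&ni);
 *	collect/fault pages without holding driver locks;
 *	take the driver lock that the invalidate callback also takes;
 *	if (mmu_interval_read_retry(&ni, seq))
 *		drop the lock and retry;
 *	program the hardware while still holding the lock;
 */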
static int
nouveau_svm_fault(struct nvif_notify *notify)
{
	struct nouveau_svm_fault_buffer *buffer =
		container_of(notify, typeof(*buffer), notify);
	struct nouveau_svm *svm =
		container_of(buffer, typeof(*svm), buffer[buffer->id]);
	struct nvif_object *device = &svm->drm->client.device.object;
	struct nouveau_svmm *svmm;
	struct {
		struct nouveau_pfnmap_args i;
		u64 phys[1];
	} args;
	unsigned long hmm_flags;
	u64 inst, start, limit;
	int fi, fn;
	int replay = 0, atomic = 0, ret;
	/* Parse available fault buffer entries into a cache, and update
	 * the GET pointer so HW can reuse the entries.
	 */
	SVM_DBG(svm, "fault handler");
	if (buffer->get == buffer->put) {
		buffer->put = nvif_rd32(device, buffer->putaddr);
		buffer->get = nvif_rd32(device, buffer->getaddr);
		if (buffer->get == buffer->put)
			return NVIF_NOTIFY_KEEP;
	}
	buffer->fault_nr = 0;

	SVM_DBG(svm, "get %08x put %08x", buffer->get, buffer->put);
	while (buffer->get != buffer->put) {
		nouveau_svm_fault_cache(svm, buffer, buffer->get * 0x20);
		if (++buffer->get == buffer->entries)
			buffer->get = 0;
	}
	nvif_wr32(device, buffer->getaddr, buffer->get);
	SVM_DBG(svm, "%d fault(s) pending", buffer->fault_nr);
	/* Sort parsed faults by instance pointer to prevent unnecessary
	 * instance to SVMM translations, followed by address and access
	 * type to reduce the amount of work when handling the faults.
	 */
	sort(buffer->fault, buffer->fault_nr, sizeof(*buffer->fault),
	     nouveau_svm_fault_cmp, NULL);
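	/*
	 * E.g. after sorting, all faults for a given channel instance are
	 * adjacent, and within an instance all faults on the same address
	 * are adjacent, so the duplicate-skipping scan further below can
	 * dismiss them in a single pass.
	 */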
	/* Lookup SVMM structure for each unique instance pointer. */
	mutex_lock(&svm->mutex);
	for (fi = 0, svmm = NULL; fi < buffer->fault_nr; fi++) {
		if (!svmm || buffer->fault[fi]->inst != inst) {
			struct nouveau_ivmm *ivmm =
				nouveau_ivmm_find(svm, buffer->fault[fi]->inst);
			svmm = ivmm ? ivmm->svmm : NULL;
			inst = buffer->fault[fi]->inst;
			SVM_DBG(svm, "inst %016llx -> svm-%p", inst, svmm);
		}
		buffer->fault[fi]->svmm = svmm;
	}
	mutex_unlock(&svm->mutex);
	/* Process list of faults. */
	args.i.i.version = 0;
	args.i.i.type = NVIF_IOCTL_V0_MTHD;
	args.i.m.version = 0;
	args.i.m.method = NVIF_VMM_V0_PFNMAP;
	args.i.p.version = 0;

	for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) {
		struct svm_notifier notifier;
		struct mm_struct *mm;

		/* Cancel any faults from non-SVM channels. */
		if (!(svmm = buffer->fault[fi]->svmm)) {
			nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
			continue;
		}
		SVMM_DBG(svmm, "addr %016llx", buffer->fault[fi]->addr);

		/* We try and group handling of faults within a small
		 * window into a single update.
		 */
		start = buffer->fault[fi]->addr;
		limit = start + PAGE_SIZE;
		if (start < svmm->unmanaged.limit)
			limit = min_t(u64, limit, svmm->unmanaged.start);

		/*
		 * Prepare the GPU-side update of all pages within the
		 * fault window, determining required pages and access
		 * permissions based on pending faults.
		 */
		args.i.p.addr = start;
		args.i.p.page = PAGE_SHIFT;
		args.i.p.size = PAGE_SIZE;
		/*
		 * Determine required permissions based on GPU fault
		 * access flags.
		 */
		switch (buffer->fault[fi]->access) {
		case 0: /* READ. */
			hmm_flags = HMM_PFN_REQ_FAULT;
			break;
		case 2: /* ATOMIC. */
			atomic = true;
			break;
		case 3: /* PREFETCH. */
			hmm_flags = 0;
			break;
		default:
			hmm_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;
			break;
		}

		mm = svmm->notifier.mm;
		if (!mmget_not_zero(mm)) {
			nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
			continue;
		}

		notifier.svmm = svmm;
		if (atomic)
			ret = nouveau_atomic_range_fault(svmm, svm->drm,
							 &args.i, sizeof(args),
							 &notifier);
		else
			ret = nouveau_range_fault(svmm, svm->drm, &args.i,
						  sizeof(args), hmm_flags,
						  &notifier);
		mmput(mm);

		limit = args.i.p.addr + args.i.p.size;
		for (fn = fi; ++fn < buffer->fault_nr; ) {
			/* It's okay to skip over duplicate addresses from the
			 * same SVMM as faults are ordered by access type such
			 * that only the first one needs to be handled.
			 *
			 * ie. WRITE faults appear first, thus any handling of
			 * pending READ faults will already be satisfied.
			 * But if a large page is mapped, make sure subsequent
			 * fault addresses have sufficient access permission.
			 */
			if (buffer->fault[fn]->svmm != svmm ||
			    buffer->fault[fn]->addr >= limit ||
			    (buffer->fault[fi]->access == FAULT_ACCESS_READ &&
			     !(args.phys[0] & NVIF_VMM_PFNMAP_V0_V)) ||
			    (buffer->fault[fi]->access != FAULT_ACCESS_READ &&
			     buffer->fault[fi]->access != FAULT_ACCESS_PREFETCH &&
			     !(args.phys[0] & NVIF_VMM_PFNMAP_V0_W)) ||
			    (buffer->fault[fi]->access != FAULT_ACCESS_READ &&
			     buffer->fault[fi]->access != FAULT_ACCESS_WRITE &&
			     buffer->fault[fi]->access != FAULT_ACCESS_PREFETCH &&
			     !(args.phys[0] & NVIF_VMM_PFNMAP_V0_A)))
				break;
		}

		/* If handling failed completely, cancel all faults. */
		if (ret) {
			while (fi < fn) {
				struct nouveau_svm_fault *fault =
					buffer->fault[fi++];

				nouveau_svm_fault_cancel_fault(svm, fault);
			}
			replay++;
		}
	}

	/* Issue fault replay to the GPU. */
	if (replay)
		nouveau_svm_fault_replay(svm);
	return NVIF_NOTIFY_KEEP;
}
static struct nouveau_pfnmap_args *
nouveau_pfns_to_args(void *pfns)
{
	return container_of(pfns, struct nouveau_pfnmap_args, p.phys);
}

u64 *
nouveau_pfns_alloc(unsigned long npages)
{
	struct nouveau_pfnmap_args *args;

	args = kzalloc(struct_size(args, p.phys, npages), GFP_KERNEL);
	if (!args)
		return NULL;

	args->i.type = NVIF_IOCTL_V0_MTHD;
	args->m.method = NVIF_VMM_V0_PFNMAP;
	args->p.page = PAGE_SHIFT;

	return args->p.phys;
}
void
nouveau_pfns_free(u64 *pfns)
{
	struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);

	kfree(args);
}

void
nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
		 unsigned long addr, u64 *pfns, unsigned long npages)
{
	struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);
	int ret;

	args->p.addr = addr;
	args->p.size = npages << PAGE_SHIFT;

	mutex_lock(&svmm->mutex);

	svmm->vmm->vmm.object.client->super = true;
	ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, sizeof(*args) +
				npages * sizeof(args->p.phys[0]), NULL);
	svmm->vmm->vmm.object.client->super = false;

	mutex_unlock(&svmm->mutex);
}
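/*
 * Typical usage of the pfns helpers by the migration code (illustrative
 * sketch only; see nouveau_dmem.c for the real caller):
 *
 *	u64 *pfns = nouveau_pfns_alloc(npages);
 *	... fill pfns[i] with NVIF_VMM_PFNMAP_V0_* encoded addresses ...
 *	nouveau_pfns_map(svmm, mm, addr, pfns, npages);
 *	nouveau_pfns_free(pfns);
 */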
static void
nouveau_svm_fault_buffer_fini(struct nouveau_svm *svm, int id)
{
	struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
	nvif_notify_put(&buffer->notify);
}

static int
nouveau_svm_fault_buffer_init(struct nouveau_svm *svm, int id)
{
	struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
	struct nvif_object *device = &svm->drm->client.device.object;
	buffer->get = nvif_rd32(device, buffer->getaddr);
	buffer->put = nvif_rd32(device, buffer->putaddr);
	SVM_DBG(svm, "get %08x put %08x (init)", buffer->get, buffer->put);
	return nvif_notify_get(&buffer->notify);
}
static void
nouveau_svm_fault_buffer_dtor(struct nouveau_svm *svm, int id)
{
	struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
	int i;

	if (buffer->fault) {
		/* Check the index before dereferencing the entry. */
		for (i = 0; i < buffer->entries && buffer->fault[i]; i++)
			kfree(buffer->fault[i]);
		kvfree(buffer->fault);
	}

	nouveau_svm_fault_buffer_fini(svm, id);

	nvif_notify_dtor(&buffer->notify);
	nvif_object_dtor(&buffer->object);
}
static int
nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id)
{
	struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
	struct nouveau_drm *drm = svm->drm;
	struct nvif_object *device = &drm->client.device.object;
	struct nvif_clb069_v0 args = {};
	int ret;

	buffer->id = id;

	ret = nvif_object_ctor(device, "svmFaultBuffer", 0, oclass, &args,
			       sizeof(args), &buffer->object);
	if (ret < 0) {
		SVM_ERR(svm, "Fault buffer allocation failed: %d", ret);
		return ret;
	}

	nvif_object_map(&buffer->object, NULL, 0);
	buffer->entries = args.entries;
	buffer->getaddr = args.get;
	buffer->putaddr = args.put;

	ret = nvif_notify_ctor(&buffer->object, "svmFault", nouveau_svm_fault,
			       true, NVB069_V0_NTFY_FAULT, NULL, 0, 0,
			       &buffer->notify);
	if (ret)
		return ret;

	buffer->fault = kvcalloc(buffer->entries, sizeof(*buffer->fault),
				 GFP_KERNEL);
	if (!buffer->fault)
		return -ENOMEM;

	return nouveau_svm_fault_buffer_init(svm, id);
}
void
nouveau_svm_resume(struct nouveau_drm *drm)
{
	struct nouveau_svm *svm = drm->svm;
	if (svm)
		nouveau_svm_fault_buffer_init(svm, 0);
}

void
nouveau_svm_suspend(struct nouveau_drm *drm)
{
	struct nouveau_svm *svm = drm->svm;
	if (svm)
		nouveau_svm_fault_buffer_fini(svm, 0);
}
void
nouveau_svm_fini(struct nouveau_drm *drm)
{
	struct nouveau_svm *svm = drm->svm;
	if (svm) {
		nouveau_svm_fault_buffer_dtor(svm, 0);
		kfree(drm->svm);
		drm->svm = NULL;
	}
}
void
nouveau_svm_init(struct nouveau_drm *drm)
{
	static const struct nvif_mclass buffers[] = {
		{   VOLTA_FAULT_BUFFER_A, 0 },
		{ MAXWELL_FAULT_BUFFER_A, 0 },
		{}
	};
	struct nouveau_svm *svm;
	int ret;

	/* Disable on Volta and newer until channel recovery is fixed,
	 * otherwise clients will have a trivial way to trash the GPU
	 * for everyone.
	 */
	if (drm->client.device.info.family > NV_DEVICE_INFO_V0_PASCAL)
		return;

	if (!(drm->svm = svm = kzalloc(sizeof(*drm->svm), GFP_KERNEL)))
		return;

	drm->svm->drm = drm;
	mutex_init(&drm->svm->mutex);
	INIT_LIST_HEAD(&drm->svm->inst);

	ret = nvif_mclass(&drm->client.device.object, buffers);
	if (ret < 0) {
		SVM_DBG(svm, "No supported fault buffer class");
		nouveau_svm_fini(drm);
		return;
	}

	ret = nouveau_svm_fault_buffer_ctor(svm, buffers[ret].oclass, 0);
	if (ret) {
		nouveau_svm_fini(drm);
		return;
	}

	SVM_DBG(svm, "Initialised");
}