/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
22 #include "nouveau_svm.h"
23 #include "nouveau_drv.h"
24 #include "nouveau_chan.h"
25 #include "nouveau_dmem.h"
27 #include <nvif/notify.h>
28 #include <nvif/object.h>
31 #include <nvif/class.h>
32 #include <nvif/clb069.h>
33 #include <nvif/ifc00d.h>
35 #include <linux/sched/mm.h>
36 #include <linux/sort.h>
37 #include <linux/hmm.h>
struct nouveau_svm {
	struct nouveau_drm *drm;
	struct mutex mutex;
	struct list_head inst;

	struct nouveau_svm_fault_buffer {
		int id;
		struct nvif_object object;
		u32 entries;
		u32 getaddr;
		u32 putaddr;
		u32 get;
		u32 put;
		struct nvif_notify notify;

		struct nouveau_svm_fault {
			u64 inst;
			u64 addr;
			u64 time;
			u32 engine;
			u8 gpc;
			u8 hub;
			u8 access;
			u8 client;
			u8 fault;
			struct nouveau_svmm *svmm;
		} **fault;
		int fault_nr;
	} buffer[1];
};
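
/* One nouveau_svm is allocated per device (see nouveau_svm_init()).  It owns
 * the list of channel instances that have joined an SVMM, so that fault
 * instance pointers can be translated back to an address space, and a single
 * replayable fault buffer whose hardware entries are cached as
 * nouveau_svm_fault records before being sorted and handled.
 */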
#define SVM_DBG(s,f,a...) NV_DEBUG((s)->drm, "svm: "f"\n", ##a)
#define SVM_ERR(s,f,a...) NV_WARN((s)->drm, "svm: "f"\n", ##a)
struct nouveau_pfnmap_args {
	struct nvif_ioctl_v0 i;
	struct nvif_ioctl_mthd_v0 m;
	struct nvif_vmm_pfnmap_v0 p;
};

struct nouveau_ivmm {
	struct nouveau_svmm *svmm;
	u64 inst;
	struct list_head head;
};
static struct nouveau_ivmm *
nouveau_ivmm_find(struct nouveau_svm *svm, u64 inst)
{
	struct nouveau_ivmm *ivmm;
	list_for_each_entry(ivmm, &svm->inst, head) {
		if (ivmm->inst == inst)
			return ivmm;
	}
	return NULL;
}
struct nouveau_svmm {
	struct mmu_notifier notifier;
	struct nouveau_vmm *vmm;
	struct {
		unsigned long start;
		unsigned long limit;
	} unmanaged;

	struct mutex mutex;
};
#define SVMM_DBG(s,f,a...) \
	NV_DEBUG((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)
#define SVMM_ERR(s,f,a...) \
	NV_WARN((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)
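
/* DRM_NOUVEAU_SVM_BIND: migrate a range of the client's address space to GPU
 * memory.  The command, priority and target are packed into args->header
 * using the NOUVEAU_SVM_BIND_*_SHIFT/_MASK values from the uapi header; only
 * the MIGRATE command and the GPU_VRAM target are accepted below.  A purely
 * illustrative userspace sketch (not taken from this file) would look
 * roughly like:
 *
 *	struct drm_nouveau_svm_bind bind = {
 *		.header = (NOUVEAU_SVM_BIND_COMMAND__MIGRATE <<
 *			   NOUVEAU_SVM_BIND_COMMAND_SHIFT) |
 *			  (NOUVEAU_SVM_BIND_TARGET__GPU_VRAM <<
 *			   NOUVEAU_SVM_BIND_TARGET_SHIFT),
 *		.va_start = addr,
 *		.va_end = addr + size,
 *		.npages = size >> page_shift,
 *	};
 *	drmCommandWrite(fd, DRM_NOUVEAU_SVM_BIND, &bind, sizeof(bind));
 */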
int
nouveau_svmm_bind(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_nouveau_svm_bind *args = data;
	unsigned target, cmd, priority;
	unsigned long addr, end, size;
	struct mm_struct *mm;

	args->va_start &= PAGE_MASK;
	args->va_end &= PAGE_MASK;

	/* Sanity check arguments */
	if (args->reserved0 || args->reserved1)
		return -EINVAL;
	if (args->header & (~NOUVEAU_SVM_BIND_VALID_MASK))
		return -EINVAL;
	if (args->va_start >= args->va_end)
		return -EINVAL;
	if (!args->npages)
		return -EINVAL;

	cmd = args->header >> NOUVEAU_SVM_BIND_COMMAND_SHIFT;
	cmd &= NOUVEAU_SVM_BIND_COMMAND_MASK;
	switch (cmd) {
	case NOUVEAU_SVM_BIND_COMMAND__MIGRATE:
		break;
	default:
		return -EINVAL;
	}

	priority = args->header >> NOUVEAU_SVM_BIND_PRIORITY_SHIFT;
	priority &= NOUVEAU_SVM_BIND_PRIORITY_MASK;

	/* FIXME: support CPU targets, i.e. all target values < GPU_VRAM */
	target = args->header >> NOUVEAU_SVM_BIND_TARGET_SHIFT;
	target &= NOUVEAU_SVM_BIND_TARGET_MASK;
	switch (target) {
	case NOUVEAU_SVM_BIND_TARGET__GPU_VRAM:
		break;
	default:
		return -EINVAL;
	}

	/*
	 * FIXME: For now, refuse a non-zero stride; the migrate kernel
	 * function needs to learn to handle strides first, to avoid creating
	 * a mess within each device driver.
	 */
	if (args->stride)
		return -EINVAL;

	size = ((unsigned long)args->npages) << PAGE_SHIFT;
	if ((args->va_start + size) <= args->va_start)
		return -EINVAL;
	if ((args->va_start + size) > args->va_end)
		return -EINVAL;

	/*
	 * We are being asked to do something sane.  For now we only support
	 * migrate commands, but we will add things like memory policy (what
	 * to do on page fault) and maybe some other commands later.
	 */

	mm = get_task_mm(current);
	mmap_read_lock(mm);

	if (!cli->svm.svmm) {
		mmap_read_unlock(mm);
		mmput(mm);
		return -EINVAL;
	}

	for (addr = args->va_start, end = args->va_start + size; addr < end;) {
		struct vm_area_struct *vma;
		unsigned long next;

		vma = find_vma_intersection(mm, addr, end);
		if (!vma)
			break;

		addr = max(addr, vma->vm_start);
		next = min(vma->vm_end, end);
		/* This is a best-effort operation, so we ignore errors. */
		nouveau_dmem_migrate_vma(cli->drm, cli->svm.svmm, vma, addr,
					 next);
		addr = next;
	}

	/*
	 * FIXME: Return the number of pages we have migrated.  Again, we
	 * need to update the migrate API to return that information so that
	 * we can report it to user space.
	 */

	mmap_read_unlock(mm);
	mmput(mm);

	return 0;
}
/* Unlink channel instance from SVMM. */
void
nouveau_svmm_part(struct nouveau_svmm *svmm, u64 inst)
{
	struct nouveau_ivmm *ivmm;
	if (svmm) {
		mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
		ivmm = nouveau_ivmm_find(svmm->vmm->cli->drm->svm, inst);
		if (ivmm) {
			list_del(&ivmm->head);
			kfree(ivmm);
		}
		mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
	}
}
/* Link channel instance to SVMM. */
int
nouveau_svmm_join(struct nouveau_svmm *svmm, u64 inst)
{
	struct nouveau_ivmm *ivmm;
	if (svmm) {
		if (!(ivmm = kmalloc(sizeof(*ivmm), GFP_KERNEL)))
			return -ENOMEM;
		ivmm->svmm = svmm;
		ivmm->inst = inst;

		mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
		list_add(&ivmm->head, &svmm->vmm->cli->drm->svm->inst);
		mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
	}
	return 0;
}
/* Invalidate SVMM address-range on GPU. */
static void
nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
{
	if (limit > start) {
		bool super = svmm->vmm->vmm.object.client->super;
		svmm->vmm->vmm.object.client->super = true;
		nvif_object_mthd(&svmm->vmm->vmm.object, NVIF_VMM_V0_PFNCLR,
				 &(struct nvif_vmm_pfnclr_v0) {
					.addr = start,
					.size = limit - start,
				 }, sizeof(struct nvif_vmm_pfnclr_v0));
		svmm->vmm->vmm.object.client->super = super;
	}
}
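
/* The PFNCLR method above drops the GPU mappings for the range, so the next
 * GPU access to it raises a fresh replayable fault.  The mmu_notifier
 * callback below uses it to mirror CPU-side invalidations to the GPU,
 * skipping the client's unmanaged window, which is not under SVM control.
 */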
static int
nouveau_svmm_invalidate_range_start(struct mmu_notifier *mn,
				    const struct mmu_notifier_range *update)
{
	struct nouveau_svmm *svmm =
		container_of(mn, struct nouveau_svmm, notifier);
	unsigned long start = update->start;
	unsigned long limit = update->end;

	if (!mmu_notifier_range_blockable(update))
		return -EAGAIN;

	SVMM_DBG(svmm, "invalidate %016lx-%016lx", start, limit);

	mutex_lock(&svmm->mutex);
	if (unlikely(!svmm->vmm))
		goto out;

	if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) {
		if (start < svmm->unmanaged.start) {
			nouveau_svmm_invalidate(svmm, start,
						svmm->unmanaged.limit);
		}
		start = svmm->unmanaged.limit;
	}

	nouveau_svmm_invalidate(svmm, start, limit);

out:
	mutex_unlock(&svmm->mutex);
	return 0;
}
static void nouveau_svmm_free_notifier(struct mmu_notifier *mn)
{
	kfree(container_of(mn, struct nouveau_svmm, notifier));
}

static const struct mmu_notifier_ops nouveau_mn_ops = {
	.invalidate_range_start = nouveau_svmm_invalidate_range_start,
	.free_notifier = nouveau_svmm_free_notifier,
};
void
nouveau_svmm_fini(struct nouveau_svmm **psvmm)
{
	struct nouveau_svmm *svmm = *psvmm;
	if (svmm) {
		mutex_lock(&svmm->mutex);
		svmm->vmm = NULL;
		mutex_unlock(&svmm->mutex);
		mmu_notifier_put(&svmm->notifier);
		*psvmm = NULL;
	}
}
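
/* DRM_NOUVEAU_SVM_INIT: switch the client over to a client-managed VMM with
 * replayable faults enabled and register an mmu_notifier on the current
 * process, so that CPU-side invalidations reach the GPU.  The
 * [unmanaged_addr, unmanaged_addr + unmanaged_size) window is left for
 * conventional allocations and is never touched by the SVM paths.
 */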
int
nouveau_svmm_init(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_svmm *svmm;
	struct drm_nouveau_svm_init *args = data;
	int ret;

	/* Allocate tracking for SVM-enabled VMM. */
	if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL)))
		return -ENOMEM;
	svmm->vmm = &cli->svm;
	svmm->unmanaged.start = args->unmanaged_addr;
	svmm->unmanaged.limit = args->unmanaged_addr + args->unmanaged_size;
	mutex_init(&svmm->mutex);

	/* Check that SVM isn't already enabled for the client. */
	mutex_lock(&cli->mutex);
	if (cli->svm.cli) {
		ret = -EBUSY;
		goto out_free;
	}

	/* Allocate a new GPU VMM that can support SVM (managed by the
	 * client, with replayable faults enabled).
	 *
	 * All future channel/memory allocations will make use of this
	 * VMM instead of the standard one.
	 */
	ret = nvif_vmm_ctor(&cli->mmu, "svmVmm",
			    cli->vmm.vmm.object.oclass, true,
			    args->unmanaged_addr, args->unmanaged_size,
			    &(struct gp100_vmm_v0) {
				.fault_replay = true,
			    }, sizeof(struct gp100_vmm_v0), &cli->svm.vmm);
	if (ret)
		goto out_free;

	mmap_write_lock(current->mm);
	svmm->notifier.ops = &nouveau_mn_ops;
	ret = __mmu_notifier_register(&svmm->notifier, current->mm);
	if (ret)
		goto out_mm_unlock;
	/* Note, ownership of svmm transfers to mmu_notifier */

	cli->svm.svmm = svmm;
	cli->svm.cli = cli;
	mmap_write_unlock(current->mm);
	mutex_unlock(&cli->mutex);
	return 0;

out_mm_unlock:
	mmap_write_unlock(current->mm);
out_free:
	mutex_unlock(&cli->mutex);
	kfree(svmm);
	return ret;
}
/* Issue fault replay for GPU to retry accesses that faulted previously. */
static void
nouveau_svm_fault_replay(struct nouveau_svm *svm)
{
	SVM_DBG(svm, "replay");
	WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
				 GP100_VMM_VN_FAULT_REPLAY,
				 &(struct gp100_vmm_fault_replay_vn) {},
				 sizeof(struct gp100_vmm_fault_replay_vn)));
}
/* Cancel a replayable fault that could not be handled.
 *
 * Cancelling the fault will trigger recovery to reset the engine
 * and kill the offending channel (ie. GPU SIGSEGV).
 */
static void
nouveau_svm_fault_cancel(struct nouveau_svm *svm,
			 u64 inst, u8 hub, u8 gpc, u8 client)
{
	SVM_DBG(svm, "cancel %016llx %d %02x %02x", inst, hub, gpc, client);
	WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
				 GP100_VMM_VN_FAULT_CANCEL,
				 &(struct gp100_vmm_fault_cancel_v0) {
					.inst = inst,
					.hub = hub,
					.gpc = gpc,
					.client = client,
				 }, sizeof(struct gp100_vmm_fault_cancel_v0)));
}
static void
nouveau_svm_fault_cancel_fault(struct nouveau_svm *svm,
			       struct nouveau_svm_fault *fault)
{
	nouveau_svm_fault_cancel(svm, fault->inst,
				 fault->hub,
				 fault->gpc,
				 fault->client);
}
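
/* Sort key for parsed faults: instance pointer first (so each channel only
 * needs one SVMM lookup), then fault address, then access type, with
 * WRITE-class faults ordered before READ (0) and PREFETCH (3) so that
 * duplicate addresses only need their strictest access handled once.
 */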
static int
nouveau_svm_fault_cmp(const void *a, const void *b)
{
	const struct nouveau_svm_fault *fa = *(struct nouveau_svm_fault **)a;
	const struct nouveau_svm_fault *fb = *(struct nouveau_svm_fault **)b;
	int ret;
	if ((ret = (s64)fa->inst - fb->inst))
		return ret;
	if ((ret = (s64)fa->addr - fb->addr))
		return ret;
	/*XXX: atomic? */
	return (fa->access == 0 || fa->access == 3) -
	       (fb->access == 0 || fb->access == 3);
}
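
/* Each hardware fault buffer entry is 32 bytes (hence the "get * 0x20" in the
 * caller): instance pointer, fault address and timestamp as lo/hi word pairs,
 * an engine word, and an info word carrying the valid bit (31) plus the GPC,
 * hub, client, access-type and fault-reason fields decoded below.
 */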
static void
nouveau_svm_fault_cache(struct nouveau_svm *svm,
			struct nouveau_svm_fault_buffer *buffer, u32 offset)
{
	struct nvif_object *memory = &buffer->object;
	const u32 instlo = nvif_rd32(memory, offset + 0x00);
	const u32 insthi = nvif_rd32(memory, offset + 0x04);
	const u32 addrlo = nvif_rd32(memory, offset + 0x08);
	const u32 addrhi = nvif_rd32(memory, offset + 0x0c);
	const u32 timelo = nvif_rd32(memory, offset + 0x10);
	const u32 timehi = nvif_rd32(memory, offset + 0x14);
	const u32 engine = nvif_rd32(memory, offset + 0x18);
	const u32 info = nvif_rd32(memory, offset + 0x1c);
	const u64 inst = (u64)insthi << 32 | instlo;
	const u8 gpc = (info & 0x1f000000) >> 24;
	const u8 hub = (info & 0x00100000) >> 20;
	const u8 client = (info & 0x00007f00) >> 8;
	struct nouveau_svm_fault *fault;

	/* XXX: I think we're supposed to spin waiting. */
	if (WARN_ON(!(info & 0x80000000)))
		return;

	nvif_mask(memory, offset + 0x1c, 0x80000000, 0x00000000);

	if (!buffer->fault[buffer->fault_nr]) {
		fault = kmalloc(sizeof(*fault), GFP_KERNEL);
		if (WARN_ON(!fault)) {
			nouveau_svm_fault_cancel(svm, inst, hub, gpc, client);
			return;
		}
		buffer->fault[buffer->fault_nr] = fault;
	}

	fault = buffer->fault[buffer->fault_nr++];
	fault->inst = inst;
	fault->addr = (u64)addrhi << 32 | addrlo;
	fault->time = (u64)timehi << 32 | timelo;
	fault->engine = engine;
	fault->gpc = gpc;
	fault->hub = hub;
	fault->access = (info & 0x000f0000) >> 16;
	fault->client = client;
	fault->fault = (info & 0x0000001f);

	SVM_DBG(svm, "fault %016llx %016llx %02x",
		fault->inst, fault->addr, fault->access);
}
struct svm_notifier {
	struct mmu_interval_notifier notifier;
	struct nouveau_svmm *svmm;
};

static bool nouveau_svm_range_invalidate(struct mmu_interval_notifier *mni,
					 const struct mmu_notifier_range *range,
					 unsigned long cur_seq)
{
	struct svm_notifier *sn =
		container_of(mni, struct svm_notifier, notifier);

	/*
	 * Serializes the update to mni->invalidate_seq done by the caller and
	 * prevents invalidation of the PTE from progressing while HW is being
	 * programmed.  This is very hacky and only works because the normal
	 * notifier that does invalidation is always called after the range
	 * notifier.
	 */
	if (mmu_notifier_range_blockable(range))
		mutex_lock(&sn->svmm->mutex);
	else if (!mutex_trylock(&sn->svmm->mutex))
		return false;
	mmu_interval_set_seq(mni, cur_seq);
	mutex_unlock(&sn->svmm->mutex);
	return true;
}

static const struct mmu_interval_notifier_ops nouveau_svm_mni_ops = {
	.invalidate = nouveau_svm_range_invalidate,
};
static void nouveau_hmm_convert_pfn(struct nouveau_drm *drm,
				    struct hmm_range *range, u64 *ioctl_addr)
{
	unsigned long i, npages;

	/*
	 * The ioctl_addr prepared here is passed through nvif_object_ioctl()
	 * to an eventual DMA map in something like gp100_vmm_pgt_pfn()
	 *
	 * This is all just encoding the internal hmm representation into a
	 * different nouveau internal representation.
	 */
	npages = (range->end - range->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; ++i) {
		struct page *page;

		if (!(range->hmm_pfns[i] & HMM_PFN_VALID)) {
			ioctl_addr[i] = 0;
			continue;
		}

		page = hmm_pfn_to_page(range->hmm_pfns[i]);
		if (is_device_private_page(page))
			ioctl_addr[i] = nouveau_dmem_page_addr(page) |
					NVIF_VMM_PFNMAP_V0_V |
					NVIF_VMM_PFNMAP_V0_VRAM;
		else
			ioctl_addr[i] = page_to_phys(page) |
					NVIF_VMM_PFNMAP_V0_V |
					NVIF_VMM_PFNMAP_V0_HOST;
		if (range->hmm_pfns[i] & HMM_PFN_WRITE)
			ioctl_addr[i] |= NVIF_VMM_PFNMAP_V0_W;
	}
}
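
/* nouveau_range_fault() below is the usual HMM retry loop: snapshot the
 * interval-notifier sequence, fault the pages in with hmm_range_fault()
 * under the mmap read lock, then take svmm->mutex and retry if the sequence
 * moved underneath us.  Only once the snapshot is still valid are the PFNs
 * converted and pushed to the GPU via the PFNMAP method.
 */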
static int nouveau_range_fault(struct nouveau_svmm *svmm,
			       struct nouveau_drm *drm, void *data, u32 size,
			       unsigned long hmm_pfns[], u64 *ioctl_addr,
			       struct svm_notifier *notifier)
{
	unsigned long timeout =
		jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
	/* Have HMM fault pages within the fault window to the GPU. */
	struct hmm_range range = {
		.notifier = &notifier->notifier,
		.start = notifier->notifier.interval_tree.start,
		.end = notifier->notifier.interval_tree.last + 1,
		.pfn_flags_mask = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE,
		.hmm_pfns = hmm_pfns,
		.dev_private_owner = drm->dev,
	};
	struct mm_struct *mm = notifier->notifier.mm;
	int ret;

	while (true) {
		if (time_after(jiffies, timeout))
			return -EBUSY;

		range.notifier_seq = mmu_interval_read_begin(range.notifier);
		mmap_read_lock(mm);
		ret = hmm_range_fault(&range);
		mmap_read_unlock(mm);
		if (ret) {
			/*
			 * FIXME: the input PFN_REQ flags are destroyed on
			 * -EBUSY, we need to regenerate them, also for the
			 * other continue below
			 */
			if (ret == -EBUSY)
				continue;
			return ret;
		}

		mutex_lock(&svmm->mutex);
		if (mmu_interval_read_retry(range.notifier,
					    range.notifier_seq)) {
			mutex_unlock(&svmm->mutex);
			continue;
		}
		break;
	}

	nouveau_hmm_convert_pfn(drm, &range, ioctl_addr);

	svmm->vmm->vmm.object.client->super = true;
	ret = nvif_object_ioctl(&svmm->vmm->vmm.object, data, size, NULL);
	svmm->vmm->vmm.object.client->super = false;
	mutex_unlock(&svmm->mutex);

	return ret;
}
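
/* Top-half handling for the replayable fault buffer notify:
 *  1) drain pending hardware entries into buffer->fault[] and update GET,
 *  2) sort them and resolve each instance pointer to its SVMM,
 *  3) group faults into per-VMA windows, fault the pages through HMM and
 *     program the GPU page tables,
 *  4) cancel any fault whose page could not be made valid (or writeable,
 *     where required), and
 *  5) issue a fault replay if anything was successfully serviced.
 */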
static int
nouveau_svm_fault(struct nvif_notify *notify)
{
	struct nouveau_svm_fault_buffer *buffer =
		container_of(notify, typeof(*buffer), notify);
	struct nouveau_svm *svm =
		container_of(buffer, typeof(*svm), buffer[buffer->id]);
	struct nvif_object *device = &svm->drm->client.device.object;
	struct nouveau_svmm *svmm;
	struct {
		struct {
			struct nvif_ioctl_v0 i;
			struct nvif_ioctl_mthd_v0 m;
			struct nvif_vmm_pfnmap_v0 p;
		} i;
		u64 phys[16];
	} args;
	unsigned long hmm_pfns[ARRAY_SIZE(args.phys)];
	struct vm_area_struct *vma;
	u64 inst, start, limit;
	int fi, fn, pi, fill;
	int replay = 0, ret;

	/* Parse available fault buffer entries into a cache, and update
	 * the GET pointer so HW can reuse the entries.
	 */
	SVM_DBG(svm, "fault handler");
	if (buffer->get == buffer->put) {
		buffer->put = nvif_rd32(device, buffer->putaddr);
		buffer->get = nvif_rd32(device, buffer->getaddr);
		if (buffer->get == buffer->put)
			return NVIF_NOTIFY_KEEP;
	}
	buffer->fault_nr = 0;

	SVM_DBG(svm, "get %08x put %08x", buffer->get, buffer->put);
	while (buffer->get != buffer->put) {
		nouveau_svm_fault_cache(svm, buffer, buffer->get * 0x20);
		if (++buffer->get == buffer->entries)
			buffer->get = 0;
	}
	nvif_wr32(device, buffer->getaddr, buffer->get);
	SVM_DBG(svm, "%d fault(s) pending", buffer->fault_nr);

	/* Sort parsed faults by instance pointer to prevent unnecessary
	 * instance to SVMM translations, followed by address and access
	 * type to reduce the amount of work when handling the faults.
	 */
	sort(buffer->fault, buffer->fault_nr, sizeof(*buffer->fault),
	     nouveau_svm_fault_cmp, NULL);

	/* Lookup SVMM structure for each unique instance pointer. */
	mutex_lock(&svm->mutex);
	for (fi = 0, svmm = NULL; fi < buffer->fault_nr; fi++) {
		if (!svmm || buffer->fault[fi]->inst != inst) {
			struct nouveau_ivmm *ivmm =
				nouveau_ivmm_find(svm, buffer->fault[fi]->inst);
			svmm = ivmm ? ivmm->svmm : NULL;
			inst = buffer->fault[fi]->inst;
			SVM_DBG(svm, "inst %016llx -> svm-%p", inst, svmm);
		}
		buffer->fault[fi]->svmm = svmm;
	}
	mutex_unlock(&svm->mutex);

	/* Process list of faults. */
	args.i.i.version = 0;
	args.i.i.type = NVIF_IOCTL_V0_MTHD;
	args.i.m.version = 0;
	args.i.m.method = NVIF_VMM_V0_PFNMAP;
	args.i.p.version = 0;

	for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) {
		struct svm_notifier notifier;
		struct mm_struct *mm;

		/* Cancel any faults from non-SVM channels. */
		if (!(svmm = buffer->fault[fi]->svmm)) {
			nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
			continue;
		}
		SVMM_DBG(svmm, "addr %016llx", buffer->fault[fi]->addr);

		/* We try and group handling of faults within a small
		 * window into a single update.
		 */
		start = buffer->fault[fi]->addr;
		limit = start + (ARRAY_SIZE(args.phys) << PAGE_SHIFT);
		if (start < svmm->unmanaged.limit)
			limit = min_t(u64, limit, svmm->unmanaged.start);
		SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit);

		mm = svmm->notifier.mm;
		if (!mmget_not_zero(mm)) {
			nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
			continue;
		}

		/* Intersect fault window with the CPU VMA, cancelling
		 * the fault if the address is invalid.
		 */
		mmap_read_lock(mm);
		vma = find_vma_intersection(mm, start, limit);
		if (!vma) {
			SVMM_ERR(svmm, "wndw %016llx-%016llx", start, limit);
			mmap_read_unlock(mm);
			mmput(mm);
			nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
			continue;
		}
		start = max_t(u64, start, vma->vm_start);
		limit = min_t(u64, limit, vma->vm_end);
		mmap_read_unlock(mm);
		SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit);

		if (buffer->fault[fi]->addr != start) {
			SVMM_ERR(svmm, "addr %016llx", buffer->fault[fi]->addr);
			mmput(mm);
			nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
			continue;
		}

		/* Prepare the GPU-side update of all pages within the
		 * fault window, determining required pages and access
		 * permissions based on pending faults.
		 */
		args.i.p.page = PAGE_SHIFT;
		args.i.p.addr = start;
		for (fn = fi, pi = 0;;) {
			/* Determine required permissions based on GPU fault
			 * access flags.
			 */
			switch (buffer->fault[fn]->access) {
			case 0: /* READ. */
				hmm_pfns[pi++] = HMM_PFN_REQ_FAULT;
				break;
			case 3: /* PREFETCH. */
				hmm_pfns[pi++] = 0;
				break;
			default:
				hmm_pfns[pi++] = HMM_PFN_REQ_FAULT |
						 HMM_PFN_REQ_WRITE;
				break;
			}
			args.i.p.size = pi << PAGE_SHIFT;

			/* It's okay to skip over duplicate addresses from the
			 * same SVMM as faults are ordered by access type such
			 * that only the first one needs to be handled.
			 *
			 * ie. WRITE faults appear first, thus any handling of
			 * pending READ faults will already be satisfied.
			 */
			while (++fn < buffer->fault_nr &&
			       buffer->fault[fn]->svmm == svmm &&
			       buffer->fault[fn    ]->addr ==
			       buffer->fault[fn - 1]->addr);

			/* If the next fault is outside the window, or all GPU
			 * faults have been dealt with, we're done here.
			 */
			if (fn >= buffer->fault_nr ||
			    buffer->fault[fn]->svmm != svmm ||
			    buffer->fault[fn]->addr >= limit)
				break;

			/* Fill in the gap between this fault and the next. */
			fill = (buffer->fault[fn    ]->addr -
				buffer->fault[fn - 1]->addr) >> PAGE_SHIFT;
			while (--fill)
				hmm_pfns[pi++] = 0;
		}

		SVMM_DBG(svmm, "wndw %016llx-%016llx covering %d fault(s)",
			 args.i.p.addr,
			 args.i.p.addr + args.i.p.size, fn - fi);

		notifier.svmm = svmm;
		ret = mmu_interval_notifier_insert(&notifier.notifier,
						   svmm->notifier.mm,
						   args.i.p.addr, args.i.p.size,
						   &nouveau_svm_mni_ops);
		if (!ret) {
			ret = nouveau_range_fault(
				svmm, svm->drm, &args,
				sizeof(args.i) + pi * sizeof(args.phys[0]),
				hmm_pfns, args.phys, &notifier);
			mmu_interval_notifier_remove(&notifier.notifier);
		}
		mmput(mm);

		/* Cancel any faults in the window whose pages didn't manage
		 * to keep their valid bit, or stay writeable when required.
		 *
		 * If handling failed completely, cancel all faults.
		 */
		while (fi < fn) {
			struct nouveau_svm_fault *fault = buffer->fault[fi++];
			pi = (fault->addr - args.i.p.addr) >> PAGE_SHIFT;
			if (ret ||
			    !(args.phys[pi] & NVIF_VMM_PFNMAP_V0_V) ||
			    (!(args.phys[pi] & NVIF_VMM_PFNMAP_V0_W) &&
			     fault->access != 0 && fault->access != 3)) {
				nouveau_svm_fault_cancel_fault(svm, fault);
				continue;
			}
			replay++;
		}
	}

	/* Issue fault replay to the GPU. */
	if (replay)
		nouveau_svm_fault_replay(svm);
	return NVIF_NOTIFY_KEEP;
}
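
/* The nouveau_pfns_*() helpers below wrap a pre-built PFNMAP request around a
 * bare u64 PFN array, so that callers (such as the dmem migration code) can
 * fill in physical addresses and then map them into the SVM-managed VMM with
 * a single call; nouveau_pfns_to_args() recovers the enclosing ioctl
 * arguments from the array pointer with container_of().
 */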
static struct nouveau_pfnmap_args *
nouveau_pfns_to_args(void *pfns)
{
	return container_of(pfns, struct nouveau_pfnmap_args, p.phys);
}
u64 *
nouveau_pfns_alloc(unsigned long npages)
{
	struct nouveau_pfnmap_args *args;

	args = kzalloc(struct_size(args, p.phys, npages), GFP_KERNEL);
	if (!args)
		return NULL;

	args->i.type = NVIF_IOCTL_V0_MTHD;
	args->m.method = NVIF_VMM_V0_PFNMAP;
	args->p.page = PAGE_SHIFT;

	return args->p.phys;
}
void
nouveau_pfns_free(u64 *pfns)
{
	struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);

	kfree(args);
}
void
nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
		 unsigned long addr, u64 *pfns, unsigned long npages)
{
	struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);
	int ret;

	args->p.addr = addr;
	args->p.size = npages << PAGE_SHIFT;

	mutex_lock(&svmm->mutex);

	svmm->vmm->vmm.object.client->super = true;
	ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, sizeof(*args) +
				npages * sizeof(args->p.phys[0]), NULL);
	svmm->vmm->vmm.object.client->super = false;

	mutex_unlock(&svmm->mutex);
}
static void
nouveau_svm_fault_buffer_fini(struct nouveau_svm *svm, int id)
{
	struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
	nvif_notify_put(&buffer->notify);
}
static int
nouveau_svm_fault_buffer_init(struct nouveau_svm *svm, int id)
{
	struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
	struct nvif_object *device = &svm->drm->client.device.object;
	buffer->get = nvif_rd32(device, buffer->getaddr);
	buffer->put = nvif_rd32(device, buffer->putaddr);
	SVM_DBG(svm, "get %08x put %08x (init)", buffer->get, buffer->put);
	return nvif_notify_get(&buffer->notify);
}
static void
nouveau_svm_fault_buffer_dtor(struct nouveau_svm *svm, int id)
{
	struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
	int i;

	if (buffer->fault) {
		/* Check the index before dereferencing the slot, to avoid
		 * reading one entry past the end of a fully-populated array.
		 */
		for (i = 0; i < buffer->entries && buffer->fault[i]; i++)
			kfree(buffer->fault[i]);
		kvfree(buffer->fault);
	}

	nouveau_svm_fault_buffer_fini(svm, id);

	nvif_notify_dtor(&buffer->notify);
	nvif_object_dtor(&buffer->object);
}
static int
nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id)
{
	struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
	struct nouveau_drm *drm = svm->drm;
	struct nvif_object *device = &drm->client.device.object;
	struct nvif_clb069_v0 args = {};
	int ret;

	buffer->id = id;

	ret = nvif_object_ctor(device, "svmFaultBuffer", 0, oclass, &args,
			       sizeof(args), &buffer->object);
	if (ret < 0) {
		SVM_ERR(svm, "Fault buffer allocation failed: %d", ret);
		return ret;
	}

	nvif_object_map(&buffer->object, NULL, 0);
	buffer->entries = args.entries;
	buffer->getaddr = args.get;
	buffer->putaddr = args.put;

	ret = nvif_notify_ctor(&buffer->object, "svmFault", nouveau_svm_fault,
			       true, NVB069_V0_NTFY_FAULT, NULL, 0, 0,
			       &buffer->notify);
	if (ret)
		return ret;

	buffer->fault = kvzalloc(sizeof(*buffer->fault) * buffer->entries, GFP_KERNEL);
	if (!buffer->fault)
		return -ENOMEM;

	return nouveau_svm_fault_buffer_init(svm, id);
}
void
nouveau_svm_resume(struct nouveau_drm *drm)
{
	struct nouveau_svm *svm = drm->svm;
	if (svm)
		nouveau_svm_fault_buffer_init(svm, 0);
}
void
nouveau_svm_suspend(struct nouveau_drm *drm)
{
	struct nouveau_svm *svm = drm->svm;
	if (svm)
		nouveau_svm_fault_buffer_fini(svm, 0);
}
void
nouveau_svm_fini(struct nouveau_drm *drm)
{
	struct nouveau_svm *svm = drm->svm;
	if (svm) {
		nouveau_svm_fault_buffer_dtor(svm, 0);
		kfree(drm->svm);
		drm->svm = NULL;
	}
}
void
nouveau_svm_init(struct nouveau_drm *drm)
{
	static const struct nvif_mclass buffers[] = {
		{   VOLTA_FAULT_BUFFER_A, 0 },
		{ MAXWELL_FAULT_BUFFER_A, 0 },
		{}
	};
	struct nouveau_svm *svm;
	int ret;

	/* Disable on Volta and newer until channel recovery is fixed,
	 * otherwise clients will have a trivial way to trash the GPU
	 * for everyone.
	 */
	if (drm->client.device.info.family > NV_DEVICE_INFO_V0_PASCAL)
		return;

	if (!(drm->svm = svm = kzalloc(sizeof(*drm->svm), GFP_KERNEL)))
		return;

	drm->svm->drm = drm;
	mutex_init(&drm->svm->mutex);
	INIT_LIST_HEAD(&drm->svm->inst);

	ret = nvif_mclass(&drm->client.device.object, buffers);
	if (ret < 0) {
		SVM_DBG(svm, "No supported fault buffer class");
		nouveau_svm_fini(drm);
		return;
	}

	ret = nouveau_svm_fault_buffer_ctor(svm, buffers[ret].oclass, 0);
	if (ret) {
		nouveau_svm_fini(drm);
		return;
	}

	SVM_DBG(svm, "Initialised");
}