// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020-2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/types.h>
#include <linux/sched/task.h>
#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_mn.h"
#include "amdgpu_xgmi.h"
#include "kfd_migrate.h"

#define dev_fmt(fmt) "kfd_svm: %s: " fmt, __func__

#define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1

/* Long enough to ensure no retry fault comes after svm range is restored and
 * page table is updated.
 */
#define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING	2000
static void svm_range_evict_svm_bo_worker(struct work_struct *work);
static bool
svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
				    const struct mmu_notifier_range *range,
				    unsigned long cur_seq);
static int
svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
		   uint64_t *bo_s, uint64_t *bo_l);

static const struct mmu_interval_notifier_ops svm_range_mn_ops = {
	.invalidate = svm_range_cpu_invalidate_pagetables,
};
/**
 * svm_range_unlink - unlink svm_range from lists and interval tree
 * @prange: svm range structure to be removed
 *
 * Remove the svm_range from the svms and svm_bo lists and the svms
 * interval tree.
 *
 * Context: The caller must hold svms->lock
 */
static void svm_range_unlink(struct svm_range *prange)
{
	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
		 prange, prange->start, prange->last);

	if (prange->svm_bo) {
		spin_lock(&prange->svm_bo->list_lock);
		list_del(&prange->svm_bo_list);
		spin_unlock(&prange->svm_bo->list_lock);
	}

	list_del(&prange->list);
	if (prange->it_node.start != 0 && prange->it_node.last != 0)
		interval_tree_remove(&prange->it_node, &prange->svms->objects);
}
static void
svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)
{
	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
		 prange, prange->start, prange->last);

	mmu_interval_notifier_insert_locked(&prange->notifier, mm,
					    prange->start << PAGE_SHIFT,
					    prange->npages << PAGE_SHIFT,
					    &svm_range_mn_ops);
}
/**
 * svm_range_add_to_svms - add svm range to svms
 * @prange: svm range structure to be added
 *
 * Add the svm range to svms interval tree and link list
 *
 * Context: The caller must hold svms->lock
 */
static void svm_range_add_to_svms(struct svm_range *prange)
{
	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
		 prange, prange->start, prange->last);

	list_add_tail(&prange->list, &prange->svms->list);
	prange->it_node.start = prange->start;
	prange->it_node.last = prange->last;
	interval_tree_insert(&prange->it_node, &prange->svms->objects);
}
static void svm_range_remove_notifier(struct svm_range *prange)
{
	pr_debug("remove notifier svms 0x%p prange 0x%p [0x%lx 0x%lx]\n",
		 prange->svms, prange,
		 prange->notifier.interval_tree.start >> PAGE_SHIFT,
		 prange->notifier.interval_tree.last >> PAGE_SHIFT);

	if (prange->notifier.interval_tree.start != 0 &&
	    prange->notifier.interval_tree.last != 0)
		mmu_interval_notifier_remove(&prange->notifier);
}
static bool
svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr && !dma_mapping_error(dev, dma_addr) &&
	       !(dma_addr & SVM_RANGE_VRAM_DOMAIN);
}
static int
svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
		      unsigned long offset, unsigned long npages,
		      unsigned long *hmm_pfns, uint32_t gpuidx)
{
	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
	dma_addr_t *addr = prange->dma_addr[gpuidx];
	struct device *dev = adev->dev;
	struct page *page;
	int i, r;

	if (!addr) {
		addr = kvmalloc_array(prange->npages, sizeof(*addr),
				      GFP_KERNEL | __GFP_ZERO);
		if (!addr)
			return -ENOMEM;
		prange->dma_addr[gpuidx] = addr;
	}

	addr += offset;
	for (i = 0; i < npages; i++) {
		if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
			dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);

		page = hmm_pfn_to_page(hmm_pfns[i]);
		if (is_zone_device_page(page)) {
			struct amdgpu_device *bo_adev =
				amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);

			addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
				   bo_adev->vm_manager.vram_base_offset -
				   bo_adev->kfd.dev->pgmap.range.start;
			addr[i] |= SVM_RANGE_VRAM_DOMAIN;
			pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]);
			continue;
		}
		addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
		r = dma_mapping_error(dev, addr[i]);
		if (r) {
			dev_err(dev, "failed %d dma_map_page\n", r);
			return r;
		}
		pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
				     addr[i] >> PAGE_SHIFT, page_to_pfn(page));
	}

	return 0;
}
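/*
 * Worked example for the zone-device branch above (hypothetical values): with
 * pgmap.range.start = 0x8000000000 and vram_base_offset = 0, a device page at
 * hmm_pfn 0x8000123 yields (0x8000123 << PAGE_SHIFT) - 0x8000000000 =
 * 0x123000, the page's physical offset inside VRAM. SVM_RANGE_VRAM_DOMAIN is
 * then ORed in so later code can tell VRAM offsets apart from real DMA
 * addresses.
 */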
static int
svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
		  unsigned long offset, unsigned long npages,
		  unsigned long *hmm_pfns)
{
	struct kfd_process *p;
	uint32_t gpuidx;
	int r = 0;

	p = container_of(prange->svms, struct kfd_process, svms);

	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
		struct kfd_process_device *pdd;

		pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			return -EINVAL;
		}

		r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
					  hmm_pfns, gpuidx);
		if (r)
			break;
	}

	return r;
}
void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
			 unsigned long offset, unsigned long npages)
{
	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
	int i;

	if (!dma_addr)
		return;

	for (i = offset; i < offset + npages; i++) {
		if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
			continue;
		pr_debug_ratelimited("unmap 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
		dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
	}
}
void svm_range_free_dma_mappings(struct svm_range *prange)
{
	struct kfd_process_device *pdd;
	dma_addr_t *dma_addr;
	struct device *dev;
	struct kfd_process *p;
	uint32_t gpuidx;

	p = container_of(prange->svms, struct kfd_process, svms);

	for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
		dma_addr = prange->dma_addr[gpuidx];
		if (!dma_addr)
			continue;

		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			continue;
		}
		dev = &pdd->dev->pdev->dev;
		svm_range_dma_unmap(dev, dma_addr, 0, prange->npages);
		kvfree(dma_addr);
		prange->dma_addr[gpuidx] = NULL;
	}
}
static void svm_range_free(struct svm_range *prange)
{
	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
		 prange->start, prange->last);

	svm_range_vram_node_free(prange);
	svm_range_free_dma_mappings(prange);
	mutex_destroy(&prange->lock);
	mutex_destroy(&prange->migrate_mutex);
	kfree(prange);
}
static void
svm_range_set_default_attributes(int32_t *location, int32_t *prefetch_loc,
				 uint8_t *granularity, uint32_t *flags)
{
	*location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
	*prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
	*granularity = 9;
	*flags =
		KFD_IOCTL_SVM_FLAG_HOST_ACCESS | KFD_IOCTL_SVM_FLAG_COHERENT;
}
static struct
svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
			 uint64_t last)
{
	uint64_t size = last - start + 1;
	struct svm_range *prange;
	struct kfd_process *p;

	prange = kzalloc(sizeof(*prange), GFP_KERNEL);
	if (!prange)
		return NULL;
	prange->npages = size;
	prange->svms = svms;
	prange->start = start;
	prange->last = last;
	INIT_LIST_HEAD(&prange->list);
	INIT_LIST_HEAD(&prange->update_list);
	INIT_LIST_HEAD(&prange->remove_list);
	INIT_LIST_HEAD(&prange->insert_list);
	INIT_LIST_HEAD(&prange->svm_bo_list);
	INIT_LIST_HEAD(&prange->deferred_list);
	INIT_LIST_HEAD(&prange->child_list);
	atomic_set(&prange->invalid, 0);
	prange->validate_timestamp = 0;
	mutex_init(&prange->migrate_mutex);
	mutex_init(&prange->lock);

	p = container_of(svms, struct kfd_process, svms);
	if (p->xnack_enabled)
		bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
			    MAX_GPU_INSTANCE);

	svm_range_set_default_attributes(&prange->preferred_loc,
					 &prange->prefetch_loc,
					 &prange->granularity, &prange->flags);

	pr_debug("svms 0x%p [0x%llx 0x%llx]\n", svms, start, last);

	return prange;
}
static bool svm_bo_ref_unless_zero(struct svm_range_bo *svm_bo)
{
	if (!svm_bo || !kref_get_unless_zero(&svm_bo->kref))
		return false;

	return true;
}
static void svm_range_bo_release(struct kref *kref)
{
	struct svm_range_bo *svm_bo;

	svm_bo = container_of(kref, struct svm_range_bo, kref);
	spin_lock(&svm_bo->list_lock);
	while (!list_empty(&svm_bo->range_list)) {
		struct svm_range *prange =
				list_first_entry(&svm_bo->range_list,
						 struct svm_range, svm_bo_list);
		/* list_del_init tells a concurrent svm_range_vram_node_new when
		 * it's safe to reuse the svm_bo pointer and svm_bo_list head.
		 */
		list_del_init(&prange->svm_bo_list);
		spin_unlock(&svm_bo->list_lock);

		pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
			 prange->start, prange->last);
		mutex_lock(&prange->lock);
		prange->svm_bo = NULL;
		mutex_unlock(&prange->lock);

		spin_lock(&svm_bo->list_lock);
	}
	spin_unlock(&svm_bo->list_lock);
	if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base)) {
		/* We're not in the eviction worker.
		 * Signal the fence and synchronize with any
		 * pending eviction work.
		 */
		dma_fence_signal(&svm_bo->eviction_fence->base);
		cancel_work_sync(&svm_bo->eviction_work);
	}
	dma_fence_put(&svm_bo->eviction_fence->base);
	amdgpu_bo_unref(&svm_bo->bo);
	kfree(svm_bo);
}
void svm_range_bo_unref(struct svm_range_bo *svm_bo)
{
	if (!svm_bo)
		return;

	kref_put(&svm_bo->kref, svm_range_bo_release);
}
static bool
svm_range_validate_svm_bo(struct amdgpu_device *adev, struct svm_range *prange)
{
	struct amdgpu_device *bo_adev;

	mutex_lock(&prange->lock);
	if (!prange->svm_bo) {
		mutex_unlock(&prange->lock);
		return false;
	}
	if (prange->ttm_res) {
		/* We still have a reference, all is well */
		mutex_unlock(&prange->lock);
		return true;
	}
	if (svm_bo_ref_unless_zero(prange->svm_bo)) {
		/*
		 * Migrate from GPU to GPU, remove range from source bo_adev
		 * svm_bo range list, and return false to allocate svm_bo from
		 * destination adev.
		 */
		bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
		if (bo_adev != adev) {
			mutex_unlock(&prange->lock);

			spin_lock(&prange->svm_bo->list_lock);
			list_del_init(&prange->svm_bo_list);
			spin_unlock(&prange->svm_bo->list_lock);

			svm_range_bo_unref(prange->svm_bo);
			return false;
		}
		if (READ_ONCE(prange->svm_bo->evicting)) {
			struct dma_fence *f;
			struct svm_range_bo *svm_bo;
			/* The BO is getting evicted,
			 * we need to get a new one
			 */
			mutex_unlock(&prange->lock);
			svm_bo = prange->svm_bo;
			f = dma_fence_get(&svm_bo->eviction_fence->base);
			svm_range_bo_unref(prange->svm_bo);
			/* wait for the fence to avoid long spin-loop
			 * at list_empty_careful
			 */
			dma_fence_wait(f, false);
			dma_fence_put(f);
		} else {
			/* The BO was still around and we got
			 * a new reference to it
			 */
			mutex_unlock(&prange->lock);
			pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);

			prange->ttm_res = prange->svm_bo->bo->tbo.resource;
			return true;
		}
	} else {
		mutex_unlock(&prange->lock);
	}

	/* We need a new svm_bo. Spin-loop to wait for concurrent
	 * svm_range_bo_release to finish removing this range from
	 * its range list. After this, it is safe to reuse the
	 * svm_bo pointer and svm_bo_list head.
	 */
	while (!list_empty_careful(&prange->svm_bo_list))
		;

	return false;
}
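/*
 * Illustrative sketch (not part of the driver): the handshake above relies on
 * the kref_get_unless_zero() pattern. A reader may reuse a shared object only
 * while its refcount has not yet dropped to zero; otherwise the releaser owns
 * teardown and the reader must wait or allocate a new object. All names below
 * are hypothetical.
 */
#if 0
static struct shared_obj *shared_obj_tryget(struct shared_obj *obj)
{
	/* Succeeds only while at least one other reference is still live */
	if (obj && kref_get_unless_zero(&obj->kref))
		return obj;
	return NULL;	/* release in progress; caller must allocate anew */
}
#endif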
static struct svm_range_bo *svm_range_bo_new(void)
{
	struct svm_range_bo *svm_bo;

	svm_bo = kzalloc(sizeof(*svm_bo), GFP_KERNEL);
	if (!svm_bo)
		return NULL;

	kref_init(&svm_bo->kref);
	INIT_LIST_HEAD(&svm_bo->range_list);
	spin_lock_init(&svm_bo->list_lock);

	return svm_bo;
}
int
svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
			bool clear)
{
	struct amdgpu_bo_param bp;
	struct svm_range_bo *svm_bo;
	struct amdgpu_bo_user *ubo;
	struct amdgpu_bo *bo;
	struct kfd_process *p;
	struct mm_struct *mm;
	int r;

	p = container_of(prange->svms, struct kfd_process, svms);
	pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms,
		 prange->start, prange->last);

	if (svm_range_validate_svm_bo(adev, prange))
		return 0;

	svm_bo = svm_range_bo_new();
	if (!svm_bo) {
		pr_debug("failed to alloc svm bo\n");
		return -ENOMEM;
	}
	mm = get_task_mm(p->lead_thread);
	if (!mm) {
		pr_debug("failed to get mm\n");
		kfree(svm_bo);
		return -ESRCH;
	}
	svm_bo->svms = prange->svms;
	svm_bo->eviction_fence =
		amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
					   mm,
					   svm_bo);
	mmput(mm);
	INIT_WORK(&svm_bo->eviction_work, svm_range_evict_svm_bo_worker);
	svm_bo->evicting = 0;
	memset(&bp, 0, sizeof(bp));
	bp.size = prange->npages * PAGE_SIZE;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
	bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.flags |= clear ? AMDGPU_GEM_CREATE_VRAM_CLEARED : 0;
	bp.flags |= AMDGPU_AMDKFD_CREATE_SVM_BO;
	bp.type = ttm_bo_type_device;
	bp.resv = NULL;

	r = amdgpu_bo_create_user(adev, &bp, &ubo);
	if (r) {
		pr_debug("failed %d to create bo\n", r);
		goto create_bo_failed;
	}
	bo = &ubo->bo;
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		pr_debug("failed %d to reserve bo\n", r);
		goto reserve_bo_failed;
	}

	r = dma_resv_reserve_shared(bo->tbo.base.resv, 1);
	if (r) {
		pr_debug("failed %d to reserve bo\n", r);
		amdgpu_bo_unreserve(bo);
		goto reserve_bo_failed;
	}
	amdgpu_bo_fence(bo, &svm_bo->eviction_fence->base, true);

	amdgpu_bo_unreserve(bo);

	svm_bo->bo = bo;
	prange->svm_bo = svm_bo;
	prange->ttm_res = bo->tbo.resource;
	prange->offset = 0;

	spin_lock(&svm_bo->list_lock);
	list_add(&prange->svm_bo_list, &svm_bo->range_list);
	spin_unlock(&svm_bo->list_lock);

	return 0;

reserve_bo_failed:
	amdgpu_bo_unref(&bo);
create_bo_failed:
	dma_fence_put(&svm_bo->eviction_fence->base);
	kfree(svm_bo);
	prange->ttm_res = NULL;

	return r;
}
void svm_range_vram_node_free(struct svm_range *prange)
{
	svm_range_bo_unref(prange->svm_bo);
	prange->ttm_res = NULL;
}
struct amdgpu_device *
svm_range_get_adev_by_id(struct svm_range *prange, uint32_t gpu_id)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p;
	int32_t gpu_idx;

	p = container_of(prange->svms, struct kfd_process, svms);

	gpu_idx = kfd_process_gpuidx_from_gpuid(p, gpu_id);
	if (gpu_idx < 0) {
		pr_debug("failed to get device by id 0x%x\n", gpu_id);
		return NULL;
	}
	pdd = kfd_process_device_from_gpuidx(p, gpu_idx);
	if (!pdd) {
		pr_debug("failed to get device by idx 0x%x\n", gpu_idx);
		return NULL;
	}

	return pdd->dev->adev;
}
struct kfd_process_device *
svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev)
{
	struct kfd_process *p;
	int32_t gpu_idx, gpuid;
	int r;

	p = container_of(prange->svms, struct kfd_process, svms);

	r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpu_idx);
	if (r) {
		pr_debug("failed to get device id by adev %p\n", adev);
		return NULL;
	}

	return kfd_process_device_from_gpuidx(p, gpu_idx);
}
static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);

	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}
static int
svm_range_check_attr(struct kfd_process *p,
		     uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
{
	uint32_t i;

	for (i = 0; i < nattr; i++) {
		uint32_t val = attrs[i].value;
		int gpuidx = MAX_GPU_INSTANCE;

		switch (attrs[i].type) {
		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
			if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM &&
			    val != KFD_IOCTL_SVM_LOCATION_UNDEFINED)
				gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
			break;
		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
			if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM)
				gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
			break;
		case KFD_IOCTL_SVM_ATTR_ACCESS:
		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
			gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
			break;
		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
			break;
		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
			break;
		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
			break;
		default:
			pr_debug("unknown attr type 0x%x\n", attrs[i].type);
			return -EINVAL;
		}

		if (gpuidx < 0) {
			pr_debug("no GPU 0x%x found\n", val);
			return -EINVAL;
		} else if (gpuidx < MAX_GPU_INSTANCE &&
			   !test_bit(gpuidx, p->svms.bitmap_supported)) {
			pr_debug("GPU 0x%x not supported\n", val);
			return -EINVAL;
		}
	}

	return 0;
}
static void
svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
		      uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
{
	int gpuidx;
	uint32_t i;

	for (i = 0; i < nattr; i++) {
		switch (attrs[i].type) {
		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
			prange->preferred_loc = attrs[i].value;
			break;
		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
			prange->prefetch_loc = attrs[i].value;
			break;
		case KFD_IOCTL_SVM_ATTR_ACCESS:
		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
			gpuidx = kfd_process_gpuidx_from_gpuid(p,
							       attrs[i].value);
			if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
				bitmap_clear(prange->bitmap_access, gpuidx, 1);
				bitmap_clear(prange->bitmap_aip, gpuidx, 1);
			} else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
				bitmap_set(prange->bitmap_access, gpuidx, 1);
				bitmap_clear(prange->bitmap_aip, gpuidx, 1);
			} else {
				bitmap_clear(prange->bitmap_access, gpuidx, 1);
				bitmap_set(prange->bitmap_aip, gpuidx, 1);
			}
			break;
		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
			prange->flags |= attrs[i].value;
			break;
		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
			prange->flags &= ~attrs[i].value;
			break;
		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
			prange->granularity = attrs[i].value;
			break;
		default:
			WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
		}
	}
}
/**
 * svm_range_debug_dump - print all range information from svms
 * @svms: svm range list header
 *
 * debug output svm range start, end, prefetch location from svms
 * interval tree and link list
 *
 * Context: The caller must hold svms->lock
 */
static void svm_range_debug_dump(struct svm_range_list *svms)
{
	struct interval_tree_node *node;
	struct svm_range *prange;

	pr_debug("dump svms 0x%p list\n", svms);
	pr_debug("range\tstart\tpage\tend\t\tlocation\n");

	list_for_each_entry(prange, &svms->list, list) {
		pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
			 prange, prange->start, prange->npages,
			 prange->start + prange->npages - 1,
			 prange->actual_loc);
	}

	pr_debug("dump svms 0x%p interval tree\n", svms);
	pr_debug("range\tstart\tpage\tend\t\tlocation\n");
	node = interval_tree_iter_first(&svms->objects, 0, ~0ULL);
	while (node) {
		prange = container_of(node, struct svm_range, it_node);
		pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
			 prange, prange->start, prange->npages,
			 prange->start + prange->npages - 1,
			 prange->actual_loc);
		node = interval_tree_iter_next(node, 0, ~0ULL);
	}
}
static bool
svm_range_is_same_attrs(struct svm_range *old, struct svm_range *new)
{
	return (old->prefetch_loc == new->prefetch_loc &&
		old->flags == new->flags &&
		old->granularity == new->granularity);
}
static int
svm_range_split_array(void *ppnew, void *ppold, size_t size,
		      uint64_t old_start, uint64_t old_n,
		      uint64_t new_start, uint64_t new_n)
{
	unsigned char *new, *old, *pold;
	uint64_t d;

	if (!ppold)
		return 0;
	pold = *(unsigned char **)ppold;
	if (!pold)
		return 0;

	new = kvmalloc_array(new_n, size, GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	d = (new_start - old_start) * size;
	memcpy(new, pold + d, new_n * size);

	old = kvmalloc_array(old_n, size, GFP_KERNEL);
	if (!old) {
		kvfree(new);
		return -ENOMEM;
	}

	d = (new_start == old_start) ? new_n * size : 0;
	memcpy(old, pold + d, old_n * size);

	kvfree(pold);
	*(void **)ppold = old;
	*(void **)ppnew = new;

	return 0;
}
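/*
 * Worked example for svm_range_split_array() (hypothetical values): an old
 * array covers 8 pages starting at old_start = 0x100, split so the new range
 * is the tail [0x104 0x107] (new_start = 0x104, new_n = 4, old_n = 4). The
 * first memcpy uses d = (0x104 - 0x100) * size to copy the tail entries into
 * the new array; the second uses d = 0 (because new_start != old_start), so
 * the old array keeps the head entries for pages [0x100 0x103].
 */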
static int
svm_range_split_pages(struct svm_range *new, struct svm_range *old,
		      uint64_t start, uint64_t last)
{
	uint64_t npages = last - start + 1;
	int i, r;

	for (i = 0; i < MAX_GPU_INSTANCE; i++) {
		r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i],
					  sizeof(*old->dma_addr[i]), old->start,
					  npages, new->start, new->npages);
		if (r)
			return r;
	}

	return 0;
}
static int
svm_range_split_nodes(struct svm_range *new, struct svm_range *old,
		      uint64_t start, uint64_t last)
{
	uint64_t npages = last - start + 1;

	pr_debug("svms 0x%p new prange 0x%p start 0x%lx [0x%llx 0x%llx]\n",
		 new->svms, new, new->start, start, last);

	if (new->start == old->start) {
		new->offset = old->offset;
		old->offset += new->npages;
	} else {
		new->offset = old->offset + npages;
	}

	new->svm_bo = svm_range_bo_ref(old->svm_bo);
	new->ttm_res = old->ttm_res;

	spin_lock(&new->svm_bo->list_lock);
	list_add(&new->svm_bo_list, &new->svm_bo->range_list);
	spin_unlock(&new->svm_bo->list_lock);

	return 0;
}
/**
 * svm_range_split_adjust - split range and adjust
 *
 * @new: the new range
 * @old: the old range
 * @start: the old range adjust to start address in pages
 * @last: the old range adjust to last address in pages
 *
 * Copy system memory dma_addr or vram ttm_res in old range to new
 * range from new_start up to size new->npages, the remaining old range is from
 * start to last
 *
 * Return:
 * 0 - OK, -ENOMEM - out of memory
 */
static int
svm_range_split_adjust(struct svm_range *new, struct svm_range *old,
		       uint64_t start, uint64_t last)
{
	int r;

	pr_debug("svms 0x%p new 0x%lx old [0x%lx 0x%lx] => [0x%llx 0x%llx]\n",
		 new->svms, new->start, old->start, old->last, start, last);

	if (new->start < old->start ||
	    new->last > old->last) {
		WARN_ONCE(1, "invalid new range start or last\n");
		return -EINVAL;
	}

	r = svm_range_split_pages(new, old, start, last);
	if (r)
		return r;

	if (old->actual_loc && old->ttm_res) {
		r = svm_range_split_nodes(new, old, start, last);
		if (r)
			return r;
	}

	old->npages = last - start + 1;
	old->start = start;
	old->last = last;
	new->flags = old->flags;
	new->preferred_loc = old->preferred_loc;
	new->prefetch_loc = old->prefetch_loc;
	new->actual_loc = old->actual_loc;
	new->granularity = old->granularity;
	bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
	bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);

	return 0;
}
/**
 * svm_range_split - split a range in 2 ranges
 *
 * @prange: the svm range to split
 * @start: the remaining range start address in pages
 * @last: the remaining range last address in pages
 * @new: the result new range generated
 *
 * Two cases:
 *
 * case 1: if start == prange->start
 *         prange ==> prange[start, last]
 *         new range [last + 1, prange->last]
 *
 * case 2: if last == prange->last
 *         prange ==> prange[start, last]
 *         new range [prange->start, start - 1]
 *
 * Return:
 * 0 - OK, -ENOMEM - out of memory, -EINVAL - invalid start, last
 */
static int
svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
		struct svm_range **new)
{
	uint64_t old_start = prange->start;
	uint64_t old_last = prange->last;
	struct svm_range_list *svms;
	int r = 0;

	pr_debug("svms 0x%p [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", prange->svms,
		 old_start, old_last, start, last);

	if (old_start != start && old_last != last)
		return -EINVAL;
	if (start < old_start || last > old_last)
		return -EINVAL;

	svms = prange->svms;
	if (old_start == start)
		*new = svm_range_new(svms, last + 1, old_last);
	else
		*new = svm_range_new(svms, old_start, start - 1);
	if (!*new)
		return -ENOMEM;

	r = svm_range_split_adjust(*new, prange, start, last);
	if (r) {
		pr_debug("failed %d split [0x%llx 0x%llx] to [0x%llx 0x%llx]\n",
			 r, old_start, old_last, start, last);
		svm_range_free(*new);
		*new = NULL;
	}

	return r;
}
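/*
 * Illustrative use of svm_range_split() (hypothetical page numbers): given
 * prange [0x1000 0x1fff], keeping the head is case 1 and keeping the tail is
 * case 2; splitting in the middle fails with -EINVAL.
 */
#if 0
	struct svm_range *new;

	/* case 1: prange becomes [0x1000 0x17ff], new gets [0x1800 0x1fff] */
	r = svm_range_split(prange, 0x1000, 0x17ff, &new);

	/* case 2: prange becomes [0x1800 0x1fff], new gets [0x1000 0x17ff] */
	r = svm_range_split(prange, 0x1800, 0x1fff, &new);
#endif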
static int
svm_range_split_tail(struct svm_range *prange, struct svm_range *new,
		     uint64_t new_last, struct list_head *insert_list)
{
	struct svm_range *tail;
	int r = svm_range_split(prange, prange->start, new_last, &tail);

	if (!r)
		list_add(&tail->insert_list, insert_list);
	return r;
}

static int
svm_range_split_head(struct svm_range *prange, struct svm_range *new,
		     uint64_t new_start, struct list_head *insert_list)
{
	struct svm_range *head;
	int r = svm_range_split(prange, new_start, prange->last, &head);

	if (!r)
		list_add(&head->insert_list, insert_list);
	return r;
}
static void
svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
		    struct svm_range *pchild, enum svm_work_list_ops op)
{
	pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n",
		 pchild, pchild->start, pchild->last, prange, op);

	pchild->work_item.mm = mm;
	pchild->work_item.op = op;
	list_add_tail(&pchild->child_list, &prange->child_list);
}
/**
 * svm_range_split_by_granularity - collect ranges within granularity boundary
 *
 * @p: the process with svms list
 * @mm: mm structure
 * @addr: the vm fault address in pages, to split the prange
 * @parent: parent range if prange is from child list
 * @prange: prange to split
 *
 * Trims @prange to be a single aligned block of prange->granularity if
 * possible. The head and tail are added to the child_list in @parent.
 *
 * Context: caller must hold mmap_read_lock and prange->lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
int
svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
			       unsigned long addr, struct svm_range *parent,
			       struct svm_range *prange)
{
	struct svm_range *head, *tail;
	unsigned long start, last, size;
	int r;

	/* Align split range start and size to granularity size, then a single
	 * PTE will be used for whole range, this reduces the number of PTE
	 * updated and the L1 TLB space used for translation.
	 */
	size = 1UL << prange->granularity;
	start = ALIGN_DOWN(addr, size);
	last = ALIGN(addr + 1, size) - 1;

	pr_debug("svms 0x%p split [0x%lx 0x%lx] to [0x%lx 0x%lx] size 0x%lx\n",
		 prange->svms, prange->start, prange->last, start, last, size);

	if (start > prange->start) {
		r = svm_range_split(prange, start, prange->last, &head);
		if (r)
			return r;
		svm_range_add_child(parent, mm, head, SVM_OP_ADD_RANGE);
	}

	if (last < prange->last) {
		r = svm_range_split(prange, prange->start, last, &tail);
		if (r)
			return r;
		svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
	}

	/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
	if (p->xnack_enabled && prange->work_item.op == SVM_OP_ADD_RANGE) {
		prange->work_item.op = SVM_OP_ADD_RANGE_AND_MAP;
		pr_debug("change prange 0x%p [0x%lx 0x%lx] op %d\n",
			 prange, prange->start, prange->last,
			 SVM_OP_ADD_RANGE_AND_MAP);
	}
	return 0;
}
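/*
 * Worked example for the alignment above (hypothetical values): with
 * prange->granularity = 9, size = 1UL << 9 = 512 pages. A fault at page
 * address addr = 0x1234 gives start = ALIGN_DOWN(0x1234, 512) = 0x1200 and
 * last = ALIGN(0x1235, 512) - 1 = 0x13ff, so the fault is served by one
 * aligned 512-page (2 MiB) block that a single PTE update can cover.
 */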
static uint64_t
svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
			int domain)
{
	struct amdgpu_device *bo_adev;
	uint32_t flags = prange->flags;
	uint32_t mapping_flags = 0;
	uint64_t pte_flags;
	bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
	bool coherent = flags & KFD_IOCTL_SVM_FLAG_COHERENT;

	if (domain == SVM_RANGE_VRAM_DOMAIN)
		bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);

	switch (KFD_GC_VERSION(adev->kfd.dev)) {
	case IP_VERSION(9, 4, 1):
		if (domain == SVM_RANGE_VRAM_DOMAIN) {
			if (bo_adev == adev) {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
			} else {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
				if (amdgpu_xgmi_same_hive(adev, bo_adev))
					snoop = true;
			}
		} else {
			mapping_flags |= coherent ?
				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		}
		break;
	case IP_VERSION(9, 4, 2):
		if (domain == SVM_RANGE_VRAM_DOMAIN) {
			if (bo_adev == adev) {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
				if (adev->gmc.xgmi.connected_to_cpu)
					snoop = true;
			} else {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
				if (amdgpu_xgmi_same_hive(adev, bo_adev))
					snoop = true;
			}
		} else {
			mapping_flags |= coherent ?
				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		}
		break;
	default:
		mapping_flags |= coherent ?
			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
	}

	mapping_flags |= AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;

	if (flags & KFD_IOCTL_SVM_FLAG_GPU_RO)
		mapping_flags &= ~AMDGPU_VM_PAGE_WRITEABLE;
	if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC)
		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;

	pte_flags = AMDGPU_PTE_VALID;
	pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM;
	pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;

	pte_flags |= amdgpu_gem_va_map_flags(adev, mapping_flags);
	return pte_flags;
}
static int
svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			 uint64_t start, uint64_t last,
			 struct dma_fence **fence)
{
	uint64_t init_pte_value = 0;

	pr_debug("[0x%llx 0x%llx]\n", start, last);

	return amdgpu_vm_bo_update_mapping(adev, adev, vm, false, true, NULL,
					   start, last, init_pte_value, 0,
					   NULL, NULL, fence, NULL);
}
static int
svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
			  unsigned long last)
{
	DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
	struct kfd_process_device *pdd;
	struct dma_fence *fence = NULL;
	struct kfd_process *p;
	uint32_t gpuidx;
	int r = 0;

	bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
		  MAX_GPU_INSTANCE);
	p = container_of(prange->svms, struct kfd_process, svms);

	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
		pr_debug("unmap from gpu idx 0x%x\n", gpuidx);
		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			return -EINVAL;
		}

		r = svm_range_unmap_from_gpu(pdd->dev->adev,
					     drm_priv_to_vm(pdd->drm_priv),
					     start, last, &fence);
		if (r)
			break;

		if (fence) {
			r = dma_fence_wait(fence, false);
			dma_fence_put(fence);
			fence = NULL;
			if (r)
				break;
		}
		amdgpu_amdkfd_flush_gpu_tlb_pasid(pdd->dev->adev,
						  p->pasid, TLB_FLUSH_HEAVYWEIGHT);
	}

	return r;
}
static int
svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		     struct svm_range *prange, unsigned long offset,
		     unsigned long npages, bool readonly, dma_addr_t *dma_addr,
		     struct amdgpu_device *bo_adev, struct dma_fence **fence)
{
	struct amdgpu_bo_va bo_va;
	bool table_freed = false;
	uint64_t pte_flags;
	unsigned long last_start;
	int last_domain;
	int r = 0;
	int64_t i, j;

	last_start = prange->start + offset;

	pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
		 last_start, last_start + npages - 1, readonly);

	if (prange->svm_bo && prange->ttm_res)
		bo_va.is_xgmi = amdgpu_xgmi_same_hive(adev, bo_adev);

	for (i = offset; i < offset + npages; i++) {
		last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
		dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;

		/* Collect all pages in the same address range and memory domain
		 * that can be mapped with a single call to update mapping.
		 */
		if (i < offset + npages - 1 &&
		    last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
			continue;

		pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
			 last_start, prange->start + i, last_domain ? "GPU" : "CPU");

		pte_flags = svm_range_get_pte_flags(adev, prange, last_domain);
		if (readonly)
			pte_flags &= ~AMDGPU_PTE_WRITEABLE;

		pr_debug("svms 0x%p map [0x%lx 0x%llx] vram %d PTE 0x%llx\n",
			 prange->svms, last_start, prange->start + i,
			 (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
			 pte_flags);

		r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false,
						NULL, last_start,
						prange->start + i, pte_flags,
						last_start - prange->start,
						NULL, dma_addr,
						&vm->last_update,
						&table_freed);

		for (j = last_start - prange->start; j <= i; j++)
			dma_addr[j] |= last_domain;

		if (r) {
			pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
			goto out;
		}
		last_start = prange->start + i + 1;
	}

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		pr_debug("failed %d to update directories 0x%lx\n", r,
			 prange->start);
		goto out;
	}

	if (fence)
		*fence = dma_fence_get(vm->last_update);

	if (table_freed) {
		struct kfd_process *p;

		p = container_of(prange->svms, struct kfd_process, svms);
		amdgpu_amdkfd_flush_gpu_tlb_pasid(adev, p->pasid, TLB_FLUSH_LEGACY);
	}
out:
	return r;
}
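/*
 * Batching example for the mapping loop above (hypothetical domains): five
 * pages tagged [sys, sys, vram, vram, sys] result in three
 * amdgpu_vm_bo_update_mapping() calls, one per maximal run of pages sharing
 * the same SVM_RANGE_VRAM_DOMAIN bit, instead of five single-page updates.
 */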
static int
svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
		      unsigned long npages, bool readonly,
		      unsigned long *bitmap, bool wait)
{
	struct kfd_process_device *pdd;
	struct amdgpu_device *bo_adev;
	struct kfd_process *p;
	struct dma_fence *fence = NULL;
	uint32_t gpuidx;
	int r = 0;

	if (prange->svm_bo && prange->ttm_res)
		bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
	else
		bo_adev = NULL;

	p = container_of(prange->svms, struct kfd_process, svms);
	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
		pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			return -EINVAL;
		}

		pdd = kfd_bind_process_to_device(pdd->dev, p);
		if (IS_ERR(pdd))
			return -EINVAL;

		if (bo_adev && pdd->dev->adev != bo_adev &&
		    !amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
			pr_debug("cannot map to device idx %d\n", gpuidx);
			continue;
		}

		r = svm_range_map_to_gpu(pdd->dev->adev, drm_priv_to_vm(pdd->drm_priv),
					 prange, offset, npages, readonly,
					 prange->dma_addr[gpuidx],
					 bo_adev, wait ? &fence : NULL);
		if (r)
			break;

		if (fence) {
			r = dma_fence_wait(fence, false);
			dma_fence_put(fence);
			fence = NULL;
			if (r) {
				pr_debug("failed %d to dma fence wait\n", r);
				break;
			}
		}
	}

	return r;
}
struct svm_validate_context {
	struct kfd_process *process;
	struct svm_range *prange;
	bool intr;
	unsigned long bitmap[MAX_GPU_INSTANCE];
	struct ttm_validate_buffer tv[MAX_GPU_INSTANCE];
	struct list_head validate_list;
	struct ww_acquire_ctx ticket;
};
static int svm_range_reserve_bos(struct svm_validate_context *ctx)
{
	struct kfd_process_device *pdd;
	struct amdgpu_vm *vm;
	uint32_t gpuidx;
	int r;

	INIT_LIST_HEAD(&ctx->validate_list);
	for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
		pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			return -EINVAL;
		}
		vm = drm_priv_to_vm(pdd->drm_priv);

		ctx->tv[gpuidx].bo = &vm->root.bo->tbo;
		ctx->tv[gpuidx].num_shared = 4;
		list_add(&ctx->tv[gpuidx].head, &ctx->validate_list);
	}

	r = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->validate_list,
				   ctx->intr, NULL);
	if (r) {
		pr_debug("failed %d to reserve bo\n", r);
		return r;
	}

	for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
		pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			r = -EINVAL;
			goto unreserve_out;
		}

		r = amdgpu_vm_validate_pt_bos(pdd->dev->adev,
					      drm_priv_to_vm(pdd->drm_priv),
					      svm_range_bo_validate, NULL);
		if (r) {
			pr_debug("failed %d validate pt bos\n", r);
			goto unreserve_out;
		}
	}

	return 0;

unreserve_out:
	ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list);
	return r;
}

static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
{
	ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list);
}
static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
{
	struct kfd_process_device *pdd;

	pdd = kfd_process_device_from_gpuidx(p, gpuidx);

	return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev);
}
/*
 * Validation+GPU mapping with concurrent invalidation (MMU notifiers)
 *
 * To prevent concurrent destruction or change of range attributes, the
 * svm_read_lock must be held. The caller must not hold the svm_write_lock
 * because that would block concurrent evictions and lead to deadlocks. To
 * serialize concurrent migrations or validations of the same range, the
 * prange->migrate_mutex must be held.
 *
 * For VRAM ranges, the SVM BO must be allocated and valid (protected by its
 * eviction fence).
 *
 * The following sequence ensures race-free validation and GPU mapping:
 *
 * 1. Reserve page table (and SVM BO if range is in VRAM)
 * 2. hmm_range_fault to get page addresses (if system memory)
 * 3. DMA-map pages (if system memory)
 * 4-a. Take notifier lock
 * 4-b. Check that pages still valid (mmu_interval_read_retry)
 * 4-c. Check that the range was not split or otherwise invalidated
 * 4-d. Update GPU page table
 * 4-e. Release notifier lock
 * 5. Release page table (and SVM BO) reservation
 */
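/*
 * Minimal sketch of the retry pattern behind steps 4-a..4-e (illustrative
 * only, simplified relative to the real code below; error handling and DMA
 * mapping omitted). mmu_interval_read_begin()/mmu_interval_read_retry() are
 * the core-kernel primitives that detect a CPU invalidation racing with the
 * GPU page table update.
 */
#if 0
	unsigned long seq;

	do {
		seq = mmu_interval_read_begin(&prange->notifier);
		/* steps 2/3: fault and DMA-map the pages outside the lock */
		svm_range_lock(prange);
		if (mmu_interval_read_retry(&prange->notifier, seq)) {
			/* pages were invalidated concurrently, start over */
			svm_range_unlock(prange);
			continue;
		}
		/* step 4-d: update the GPU page table under the lock */
		svm_range_unlock(prange);
		break;
	} while (true);
#endif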
static int svm_range_validate_and_map(struct mm_struct *mm,
				      struct svm_range *prange,
				      int32_t gpuidx, bool intr, bool wait)
{
	struct svm_validate_context ctx;
	unsigned long start, end, addr;
	struct kfd_process *p;
	void *owner;
	int32_t idx;
	int r = 0;

	ctx.process = container_of(prange->svms, struct kfd_process, svms);
	ctx.prange = prange;
	ctx.intr = intr;

	if (gpuidx < MAX_GPU_INSTANCE) {
		bitmap_zero(ctx.bitmap, MAX_GPU_INSTANCE);
		bitmap_set(ctx.bitmap, gpuidx, 1);
	} else if (ctx.process->xnack_enabled) {
		bitmap_copy(ctx.bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);

		/* If prefetch range to GPU, or GPU retry fault migrate range to
		 * GPU, which has ACCESS attribute to the range, create mapping
		 * on that GPU.
		 */
		if (prange->actual_loc) {
			gpuidx = kfd_process_gpuidx_from_gpuid(ctx.process,
							       prange->actual_loc);
			if (gpuidx < 0) {
				WARN_ONCE(1, "failed get device by id 0x%x\n",
					  prange->actual_loc);
				return -EINVAL;
			}
			if (test_bit(gpuidx, prange->bitmap_access))
				bitmap_set(ctx.bitmap, gpuidx, 1);
		}
	} else {
		bitmap_or(ctx.bitmap, prange->bitmap_access,
			  prange->bitmap_aip, MAX_GPU_INSTANCE);
	}

	if (bitmap_empty(ctx.bitmap, MAX_GPU_INSTANCE))
		return 0;

	if (prange->actual_loc && !prange->ttm_res) {
		/* This should never happen. actual_loc gets set by
		 * svm_migrate_ram_to_vram after allocating a BO.
		 */
		WARN_ONCE(1, "VRAM BO missing during validation\n");
		return -EINVAL;
	}

	svm_range_reserve_bos(&ctx);

	p = container_of(prange->svms, struct kfd_process, svms);
	owner = kfd_svm_page_owner(p, find_first_bit(ctx.bitmap,
						     MAX_GPU_INSTANCE));
	for_each_set_bit(idx, ctx.bitmap, MAX_GPU_INSTANCE) {
		if (kfd_svm_page_owner(p, idx) != owner) {
			owner = NULL;
			break;
		}
	}

	start = prange->start << PAGE_SHIFT;
	end = (prange->last + 1) << PAGE_SHIFT;
	for (addr = start; addr < end && !r; ) {
		struct hmm_range *hmm_range;
		struct vm_area_struct *vma;
		unsigned long next;
		unsigned long offset;
		unsigned long npages;
		bool readonly;

		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start) {
			r = -EFAULT;
			goto unreserve_out;
		}
		readonly = !(vma->vm_flags & VM_WRITE);

		next = min(vma->vm_end, end);
		npages = (next - addr) >> PAGE_SHIFT;
		WRITE_ONCE(p->svms.faulting_task, current);
		r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
					       addr, npages, &hmm_range,
					       readonly, true, owner);
		WRITE_ONCE(p->svms.faulting_task, NULL);
		if (r) {
			pr_debug("failed %d to get svm range pages\n", r);
			goto unreserve_out;
		}

		offset = (addr - start) >> PAGE_SHIFT;
		r = svm_range_dma_map(prange, ctx.bitmap, offset, npages,
				      hmm_range->hmm_pfns);
		if (r) {
			pr_debug("failed %d to dma map range\n", r);
			goto unreserve_out;
		}

		svm_range_lock(prange);
		if (amdgpu_hmm_range_get_pages_done(hmm_range)) {
			pr_debug("hmm update the range, need validate again\n");
			r = -EAGAIN;
			goto unlock_out;
		}
		if (!list_empty(&prange->child_list)) {
			pr_debug("range split by unmap in parallel, validate again\n");
			r = -EAGAIN;
			goto unlock_out;
		}

		r = svm_range_map_to_gpus(prange, offset, npages, readonly,
					  ctx.bitmap, wait);

unlock_out:
		svm_range_unlock(prange);

		addr = next;
	}

	if (addr == end)
		prange->validated_once = true;

unreserve_out:
	svm_range_unreserve_bos(&ctx);

	if (!r)
		prange->validate_timestamp = ktime_to_us(ktime_get());

	return r;
}
/**
 * svm_range_list_lock_and_flush_work - flush pending deferred work
 *
 * @svms: the svm range list
 * @mm: the mm structure
 *
 * Context: Returns with mmap write lock held, pending deferred work flushed
 */
static void
svm_range_list_lock_and_flush_work(struct svm_range_list *svms,
				   struct mm_struct *mm)
{
retry_flush_work:
	flush_work(&svms->deferred_list_work);
	mmap_write_lock(mm);

	if (list_empty(&svms->deferred_range_list))
		return;
	mmap_write_unlock(mm);
	pr_debug("retry flush\n");
	goto retry_flush_work;
}
static void svm_range_restore_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct svm_range_list *svms;
	struct svm_range *prange;
	struct kfd_process *p;
	struct mm_struct *mm;
	int evicted_ranges;
	int invalid;
	int r;

	svms = container_of(dwork, struct svm_range_list, restore_work);
	evicted_ranges = atomic_read(&svms->evicted_ranges);
	if (!evicted_ranges)
		return;

	pr_debug("restore svm ranges\n");

	/* kfd_process_notifier_release destroys this worker thread. So during
	 * the lifetime of this thread, kfd_process and mm will be valid.
	 */
	p = container_of(svms, struct kfd_process, svms);
	mm = p->mm;

	svm_range_list_lock_and_flush_work(svms, mm);
	mutex_lock(&svms->lock);

	evicted_ranges = atomic_read(&svms->evicted_ranges);

	list_for_each_entry(prange, &svms->list, list) {
		invalid = atomic_read(&prange->invalid);
		if (!invalid)
			continue;

		pr_debug("restoring svms 0x%p prange 0x%p [0x%lx %lx] inv %d\n",
			 prange->svms, prange, prange->start, prange->last,
			 invalid);

		/* If the range is migrating, wait for the migration to finish. */
		mutex_lock(&prange->migrate_mutex);

		r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
					       false, true);
		if (r)
			pr_debug("failed %d to map 0x%lx to gpus\n", r,
				 prange->start);

		mutex_unlock(&prange->migrate_mutex);
		if (r)
			goto out_reschedule;

		if (atomic_cmpxchg(&prange->invalid, invalid, 0) != invalid)
			goto out_reschedule;
	}

	if (atomic_cmpxchg(&svms->evicted_ranges, evicted_ranges, 0) !=
	    evicted_ranges)
		goto out_reschedule;

	evicted_ranges = 0;

	r = kgd2kfd_resume_mm(mm);
	if (r) {
		/* No recovery from this failure. Probably the CP is
		 * hanging. No point trying again.
		 */
		pr_debug("failed %d to resume KFD\n", r);
	}

	pr_debug("restore svm ranges successfully\n");

out_reschedule:
	mutex_unlock(&svms->lock);
	mmap_write_unlock(mm);

	/* If validation failed, reschedule another attempt */
	if (evicted_ranges) {
		pr_debug("reschedule to restore svm range\n");
		schedule_delayed_work(&svms->restore_work,
				      msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
	}
}
/**
 * svm_range_evict - evict svm range
 *
 * Stop all queues of the process to ensure the GPU doesn't access the memory,
 * then return to let the CPU evict the buffer and proceed with the CPU
 * pagetable update.
 *
 * We don't need a lock to synchronize the CPU pagetable invalidation with GPU
 * execution. If an invalidation happens while the restore work is running,
 * the restore work restarts to ensure it picks up the latest CPU page mapping
 * for the GPU, then starts the queues.
 */
static int
svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
		unsigned long start, unsigned long last)
{
	struct svm_range_list *svms = prange->svms;
	struct svm_range *pchild;
	struct kfd_process *p;
	int r = 0;

	p = container_of(svms, struct kfd_process, svms);

	pr_debug("invalidate svms 0x%p prange [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
		 svms, prange->start, prange->last, start, last);

	if (!p->xnack_enabled) {
		int evicted_ranges;

		list_for_each_entry(pchild, &prange->child_list, child_list) {
			mutex_lock_nested(&pchild->lock, 1);
			if (pchild->start <= last && pchild->last >= start) {
				pr_debug("increment pchild invalid [0x%lx 0x%lx]\n",
					 pchild->start, pchild->last);
				atomic_inc(&pchild->invalid);
			}
			mutex_unlock(&pchild->lock);
		}

		if (prange->start <= last && prange->last >= start)
			atomic_inc(&prange->invalid);

		evicted_ranges = atomic_inc_return(&svms->evicted_ranges);
		if (evicted_ranges != 1)
			return r;

		pr_debug("evicting svms 0x%p range [0x%lx 0x%lx]\n",
			 prange->svms, prange->start, prange->last);

		/* First eviction, stop the queues */
		r = kgd2kfd_quiesce_mm(mm);
		if (r)
			pr_debug("failed to quiesce KFD\n");

		pr_debug("schedule to restore svm %p ranges\n", svms);
		schedule_delayed_work(&svms->restore_work,
				      msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
	} else {
		unsigned long s, l;

		pr_debug("invalidate unmap svms 0x%p [0x%lx 0x%lx] from GPUs\n",
			 prange->svms, start, last);
		list_for_each_entry(pchild, &prange->child_list, child_list) {
			mutex_lock_nested(&pchild->lock, 1);
			s = max(start, pchild->start);
			l = min(last, pchild->last);
			if (l >= s)
				svm_range_unmap_from_gpus(pchild, s, l);
			mutex_unlock(&pchild->lock);
		}
		s = max(start, prange->start);
		l = min(last, prange->last);
		if (l >= s)
			svm_range_unmap_from_gpus(prange, s, l);
	}

	return r;
}
static struct svm_range *svm_range_clone(struct svm_range *old)
{
	struct svm_range *new;

	new = svm_range_new(old->svms, old->start, old->last);
	if (!new)
		return NULL;

	if (old->svm_bo) {
		new->ttm_res = old->ttm_res;
		new->offset = old->offset;
		new->svm_bo = svm_range_bo_ref(old->svm_bo);
		spin_lock(&new->svm_bo->list_lock);
		list_add(&new->svm_bo_list, &new->svm_bo->range_list);
		spin_unlock(&new->svm_bo->list_lock);
	}
	new->flags = old->flags;
	new->preferred_loc = old->preferred_loc;
	new->prefetch_loc = old->prefetch_loc;
	new->actual_loc = old->actual_loc;
	new->granularity = old->granularity;
	bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
	bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);

	return new;
}
/**
 * svm_range_handle_overlap - split overlap ranges
 * @svms: svm range list header
 * @new: range added with these attributes
 * @start: range added start address, in pages
 * @last: range last address, in pages
 * @update_list: output, the ranges attributes are updated. For set_attr, this
 *               will do validation and map to GPUs. For unmap, this will be
 *               removed and unmapped from GPUs
 * @insert_list: output, the ranges will be inserted into svms, attributes are
 *               not changed. For set_attr, this will add into svms.
 * @remove_list: output, the ranges will be removed from svms
 * @left: the remaining range after overlap. For set_attr, this will be added
 *        as a new range.
 *
 * In total there are 5 overlap cases.
 *
 * This function handles overlap of an address interval with existing
 * struct svm_ranges for applying new attributes. This may require
 * splitting existing struct svm_ranges. All changes should be applied to
 * the range_list and interval tree transactionally. If any split operation
 * fails, the entire update fails. Therefore the existing overlapping
 * svm_ranges are cloned and the original svm_ranges left unchanged. If the
 * transaction succeeds, the modified clones are added and the originals
 * freed. Otherwise the clones are removed and the old svm_ranges remain.
 *
 * Context: The caller must hold svms->lock
 */
static int
svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new,
			 unsigned long start, unsigned long last,
			 struct list_head *update_list,
			 struct list_head *insert_list,
			 struct list_head *remove_list,
			 unsigned long *left)
{
	struct interval_tree_node *node;
	struct svm_range *prange;
	struct svm_range *tmp;
	int r = 0;

	INIT_LIST_HEAD(update_list);
	INIT_LIST_HEAD(insert_list);
	INIT_LIST_HEAD(remove_list);

	node = interval_tree_iter_first(&svms->objects, start, last);
	while (node) {
		struct interval_tree_node *next;
		struct svm_range *old;
		unsigned long next_start;

		pr_debug("found overlap node [0x%lx 0x%lx]\n", node->start,
			 node->last);

		old = container_of(node, struct svm_range, it_node);
		next = interval_tree_iter_next(node, start, last);
		next_start = min(node->last, last) + 1;

		if (node->start < start || node->last > last) {
			/* node intersects the updated range, clone+split it */
			prange = svm_range_clone(old);
			if (!prange) {
				r = -ENOMEM;
				goto out;
			}

			list_add(&old->remove_list, remove_list);
			list_add(&prange->insert_list, insert_list);

			if (node->start < start) {
				pr_debug("change old range start\n");
				r = svm_range_split_head(prange, new, start,
							 insert_list);
				if (r)
					goto out;
			}
			if (node->last > last) {
				pr_debug("change old range last\n");
				r = svm_range_split_tail(prange, new, last,
							 insert_list);
				if (r)
					goto out;
			}
		} else {
			/* The node is contained within start..last,
			 * attributes must be updated
			 */
			prange = old;
		}

		if (!svm_range_is_same_attrs(prange, new))
			list_add(&prange->update_list, update_list);

		/* insert a new node if needed */
		if (node->start > start) {
			prange = svm_range_new(prange->svms, start,
					       node->start - 1);
			if (!prange) {
				r = -ENOMEM;
				goto out;
			}

			list_add(&prange->insert_list, insert_list);
			list_add(&prange->update_list, update_list);
		}

		node = next;
		start = next_start;
	}

	if (left && start <= last)
		*left = last - start + 1;

out:
	if (r)
		list_for_each_entry_safe(prange, tmp, insert_list, insert_list)
			svm_range_free(prange);

	return r;
}
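/*
 * The five overlap cases handled above, for an update of [start last] against
 * an existing node (illustrative):
 *
 *   1. node == [start last]               update attributes in place
 *   2. node contains [start last]         clone, split off head and tail
 *   3. node overlaps only the start       clone, split off the head
 *   4. node overlaps only the end         clone, split off the tail
 *   5. no node covers part of the range   insert a brand-new svm_range
 */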
static void
svm_range_update_notifier_and_interval_tree(struct mm_struct *mm,
					    struct svm_range *prange)
{
	unsigned long start;
	unsigned long last;

	start = prange->notifier.interval_tree.start >> PAGE_SHIFT;
	last = prange->notifier.interval_tree.last >> PAGE_SHIFT;

	if (prange->start == start && prange->last == last)
		return;

	pr_debug("up notifier 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
		 prange->svms, prange, start, last, prange->start,
		 prange->last);

	if (start != 0 && last != 0) {
		interval_tree_remove(&prange->it_node, &prange->svms->objects);
		svm_range_remove_notifier(prange);
	}
	prange->it_node.start = prange->start;
	prange->it_node.last = prange->last;

	interval_tree_insert(&prange->it_node, &prange->svms->objects);
	svm_range_add_notifier_locked(mm, prange);
}
static void
svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange)
{
	struct mm_struct *mm = prange->work_item.mm;

	switch (prange->work_item.op) {
	case SVM_OP_NULL:
		pr_debug("NULL OP 0x%p prange 0x%p [0x%lx 0x%lx]\n",
			 svms, prange, prange->start, prange->last);
		break;
	case SVM_OP_UNMAP_RANGE:
		pr_debug("remove 0x%p prange 0x%p [0x%lx 0x%lx]\n",
			 svms, prange, prange->start, prange->last);
		svm_range_unlink(prange);
		svm_range_remove_notifier(prange);
		svm_range_free(prange);
		break;
	case SVM_OP_UPDATE_RANGE_NOTIFIER:
		pr_debug("update notifier 0x%p prange 0x%p [0x%lx 0x%lx]\n",
			 svms, prange, prange->start, prange->last);
		svm_range_update_notifier_and_interval_tree(mm, prange);
		break;
	case SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP:
		pr_debug("update and map 0x%p prange 0x%p [0x%lx 0x%lx]\n",
			 svms, prange, prange->start, prange->last);
		svm_range_update_notifier_and_interval_tree(mm, prange);
		/* TODO: implement deferred validation and mapping */
		break;
	case SVM_OP_ADD_RANGE:
		pr_debug("add 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, prange,
			 prange->start, prange->last);
		svm_range_add_to_svms(prange);
		svm_range_add_notifier_locked(mm, prange);
		break;
	case SVM_OP_ADD_RANGE_AND_MAP:
		pr_debug("add and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms,
			 prange, prange->start, prange->last);
		svm_range_add_to_svms(prange);
		svm_range_add_notifier_locked(mm, prange);
		/* TODO: implement deferred validation and mapping */
		break;
	default:
		WARN_ONCE(1, "Unknown prange 0x%p work op %d\n", prange,
			  prange->work_item.op);
	}
}
static void svm_range_drain_retry_fault(struct svm_range_list *svms)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p;
	int drain;
	uint32_t i;

	p = container_of(svms, struct kfd_process, svms);

restart:
	drain = atomic_read(&svms->drain_pagefaults);
	if (!drain)
		return;

	for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
		pdd = p->pdds[i];
		if (!pdd)
			continue;

		pr_debug("drain retry fault gpu %d svms %p\n", i, svms);

		amdgpu_ih_wait_on_checkpoint_process(pdd->dev->adev,
						     &pdd->dev->adev->irq.ih1);
		pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
	}
	if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain)
		goto restart;
}
static void svm_range_deferred_list_work(struct work_struct *work)
{
	struct svm_range_list *svms;
	struct svm_range *prange;
	struct mm_struct *mm;
	struct kfd_process *p;

	svms = container_of(work, struct svm_range_list, deferred_list_work);
	pr_debug("enter svms 0x%p\n", svms);

	p = container_of(svms, struct kfd_process, svms);
	/* Keep mm from going away while we insert the mmu notifier */
	mm = get_task_mm(p->lead_thread);
	if (!mm) {
		pr_debug("svms 0x%p process mm gone\n", svms);
		return;
	}
retry:
	mmap_write_lock(mm);

	/* Checking for the need to drain retry faults must be inside
	 * mmap write lock to serialize with munmap notifiers.
	 */
	if (unlikely(atomic_read(&svms->drain_pagefaults))) {
		mmap_write_unlock(mm);
		svm_range_drain_retry_fault(svms);
		goto retry;
	}

	spin_lock(&svms->deferred_list_lock);
	while (!list_empty(&svms->deferred_range_list)) {
		prange = list_first_entry(&svms->deferred_range_list,
					  struct svm_range, deferred_list);
		list_del_init(&prange->deferred_list);
		spin_unlock(&svms->deferred_list_lock);

		pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
			 prange->start, prange->last, prange->work_item.op);

		mutex_lock(&svms->lock);
		mutex_lock(&prange->migrate_mutex);
		while (!list_empty(&prange->child_list)) {
			struct svm_range *pchild;

			pchild = list_first_entry(&prange->child_list,
						  struct svm_range, child_list);
			pr_debug("child prange 0x%p op %d\n", pchild,
				 pchild->work_item.op);
			list_del_init(&pchild->child_list);
			svm_range_handle_list_op(svms, pchild);
		}
		mutex_unlock(&prange->migrate_mutex);

		svm_range_handle_list_op(svms, prange);
		mutex_unlock(&svms->lock);

		spin_lock(&svms->deferred_list_lock);
	}
	spin_unlock(&svms->deferred_list_lock);

	mmap_write_unlock(mm);
	mmput(mm);
	pr_debug("exit svms 0x%p\n", svms);
}
static void
svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
			struct mm_struct *mm, enum svm_work_list_ops op)
{
	spin_lock(&svms->deferred_list_lock);
	/* if prange is on the deferred list */
	if (!list_empty(&prange->deferred_list)) {
		pr_debug("update exist prange 0x%p work op %d\n", prange, op);
		WARN_ONCE(prange->work_item.mm != mm, "unmatch mm\n");
		if (op != SVM_OP_NULL &&
		    prange->work_item.op != SVM_OP_UNMAP_RANGE)
			prange->work_item.op = op;
	} else {
		prange->work_item.op = op;
		prange->work_item.mm = mm;
		list_add_tail(&prange->deferred_list,
			      &prange->svms->deferred_range_list);
		pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
			 prange, prange->start, prange->last, op);
	}
	spin_unlock(&svms->deferred_list_lock);
}
void schedule_deferred_list_work(struct svm_range_list *svms)
{
	spin_lock(&svms->deferred_list_lock);
	if (!list_empty(&svms->deferred_range_list))
		schedule_work(&svms->deferred_list_work);
	spin_unlock(&svms->deferred_list_lock);
}
static void
svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
		      struct svm_range *prange, unsigned long start,
		      unsigned long last)
{
	struct svm_range *head;
	struct svm_range *tail;

	if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
		pr_debug("prange 0x%p [0x%lx 0x%lx] is already freed\n", prange,
			 prange->start, prange->last);
		return;
	}
	if (start > prange->last || last < prange->start)
		return;

	head = tail = prange;
	if (start > prange->start)
		svm_range_split(prange, prange->start, start - 1, &tail);
	if (last < tail->last)
		svm_range_split(tail, last + 1, tail->last, &head);

	if (head != prange && tail != prange) {
		svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
		svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
	} else if (tail != prange) {
		svm_range_add_child(parent, mm, tail, SVM_OP_UNMAP_RANGE);
	} else if (head != prange) {
		svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
	} else if (parent != prange) {
		prange->work_item.op = SVM_OP_UNMAP_RANGE;
	}
}
static void
svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
			 unsigned long start, unsigned long last)
{
	struct svm_range_list *svms;
	struct svm_range *pchild;
	struct kfd_process *p;
	unsigned long s, l;
	bool unmap_parent;

	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return;
	svms = &p->svms;

	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
		 prange, prange->start, prange->last, start, last);

	/* Make sure pending page faults are drained in the deferred worker
	 * before the range is freed to avoid straggler interrupts on
	 * unmapped memory causing "phantom faults".
	 */
	atomic_inc(&svms->drain_pagefaults);

	unmap_parent = start <= prange->start && last >= prange->last;

	list_for_each_entry(pchild, &prange->child_list, child_list) {
		mutex_lock_nested(&pchild->lock, 1);
		s = max(start, pchild->start);
		l = min(last, pchild->last);
		if (l >= s)
			svm_range_unmap_from_gpus(pchild, s, l);
		svm_range_unmap_split(mm, prange, pchild, start, last);
		mutex_unlock(&pchild->lock);
	}
	s = max(start, prange->start);
	l = min(last, prange->last);
	if (l >= s)
		svm_range_unmap_from_gpus(prange, s, l);
	svm_range_unmap_split(mm, prange, prange, start, last);

	if (unmap_parent)
		svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
	else
		svm_range_add_list_work(svms, prange, mm,
					SVM_OP_UPDATE_RANGE_NOTIFIER);
	schedule_deferred_list_work(svms);

	kfd_unref_process(p);
}
/**
 * svm_range_cpu_invalidate_pagetables - interval notifier callback
 *
 * If the event is MMU_NOTIFY_UNMAP, this is from a CPU unmap of the range;
 * otherwise it is from migration or a CPU page invalidation callback.
 *
 * For an unmap event, unmap the range from GPUs, remove prange from svms in a
 * delayed work thread, and split prange if only part of prange is unmapped.
 *
 * For an invalidation event, if GPU retry fault is not enabled, evict the
 * queues, then schedule svm_range_restore_work to update the GPU mapping and
 * resume the queues. If GPU retry fault is enabled, unmap the svm range from
 * the GPU; a retry fault will update the GPU mapping later to recover.
 *
 * Context: mmap lock, notifier_invalidate_start lock are held
 *          for invalidate event, prange lock is held if this is from migration
 */
2180 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
2181 const struct mmu_notifier_range *range,
2182 unsigned long cur_seq)
2184 struct svm_range *prange;
2185 unsigned long start;
2188 if (range->event == MMU_NOTIFY_RELEASE)
2191 start = mni->interval_tree.start;
2192 last = mni->interval_tree.last;
2193 start = (start > range->start ? start : range->start) >> PAGE_SHIFT;
2194 last = (last < (range->end - 1) ? last : range->end - 1) >> PAGE_SHIFT;
2195 pr_debug("[0x%lx 0x%lx] range[0x%lx 0x%lx] notifier[0x%lx 0x%lx] %d\n",
2196 start, last, range->start >> PAGE_SHIFT,
2197 (range->end - 1) >> PAGE_SHIFT,
2198 mni->interval_tree.start >> PAGE_SHIFT,
2199 mni->interval_tree.last >> PAGE_SHIFT, range->event);
2201 prange = container_of(mni, struct svm_range, notifier);
2203 svm_range_lock(prange);
2204 mmu_interval_set_seq(mni, cur_seq);
2206 switch (range->event) {
2207 case MMU_NOTIFY_UNMAP:
2208 svm_range_unmap_from_cpu(mni->mm, prange, start, last);
2209 break;
2210 default:
2211 svm_range_evict(prange, mni->mm, start, last);
2215 svm_range_unlock(prange);
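/* Added commentary: mmu_interval_set_seq() above invalidates pages collected
 * by concurrent readers. A minimal sketch of the standard consumer pattern it
 * pairs with (in this driver the equivalent logic is wrapped by
 * amdgpu_hmm_range_get_pages()/amdgpu_hmm_range_get_pages_done()):
 *
 *	unsigned long seq = mmu_interval_read_begin(&prange->notifier);
 *	// ... fault/collect pages ...
 *	svm_range_lock(prange);
 *	if (mmu_interval_read_retry(&prange->notifier, seq)) {
 *		svm_range_unlock(prange);
 *		goto retry;	// invalidated concurrently, pages are stale
 *	}
 *	// ... safe to update GPU page tables ...
 *	svm_range_unlock(prange);
 */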
2221 * svm_range_from_addr - find svm range from fault address
2222 * @svms: svm range list header
2223 * @addr: address to search range interval tree, in pages
2224 * @parent: parent range if range is on child list
2226 * Context: The caller must hold svms->lock
2228 * Return: the svm_range found or NULL
2230 struct svm_range *
2231 svm_range_from_addr(struct svm_range_list *svms, unsigned long addr,
2232 struct svm_range **parent)
2234 struct interval_tree_node *node;
2235 struct svm_range *prange;
2236 struct svm_range *pchild;
2238 node = interval_tree_iter_first(&svms->objects, addr, addr);
2239 if (!node)
2240 return NULL;
2242 prange = container_of(node, struct svm_range, it_node);
2243 pr_debug("address 0x%lx prange [0x%lx 0x%lx] node [0x%lx 0x%lx]\n",
2244 addr, prange->start, prange->last, node->start, node->last);
2246 if (addr >= prange->start && addr <= prange->last) {
2251 list_for_each_entry(pchild, &prange->child_list, child_list)
2252 if (addr >= pchild->start && addr <= pchild->last) {
2253 pr_debug("found address 0x%lx pchild [0x%lx 0x%lx]\n",
2254 addr, pchild->start, pchild->last);
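/* Added usage sketch (hypothetical caller): resolving a faulting page
 * address; @parent is needed for child ranges, which live on the parent's
 * child_list rather than in the interval tree:
 *
 *	struct svm_range *parent, *prange;
 *
 *	mutex_lock(&svms->lock);
 *	prange = svm_range_from_addr(svms, addr, &parent);
 *	if (prange)
 *		pr_debug("addr 0x%lx hits [0x%lx 0x%lx]\n",
 *			 addr, prange->start, prange->last);
 *	mutex_unlock(&svms->lock);
 */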
2263 /* svm_range_best_restore_location - decide the best fault restore location
2264 * @prange: svm range structure
2265 * @adev: the GPU on which vm fault happened
2267 * This is only called when xnack is on, to decide the best location to
2268 * restore the range mapping after a GPU vm fault. The caller uses the best
2269 * location to migrate the range if its actual loc is not the best location,
2270 * then updates the GPU page table mapping to the best location.
2272 * If the preferred loc is accessible by the faulting GPU, use the preferred loc.
2273 * If the faulting gpu idx is set in the range's ACCESSIBLE bitmap, best_loc is
2274 * the faulting gpu.
2275 * If the faulting gpu idx is set in the range's ACCESSIBLE_IN_PLACE bitmap:
2276 * if the range's actual loc is cpu, best_loc is cpu;
2277 * if the faulting gpu is on the same xgmi hive as the actual loc gpu,
2278 * best_loc is the range's actual loc.
2279 * Otherwise, the GPU has no access and best_loc is -1.
2280 *
2281 * Return:
2282 * -1 if the faulting GPU has no access; otherwise 0 for CPU, or a GPU id
2284 static int32_t
2285 svm_range_best_restore_location(struct svm_range *prange,
2286 struct amdgpu_device *adev,
2287 int32_t *gpuidx)
2289 struct amdgpu_device *bo_adev, *preferred_adev;
2290 struct kfd_process *p;
2291 uint32_t gpuid;
2292 int r;
2294 p = container_of(prange->svms, struct kfd_process, svms);
2296 r = kfd_process_gpuid_from_adev(p, adev, &gpuid, gpuidx);
2298 pr_debug("failed to get gpuid from kgd\n");
2302 if (prange->preferred_loc == gpuid ||
2303 prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) {
2304 return prange->preferred_loc;
2305 } else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
2306 preferred_adev = svm_range_get_adev_by_id(prange,
2307 prange->preferred_loc);
2308 if (amdgpu_xgmi_same_hive(adev, preferred_adev))
2309 return prange->preferred_loc;
2313 if (test_bit(*gpuidx, prange->bitmap_access))
2314 return gpuid;
2316 if (test_bit(*gpuidx, prange->bitmap_aip)) {
2317 if (!prange->actual_loc)
2318 return 0;
2320 bo_adev = svm_range_get_adev_by_id(prange, prange->actual_loc);
2321 if (amdgpu_xgmi_same_hive(adev, bo_adev))
2322 return prange->actual_loc;
2330 static int
2331 svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
2332 unsigned long *start, unsigned long *last,
2333 bool *is_heap_stack)
2335 struct vm_area_struct *vma;
2336 struct interval_tree_node *node;
2337 unsigned long start_limit, end_limit;
2339 vma = find_vma(p->mm, addr << PAGE_SHIFT);
2340 if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) {
2341 pr_debug("VMA does not exist in address [0x%llx]\n", addr);
2345 *is_heap_stack = (vma->vm_start <= vma->vm_mm->brk &&
2346 vma->vm_end >= vma->vm_mm->start_brk) ||
2347 (vma->vm_start <= vma->vm_mm->start_stack &&
2348 vma->vm_end >= vma->vm_mm->start_stack);
2350 start_limit = max(vma->vm_start >> PAGE_SHIFT,
2351 (unsigned long)ALIGN_DOWN(addr, 2UL << 8));
2352 end_limit = min(vma->vm_end >> PAGE_SHIFT,
2353 (unsigned long)ALIGN(addr + 1, 2UL << 8));
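/* Added note: 2UL << 8 is 512 pages, i.e. 2MB with 4K pages; the candidate
 * range is grown to the surrounding 2MB-aligned block, clamped to the VMA.
 */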
2354 /* First range that starts after the fault address */
2355 node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX);
2357 end_limit = min(end_limit, node->start);
2358 /* Last range that ends before the fault address */
2359 node = container_of(rb_prev(&node->rb),
2360 struct interval_tree_node, rb);
2362 /* Last range must end before addr because
2363 * there was no range after addr
2365 node = container_of(rb_last(&p->svms.objects.rb_root),
2366 struct interval_tree_node, rb);
2369 if (node->last >= addr) {
2370 WARN(1, "Overlap with prev node and page fault addr\n");
2373 start_limit = max(start_limit, node->last + 1);
2376 *start = start_limit;
2377 *last = end_limit - 1;
2379 pr_debug("vma [0x%lx 0x%lx] range [0x%lx 0x%lx] is_heap_stack %d\n",
2380 vma->vm_start >> PAGE_SHIFT, vma->vm_end >> PAGE_SHIFT,
2381 *start, *last, *is_heap_stack);
2386 static int
2387 svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last,
2388 uint64_t *bo_s, uint64_t *bo_l)
2390 struct amdgpu_bo_va_mapping *mapping;
2391 struct interval_tree_node *node;
2392 struct amdgpu_bo *bo = NULL;
2393 unsigned long userptr;
2397 for (i = 0; i < p->n_pdds; i++) {
2398 struct amdgpu_vm *vm;
2400 if (!p->pdds[i]->drm_priv)
2401 continue;
2403 vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
2404 r = amdgpu_bo_reserve(vm->root.bo, false);
2408 /* Check userptr by searching entire vm->va interval tree */
2409 node = interval_tree_iter_first(&vm->va, 0, ~0ULL);
2411 mapping = container_of((struct rb_node *)node,
2412 struct amdgpu_bo_va_mapping, rb);
2413 bo = mapping->bo_va->base.bo;
2415 if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
2416 start << PAGE_SHIFT,
2417 last << PAGE_SHIFT,
2418 &userptr)) {
2419 node = interval_tree_iter_next(node, 0, ~0ULL);
2423 pr_debug("[0x%llx 0x%llx] already userptr mapped\n",
2426 *bo_s = userptr >> PAGE_SHIFT;
2427 *bo_l = *bo_s + bo->tbo.ttm->num_pages - 1;
2429 amdgpu_bo_unreserve(vm->root.bo);
2430 return -EADDRINUSE;
2431 }
2432 amdgpu_bo_unreserve(vm->root.bo);
2437 static struct
2438 svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
2439 struct kfd_process *p,
2440 struct mm_struct *mm,
2441 int64_t addr)
2443 struct svm_range *prange = NULL;
2444 unsigned long start, last;
2445 uint32_t gpuid, gpuidx;
2446 bool is_heap_stack;
2447 uint64_t bo_s = 0;
2448 uint64_t bo_l = 0;
2449 int r;
2451 if (svm_range_get_range_boundaries(p, addr, &start, &last,
2452 &is_heap_stack))
2453 return NULL;
2455 r = svm_range_check_vm(p, start, last, &bo_s, &bo_l);
2456 if (r != -EADDRINUSE)
2457 r = svm_range_check_vm_userptr(p, start, last, &bo_s, &bo_l);
2459 if (r == -EADDRINUSE) {
2460 if (addr >= bo_s && addr <= bo_l)
2461 return NULL;
2463 /* Create a one-page svm range if the 2MB range overlaps an existing mapping */
2464 start = last = addr;
2465 }
2468 prange = svm_range_new(&p->svms, start, last);
2470 pr_debug("Failed to create prange in address [0x%llx]\n", addr);
2473 if (kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx)) {
2474 pr_debug("failed to get gpuid from kgd\n");
2475 svm_range_free(prange);
2476 return NULL;
2477 }
2479 if (is_heap_stack)
2480 prange->preferred_loc = KFD_IOCTL_SVM_LOCATION_SYSMEM;
2482 svm_range_add_to_svms(prange);
2483 svm_range_add_notifier_locked(mm, prange);
2488 /* svm_range_skip_recover - decide if prange can be recovered
2489 * @prange: svm range structure
2491 * The GPU vm retry fault handler skips recovering the range in these cases:
2492 * 1. prange is on the deferred list to be removed after unmap; it is a stale
2493 * fault, and the deferred list work will drain it before freeing the prange.
2494 * 2. prange is on the deferred list to add its interval notifier after split.
2495 * 3. prange is a child range split from a parent prange; recover it later,
2496 * after the interval notifier is added.
2498 * Return: true to skip recover, false to recover
2500 static bool svm_range_skip_recover(struct svm_range *prange)
2502 struct svm_range_list *svms = prange->svms;
2504 spin_lock(&svms->deferred_list_lock);
2505 if (list_empty(&prange->deferred_list) &&
2506 list_empty(&prange->child_list)) {
2507 spin_unlock(&svms->deferred_list_lock);
2510 spin_unlock(&svms->deferred_list_lock);
2512 if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2513 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] unmapped\n",
2514 svms, prange, prange->start, prange->last);
2517 if (prange->work_item.op == SVM_OP_ADD_RANGE_AND_MAP ||
2518 prange->work_item.op == SVM_OP_ADD_RANGE) {
2519 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] not added yet\n",
2520 svms, prange, prange->start, prange->last);
2526 static void
2527 svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p,
2528 int32_t gpuidx)
2530 struct kfd_process_device *pdd;
2532 /* fault is on different page of same range
2533 * or fault is skipped to recover later
2534 * or fault is on invalid virtual address
2536 if (gpuidx == MAX_GPU_INSTANCE) {
2537 uint32_t gpuid;
2538 int r;
2540 r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx);
2545 /* fault is recovered
2546 * or fault cannot be recovered because the GPU has no access on the range
2548 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
2550 WRITE_ONCE(pdd->faults, pdd->faults + 1);
2553 static bool
2554 svm_fault_allowed(struct vm_area_struct *vma, bool write_fault)
2556 unsigned long requested = VM_READ;
2559 requested |= VM_WRITE;
2561 pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested,
2563 return (vma->vm_flags & requested) == requested;
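/* Added commentary for svm_range_restore_pages below: the retry-fault
 * recovery flow is, in order: look up the process by pasid and take its mm;
 * under svms->lock find the range (creating an unregistered range under the
 * mmap write lock if the address is not registered); skip stale or duplicate
 * faults; check that the VMA still exists and permits the access; pick the
 * best restore location; migrate if needed; then validate and map the range
 * so the GPU can retry.
 */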
2566 int
2567 svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
2568 uint64_t addr, bool write_fault)
2570 struct mm_struct *mm = NULL;
2571 struct svm_range_list *svms;
2572 struct svm_range *prange;
2573 struct kfd_process *p;
2574 uint64_t timestamp;
2575 int32_t best_loc;
2576 int32_t gpuidx = MAX_GPU_INSTANCE;
2577 bool write_locked = false;
2578 struct vm_area_struct *vma;
2579 int r = 0;
2581 if (!KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) {
2582 pr_debug("device does not support SVM\n");
2586 p = kfd_lookup_process_by_pasid(pasid);
2588 pr_debug("kfd process not founded pasid 0x%x\n", pasid);
2591 if (!p->xnack_enabled) {
2592 pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
2598 pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
2600 if (atomic_read(&svms->drain_pagefaults)) {
2601 pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
2606 /* p->lead_thread is available as kfd_process_wq_release flushes the work
2607 * before releasing the task ref.
2609 mm = get_task_mm(p->lead_thread);
2611 pr_debug("svms 0x%p failed to get mm\n", svms);
2618 mutex_lock(&svms->lock);
2619 prange = svm_range_from_addr(svms, addr, NULL);
2621 pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
2623 if (!write_locked) {
2624 /* Need the write lock to create new range with MMU notifier.
2625 * Also flush pending deferred work to make sure the interval
2626 * tree is up to date before we add a new range
2628 mutex_unlock(&svms->lock);
2629 mmap_read_unlock(mm);
2630 mmap_write_lock(mm);
2631 write_locked = true;
2632 goto retry_write_locked;
2634 prange = svm_range_create_unregistered_range(adev, p, mm, addr);
2636 pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
2638 mmap_write_downgrade(mm);
2639 r = -EFAULT;
2640 goto out_unlock_svms;
2644 mmap_write_downgrade(mm);
2646 mutex_lock(&prange->migrate_mutex);
2648 if (svm_range_skip_recover(prange)) {
2649 amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
2651 goto out_unlock_range;
2654 timestamp = ktime_to_us(ktime_get()) - prange->validate_timestamp;
2655 /* skip duplicate vm fault on different pages of same range */
2656 if (timestamp < AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING) {
2657 pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
2658 svms, prange->start, prange->last);
2660 goto out_unlock_range;
2663 /* __do_munmap removed the VMA; return success as we are handling the
2664 * stale retry fault.
2665 */
2666 vma = find_vma(mm, addr << PAGE_SHIFT);
2667 if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) {
2668 pr_debug("address 0x%llx VMA is removed\n", addr);
2670 goto out_unlock_range;
2673 if (!svm_fault_allowed(vma, write_fault)) {
2674 pr_debug("fault addr 0x%llx no %s permission\n", addr,
2675 write_fault ? "write" : "read");
2677 goto out_unlock_range;
2680 best_loc = svm_range_best_restore_location(prange, adev, &gpuidx);
2681 if (best_loc == -1) {
2682 pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n",
2683 svms, prange->start, prange->last);
2685 goto out_unlock_range;
2688 pr_debug("svms %p [0x%lx 0x%lx] best restore 0x%x, actual loc 0x%x\n",
2689 svms, prange->start, prange->last, best_loc,
2690 prange->actual_loc);
2692 if (prange->actual_loc != best_loc) {
2694 r = svm_migrate_to_vram(prange, best_loc, mm);
2696 pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n",
2698 /* Fall back to system memory if migration to
2699 * VRAM failed
2700 */
2701 if (prange->actual_loc)
2702 r = svm_migrate_vram_to_ram(prange, mm);
2707 r = svm_migrate_vram_to_ram(prange, mm);
2710 pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
2711 r, svms, prange->start, prange->last);
2712 goto out_unlock_range;
2716 r = svm_range_validate_and_map(mm, prange, gpuidx, false, false);
2718 pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
2719 r, svms, prange->start, prange->last);
2721 out_unlock_range:
2722 mutex_unlock(&prange->migrate_mutex);
2723 out_unlock_svms:
2724 mutex_unlock(&svms->lock);
2725 mmap_read_unlock(mm);
2727 svm_range_count_fault(adev, p, gpuidx);
2729 mmput(mm);
2730 out:
2731 kfd_unref_process(p);
2734 pr_debug("recover vm fault later\n");
2735 amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
2741 void svm_range_list_fini(struct kfd_process *p)
2743 struct svm_range *prange;
2744 struct svm_range *next;
2746 pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms);
2748 /* Ensure list work is finished before process is destroyed */
2749 flush_work(&p->svms.deferred_list_work);
2752 * Ensure no retry fault comes in afterwards, as the page fault handler will
2753 * not find the kfd process and cannot take the mm lock to recover the fault.
2755 atomic_inc(&p->svms.drain_pagefaults);
2756 svm_range_drain_retry_fault(&p->svms);
2759 list_for_each_entry_safe(prange, next, &p->svms.list, list) {
2760 svm_range_unlink(prange);
2761 svm_range_remove_notifier(prange);
2762 svm_range_free(prange);
2765 mutex_destroy(&p->svms.lock);
2767 pr_debug("pasid 0x%x svms 0x%p done\n", p->pasid, &p->svms);
2770 int svm_range_list_init(struct kfd_process *p)
2772 struct svm_range_list *svms = &p->svms;
2775 svms->objects = RB_ROOT_CACHED;
2776 mutex_init(&svms->lock);
2777 INIT_LIST_HEAD(&svms->list);
2778 atomic_set(&svms->evicted_ranges, 0);
2779 atomic_set(&svms->drain_pagefaults, 0);
2780 INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
2781 INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
2782 INIT_LIST_HEAD(&svms->deferred_range_list);
2783 spin_lock_init(&svms->deferred_list_lock);
2785 for (i = 0; i < p->n_pdds; i++)
2786 if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev))
2787 bitmap_set(svms->bitmap_supported, i, 1);
2793 * svm_range_check_vm - check if virtual address range mapped already
2794 * @p: current kfd_process
2795 * @start: range start address, in pages
2796 * @last: range last address, in pages
2797 * @bo_s: mapping start address in pages if address range already mapped
2798 * @bo_l: mapping last address in pages if address range already mapped
2800 * The purpose is to avoid collisions with virtual address ranges already
2801 * allocated by the kfd_ioctl_alloc_memory_of_gpu ioctl.
2802 * It checks each pdd in the kfd_process.
2804 * Context: Process context
2806 * Return 0 - OK, if the range is not mapped.
2807 * Otherwise error code:
2808 * -EADDRINUSE - if address is mapped already by kfd_ioctl_alloc_memory_of_gpu
2809 * -ERESTARTSYS - A wait for the buffer to become unreserved was interrupted by
2810 * a signal. Release all buffer reservations and return to user-space.
2812 static int
2813 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
2814 uint64_t *bo_s, uint64_t *bo_l)
2816 struct amdgpu_bo_va_mapping *mapping;
2817 struct interval_tree_node *node;
2821 for (i = 0; i < p->n_pdds; i++) {
2822 struct amdgpu_vm *vm;
2824 if (!p->pdds[i]->drm_priv)
2827 vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
2828 r = amdgpu_bo_reserve(vm->root.bo, false);
2832 node = interval_tree_iter_first(&vm->va, start, last);
2834 pr_debug("range [0x%llx 0x%llx] already TTM mapped\n",
2836 mapping = container_of((struct rb_node *)node,
2837 struct amdgpu_bo_va_mapping, rb);
2838 if (bo_s && bo_l) {
2839 *bo_s = mapping->start;
2840 *bo_l = mapping->last;
2842 amdgpu_bo_unreserve(vm->root.bo);
2843 return -EADDRINUSE;
2844 }
2845 amdgpu_bo_unreserve(vm->root.bo);
2852 * svm_range_is_valid - check if virtual address range is valid
2853 * @p: current kfd_process
2854 * @start: range start address, in pages
2855 * @size: range size, in pages
2857 * A valid virtual address range means it belongs to one or more VMAs and
2858 * none of them is device-accessible (VM_IO | VM_PFNMAP | VM_MIXEDMAP).
2859 * Context: Process context
2862 * 0 - OK, otherwise error code
2864 static int
2865 svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size)
2867 const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
2868 struct vm_area_struct *vma;
2870 unsigned long start_unchg = start;
2872 start <<= PAGE_SHIFT;
2873 end = start + (size << PAGE_SHIFT);
2874 do {
2875 vma = find_vma(p->mm, start);
2876 if (!vma || start < vma->vm_start ||
2877 (vma->vm_flags & device_vma))
2878 return -EFAULT;
2879 start = min(end, vma->vm_end);
2880 } while (start < end);
2882 return svm_range_check_vm(p, start_unchg, (end - 1) >> PAGE_SHIFT, NULL,
2883 NULL);
2887 * svm_range_add - add svm range and handle overlap
2888 * @p: the process whose svms the range is added to
2889 * @start: range start address, in pages
2890 * @size: range size, in pages
2891 * @nattr: number of attributes
2892 * @attrs: array of attributes
2893 * @update_list: output, the ranges need validate and update GPU mapping
2894 * @insert_list: output, the ranges need insert to svms
2895 * @remove_list: output, the ranges are replaced and need remove from svms
2897 * Check if the virtual address range overlaps the registered ranges; split
2898 * the overlapped ranges, and copy and adjust the page addresses and vram
2899 * nodes of the old and new ranges.
2901 * Context: Process context, caller must hold svms->lock
2904 * 0 - OK, otherwise error code
2906 static int
2907 svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
2908 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
2909 struct list_head *update_list, struct list_head *insert_list,
2910 struct list_head *remove_list)
2912 uint64_t last = start + size - 1UL;
2913 struct svm_range_list *svms;
2914 struct svm_range new = {0};
2915 struct svm_range *prange;
2916 unsigned long left = 0;
2919 pr_debug("svms 0x%p [0x%llx 0x%llx]\n", &p->svms, start, last);
2921 svm_range_apply_attrs(p, &new, nattr, attrs);
2923 svms = &p->svms;
2925 r = svm_range_handle_overlap(svms, &new, start, last, update_list,
2926 insert_list, remove_list, &left);
2927 if (r)
2928 return r;
2930 if (left) {
2931 prange = svm_range_new(svms, last - left + 1, last);
2932 list_add(&prange->insert_list, insert_list);
2933 list_add(&prange->update_list, update_list);
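/* Added example (illustrative): if [0x200 0x37f] is registered while only
 * [0x200 0x2ff] exists, the overlapping part is updated in place via
 * update_list, and the trailing gap [0x300 0x37f] (left == 0x80 pages) is
 * created just above and queued on both insert_list and update_list.
 */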
2940 * svm_range_best_prefetch_location - decide the best prefetch location
2941 * @prange: svm range structure
2944 * If the range maps to a single GPU, the best prefetch location is
2945 * prefetch_loc, which can be the CPU or a GPU.
2947 * If the range is not ACCESS_IN_PLACE by mGPUs, the best prefetch location is
2948 * prefetch_loc; access from another GPU generates a vm fault and triggers
2949 * migration.
2951 * If the range is ACCESS_IN_PLACE by mGPUs, the best prefetch location is the
2952 * prefetch_loc GPU only if the mGPU connection is on the same XGMI hive;
2953 * otherwise the best prefetch location is always the CPU, because a GPU
2954 * cannot have a coherent mapping of another GPU's VRAM even with a large-BAR
2955 * PCIe connection.
2960 * Context: Process context
2962 * Return:
2963 * 0 for CPU, or a GPU id
2965 static uint32_t
2966 svm_range_best_prefetch_location(struct svm_range *prange)
2968 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
2969 uint32_t best_loc = prange->prefetch_loc;
2970 struct kfd_process_device *pdd;
2971 struct amdgpu_device *bo_adev;
2972 struct kfd_process *p;
2975 p = container_of(prange->svms, struct kfd_process, svms);
2977 if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED)
2980 bo_adev = svm_range_get_adev_by_id(prange, best_loc);
2982 WARN_ONCE(1, "failed to get device by id 0x%x\n", best_loc);
2987 if (p->xnack_enabled)
2988 bitmap_copy(bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
2990 bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
2993 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
2994 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
2996 pr_debug("failed to get device by idx 0x%x\n", gpuidx);
3000 if (pdd->dev->adev == bo_adev)
3003 if (!amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
3010 pr_debug("xnack %d svms 0x%p [0x%lx 0x%lx] best loc 0x%x\n",
3011 p->xnack_enabled, &p->svms, prange->start, prange->last,
3017 /* FIXME: This is a workaround for a page locking bug when some pages are
3018 * invalid during migration to VRAM
3020 void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm,
3023 struct hmm_range *hmm_range;
3026 if (prange->validated_once)
3029 r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
3030 prange->start << PAGE_SHIFT,
3031 prange->npages, &hmm_range,
3032 false, true, owner);
3034 amdgpu_hmm_range_get_pages_done(hmm_range);
3035 prange->validated_once = true;
3039 /* svm_range_trigger_migration - start page migration if prefetch loc changed
3040 * @mm: current process mm_struct
3041 * @prange: svm range structure
3042 * @migrated: output, true if migration is triggered
3044 * If the range's prefetch_loc is a GPU and its actual loc is cpu 0, migrate
3045 * the range from ram to vram.
3046 * If the range's prefetch_loc is cpu 0 and its actual loc is a GPU, migrate
3047 * the range from vram to ram.
3049 * If GPU vm fault retry is not enabled, migration interacts with the MMU
3050 * notifier as follows:
3051 * 1. migrate_vma_setup invalidates pages; the MMU notifier callback
3052 * svm_range_evict stops all queues and schedules the restore work
3053 * 2. svm_range_restore_work waits for the migration to finish via
3054 * a. svm_range_validate_vram taking prange->migrate_mutex
3055 * b. svm_range_validate_ram HMM get pages waiting for the CPU fault
3056 * handler to return
3057 * 3. the restore work updates the GPU mappings and resumes all queues.
3058 * Context: Process context
3061 * 0 - OK, otherwise - error code of migration
3063 static int
3064 svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
3065 bool *migrated)
3071 best_loc = svm_range_best_prefetch_location(prange);
3073 if (best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3074 best_loc == prange->actual_loc)
3078 r = svm_migrate_vram_to_ram(prange, mm);
3083 r = svm_migrate_to_vram(prange, best_loc, mm);
3089 int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
3094 if (dma_fence_is_signaled(&fence->base))
3095 return 0;
3097 if (fence->svm_bo) {
3098 WRITE_ONCE(fence->svm_bo->evicting, 1);
3099 schedule_work(&fence->svm_bo->eviction_work);
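/* Added note (assumption about the reader side): WRITE_ONCE() here pairs
 * with lockless reads of svm_bo->evicting in the BO reuse/validation path,
 * so the store cannot be torn or reordered by the compiler before the
 * eviction worker runs.
 */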
3105 static void svm_range_evict_svm_bo_worker(struct work_struct *work)
3107 struct svm_range_bo *svm_bo;
3108 struct kfd_process *p;
3109 struct mm_struct *mm;
3111 svm_bo = container_of(work, struct svm_range_bo, eviction_work);
3112 if (!svm_bo_ref_unless_zero(svm_bo))
3113 return; /* svm_bo was freed while eviction was pending */
3115 /* svm_range_bo_release destroys this worker thread. So during
3116 * the lifetime of this thread, kfd_process and mm will be valid.
3118 p = container_of(svm_bo->svms, struct kfd_process, svms);
3119 mm = p->mm;
3120 mmap_read_lock(mm);
3124 spin_lock(&svm_bo->list_lock);
3125 while (!list_empty(&svm_bo->range_list)) {
3126 struct svm_range *prange =
3127 list_first_entry(&svm_bo->range_list,
3128 struct svm_range, svm_bo_list);
3130 int retries = 3;
3131 list_del_init(&prange->svm_bo_list);
3132 spin_unlock(&svm_bo->list_lock);
3134 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
3135 prange->start, prange->last);
3137 mutex_lock(&prange->migrate_mutex);
3138 do {
3139 svm_migrate_vram_to_ram(prange,
3140 svm_bo->eviction_fence->mm);
3141 } while (prange->actual_loc && --retries);
3142 WARN(prange->actual_loc, "Migration failed during eviction");
3144 mutex_lock(&prange->lock);
3145 prange->svm_bo = NULL;
3146 mutex_unlock(&prange->lock);
3148 mutex_unlock(&prange->migrate_mutex);
3150 spin_lock(&svm_bo->list_lock);
3152 spin_unlock(&svm_bo->list_lock);
3153 mmap_read_unlock(mm);
3155 dma_fence_signal(&svm_bo->eviction_fence->base);
3156 /* This is the last reference to svm_bo, after svm_range_vram_node_free
3157 * has been called in svm_migrate_vram_to_ram
3159 WARN_ONCE(kref_read(&svm_bo->kref) != 1, "This was not the last reference\n");
3160 svm_range_bo_unref(svm_bo);
3163 static int
3164 svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
3165 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
3167 struct mm_struct *mm = current->mm;
3168 struct list_head update_list;
3169 struct list_head insert_list;
3170 struct list_head remove_list;
3171 struct svm_range_list *svms;
3172 struct svm_range *prange;
3173 struct svm_range *next;
3176 pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
3177 p->pasid, &p->svms, start, start + size - 1, size);
3179 r = svm_range_check_attr(p, nattr, attrs);
3180 if (r)
3181 return r;
3183 svms = &p->svms;
3185 svm_range_list_lock_and_flush_work(svms, mm);
3187 r = svm_range_is_valid(p, start, size);
3189 pr_debug("invalid range r=%d\n", r);
3190 mmap_write_unlock(mm);
3194 mutex_lock(&svms->lock);
3196 /* Add new range and split existing ranges as needed */
3197 r = svm_range_add(p, start, size, nattr, attrs, &update_list,
3198 &insert_list, &remove_list);
3199 if (r) {
3200 mutex_unlock(&svms->lock);
3201 mmap_write_unlock(mm);
3204 /* Apply changes as a transaction */
3205 list_for_each_entry_safe(prange, next, &insert_list, insert_list) {
3206 svm_range_add_to_svms(prange);
3207 svm_range_add_notifier_locked(mm, prange);
3209 list_for_each_entry(prange, &update_list, update_list) {
3210 svm_range_apply_attrs(p, prange, nattr, attrs);
3211 /* TODO: unmap ranges from GPU that lost access */
3213 list_for_each_entry_safe(prange, next, &remove_list,
3215 pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
3216 prange->svms, prange, prange->start,
3218 svm_range_unlink(prange);
3219 svm_range_remove_notifier(prange);
3220 svm_range_free(prange);
3223 mmap_write_downgrade(mm);
3224 /* Trigger migrations and revalidate and map to GPUs as needed. If
3225 * this fails we may be left with partially completed actions. There
3226 * is no clean way of rolling back to the previous state in such a
3227 * case because the rollback wouldn't be guaranteed to work either.
3229 list_for_each_entry(prange, &update_list, update_list) {
3230 bool migrated;
3232 mutex_lock(&prange->migrate_mutex);
3234 r = svm_range_trigger_migration(mm, prange, &migrated);
3235 if (r)
3236 goto out_unlock_range;
3238 if (migrated && !p->xnack_enabled) {
3239 pr_debug("restore_work will update mappings of GPUs\n");
3240 mutex_unlock(&prange->migrate_mutex);
3241 continue;
3242 }
3244 r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
3247 pr_debug("failed %d to map svm range\n", r);
3250 mutex_unlock(&prange->migrate_mutex);
3255 svm_range_debug_dump(svms);
3257 mutex_unlock(&svms->lock);
3258 mmap_read_unlock(mm);
3260 pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
3261 &p->svms, start, start + size - 1, r);
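/* Added usage sketch: user space reaches svm_range_set_attr() through the
 * AMDKFD_IOC_SVM ioctl (fields per include/uapi/linux/kfd_ioctl.h; error
 * handling omitted):
 *
 *	struct kfd_ioctl_svm_args *args =
 *		malloc(sizeof(*args) + sizeof(args->attrs[0]));
 *
 *	args->start_addr = addr;	// bytes, page aligned
 *	args->size = size;		// bytes, page aligned
 *	args->op = KFD_IOCTL_SVM_OP_SET_ATTR;
 *	args->nattr = 1;
 *	args->attrs[0].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC;
 *	args->attrs[0].value = gpuid;	// 0 means system memory
 *	ioctl(kfd_fd, AMDKFD_IOC_SVM, args);
 */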
3266 static int
3267 svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size,
3268 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
3270 DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
3271 DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
3272 bool get_preferred_loc = false;
3273 bool get_prefetch_loc = false;
3274 bool get_granularity = false;
3275 bool get_accessible = false;
3276 bool get_flags = false;
3277 uint64_t last = start + size - 1UL;
3278 struct mm_struct *mm = current->mm;
3279 uint8_t granularity = 0xff;
3280 struct interval_tree_node *node;
3281 struct svm_range_list *svms;
3282 struct svm_range *prange;
3283 uint32_t prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3284 uint32_t location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3285 uint32_t flags_and = 0xffffffff;
3286 uint32_t flags_or = 0;
3291 pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
3292 start + size - 1, nattr);
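/* Added commentary: get_attr aggregates over every range intersecting
 * [start, last]: preferred/prefetch locations collapse to UNDEFINED on the
 * first mismatch, accessibility bitmaps are AND-ed, SET_FLAGS reports flags
 * set on all ranges while CLR_FLAGS reports flags clear on all ranges, and
 * granularity is the minimum across the ranges.
 */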
3294 /* Flush pending deferred work to avoid racing with deferred actions from
3295 * previous memory map changes (e.g. munmap). Concurrent memory map changes
3296 * can still race with get_attr because we don't hold the mmap lock. But that
3297 * would be a race condition in the application anyway, and undefined
3298 * behaviour is acceptable in that case.
3300 flush_work(&p->svms.deferred_list_work);
3303 r = svm_range_is_valid(p, start, size);
3304 mmap_read_unlock(mm);
3306 pr_debug("invalid range r=%d\n", r);
3310 for (i = 0; i < nattr; i++) {
3311 switch (attrs[i].type) {
3312 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3313 get_preferred_loc = true;
3315 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3316 get_prefetch_loc = true;
3318 case KFD_IOCTL_SVM_ATTR_ACCESS:
3319 get_accessible = true;
3321 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3322 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3323 get_flags = true;
3324 break;
3325 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3326 get_granularity = true;
3328 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
3329 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
3332 pr_debug("get invalid attr type 0x%x\n", attrs[i].type);
3339 mutex_lock(&svms->lock);
3341 node = interval_tree_iter_first(&svms->objects, start, last);
3343 pr_debug("range attrs not found return default values\n");
3344 svm_range_set_default_attributes(&location, &prefetch_loc,
3345 &granularity, &flags_and);
3346 flags_or = flags_and;
3347 if (p->xnack_enabled)
3348 bitmap_copy(bitmap_access, svms->bitmap_supported,
3351 bitmap_zero(bitmap_access, MAX_GPU_INSTANCE);
3352 bitmap_zero(bitmap_aip, MAX_GPU_INSTANCE);
3355 bitmap_copy(bitmap_access, svms->bitmap_supported, MAX_GPU_INSTANCE);
3356 bitmap_copy(bitmap_aip, svms->bitmap_supported, MAX_GPU_INSTANCE);
3359 struct interval_tree_node *next;
3361 prange = container_of(node, struct svm_range, it_node);
3362 next = interval_tree_iter_next(node, start, last);
3364 if (get_preferred_loc) {
3365 if (prange->preferred_loc ==
3366 KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3367 (location != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3368 location != prange->preferred_loc)) {
3369 location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3370 get_preferred_loc = false;
3372 location = prange->preferred_loc;
3375 if (get_prefetch_loc) {
3376 if (prange->prefetch_loc ==
3377 KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3378 (prefetch_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3379 prefetch_loc != prange->prefetch_loc)) {
3380 prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3381 get_prefetch_loc = false;
3383 prefetch_loc = prange->prefetch_loc;
3386 if (get_accessible) {
3387 bitmap_and(bitmap_access, bitmap_access,
3388 prange->bitmap_access, MAX_GPU_INSTANCE);
3389 bitmap_and(bitmap_aip, bitmap_aip,
3390 prange->bitmap_aip, MAX_GPU_INSTANCE);
3393 flags_and &= prange->flags;
3394 flags_or |= prange->flags;
3397 if (get_granularity && prange->granularity < granularity)
3398 granularity = prange->granularity;
3403 mutex_unlock(&svms->lock);
3405 for (i = 0; i < nattr; i++) {
3406 switch (attrs[i].type) {
3407 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3408 attrs[i].value = location;
3410 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3411 attrs[i].value = prefetch_loc;
3413 case KFD_IOCTL_SVM_ATTR_ACCESS:
3414 gpuidx = kfd_process_gpuidx_from_gpuid(p,
3417 pr_debug("invalid gpuid %x\n", attrs[i].value);
3420 if (test_bit(gpuidx, bitmap_access))
3421 attrs[i].type = KFD_IOCTL_SVM_ATTR_ACCESS;
3422 else if (test_bit(gpuidx, bitmap_aip))
3424 KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE;
3426 attrs[i].type = KFD_IOCTL_SVM_ATTR_NO_ACCESS;
3428 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3429 attrs[i].value = flags_and;
3431 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3432 attrs[i].value = ~flags_or;
3434 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3435 attrs[i].value = (uint32_t)granularity;
3443 int
3444 svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
3445 uint64_t size, uint32_t nattrs, struct kfd_ioctl_svm_attribute *attrs)
3449 start >>= PAGE_SHIFT;
3450 size >>= PAGE_SHIFT;
3453 case KFD_IOCTL_SVM_OP_SET_ATTR:
3454 r = svm_range_set_attr(p, start, size, nattrs, attrs);
3456 case KFD_IOCTL_SVM_OP_GET_ATTR:
3457 r = svm_range_get_attr(p, start, size, nattrs, attrs);