// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020-2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/types.h>
#include <linux/hmm.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/migrate.h>
#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_res_cursor.h"
#include "kfd_priv.h"
#include "kfd_svm.h"
#include "kfd_migrate.h"
#include "kfd_smi_events.h"
#ifdef dev_fmt
#undef dev_fmt
#endif
#define dev_fmt(fmt) "kfd_migrate: " fmt
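/*
 * Return the GPU address at which the SDMA engine can access a given
 * offset inside VRAM through the direct VRAM aperture (TTM_PL_VRAM domain).
 */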
static uint64_t
svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr)
{
	return addr + amdgpu_ttm_domain_start(adev, TTM_PL_VRAM);
}
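
/*
 * Map npages of system memory linearly into GART window 0 so that a
 * single SDMA copy can cover them: the PTEs are built CPU-side in the
 * job's IB, then copied into the GART table by the buffer-funcs ring.
 */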
static int
svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
		     dma_addr_t *addr, uint64_t *gart_addr, uint64_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	unsigned int num_dw, num_bytes;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	uint64_t pte_flags;
	void *cpu_addr;
	int r;

	/* use gart window 0 */
	*gart_addr = adev->gmc.gart_start;

	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
	num_bytes = npages * 8;

	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     num_dw * 4 + num_bytes,
				     AMDGPU_IB_POOL_DELAYED,
				     &job);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes, false);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	pte_flags = AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
	pte_flags |= AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED;
	if (!(flags & KFD_IOCTL_SVM_FLAG_GPU_RO))
		pte_flags |= AMDGPU_PTE_WRITEABLE;
	pte_flags |= adev->gart.gart_pte_flags;

	cpu_addr = &job->ibs[0].ptr[num_dw];

	amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
	fence = amdgpu_job_submit(job);
	dma_fence_put(fence);

	return r;
}
/**
 * svm_migrate_copy_memory_gart - sdma copy data between ram and vram
 *
 * @adev: amdgpu device the sdma ring is running on
 * @sys: system DMA pointer to be copied
 * @vram: vram destination DMA pointer
 * @npages: number of pages to copy
 * @direction: enum MIGRATION_COPY_DIR
 * @mfence: output, sdma fence to signal after sdma is done
 *
 * The ram address uses GART table continuous entries mapping to ram pages,
 * the vram address uses direct mapping of vram pages, which must have npages
 * continuous pages.
 * GART updates and sdma share the same buffer-copy ring; the sdma copy is
 * split into multiple GTT_MAX_PAGES transfers and all sdma operations are
 * serialized. Wait on the last sdma finish fence, which is returned, to check
 * that the memory copy is done.
 *
 * Context: Process context, takes and releases gtt_window_lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
			     uint64_t *vram, uint64_t npages,
			     enum MIGRATION_COPY_DIR direction,
			     struct dma_fence **mfence)
{
	const uint64_t GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	uint64_t gart_s, gart_d;
	struct dma_fence *next;
	uint64_t size;
	int r = 0;

	mutex_lock(&adev->mman.gtt_window_lock);

	while (npages) {
		size = min(GTT_MAX_PAGES, npages);

		if (direction == FROM_VRAM_TO_RAM) {
			gart_s = svm_migrate_direct_mapping_addr(adev, *vram);
			r = svm_migrate_gart_map(ring, size, sys, &gart_d, 0);

		} else if (direction == FROM_RAM_TO_VRAM) {
			r = svm_migrate_gart_map(ring, size, sys, &gart_s,
						 KFD_IOCTL_SVM_FLAG_GPU_RO);
			gart_d = svm_migrate_direct_mapping_addr(adev, *vram);
		}
		if (r) {
			dev_err(adev->dev, "fail %d create gart mapping\n", r);
			goto out_unlock;
		}

		r = amdgpu_copy_buffer(ring, gart_s, gart_d, size * PAGE_SIZE,
				       NULL, &next, false, true, false);
		if (r) {
			dev_err(adev->dev, "fail %d to copy memory\n", r);
			goto out_unlock;
		}

		dma_fence_put(*mfence);
		*mfence = next;
		npages -= size;
		if (npages) {
			sys += size;
			vram += size;
		}
	}

out_unlock:
	mutex_unlock(&adev->mman.gtt_window_lock);

	return r;
}
/**
 * svm_migrate_copy_done - wait for memory copy sdma is done
 *
 * @adev: amdgpu device the sdma memory copy is executing on
 * @mfence: migrate fence
 *
 * Wait for the dma fence to be signaled. If the copy was split into multiple
 * sdma operations, this is the fence of the last sdma operation.
 *
 * Context: called after svm_migrate_copy_memory
 *
 * Return:
 * 0		- success
 * otherwise	- error code from dma fence signal
 */
static int
svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence)
{
	int r = 0;

	if (mfence) {
		r = dma_fence_wait(mfence, false);
		dma_fence_put(mfence);
		pr_debug("sdma copy memory fence done\n");
	}

	return r;
}
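
/*
 * The device pgmap covers all of VRAM, so translating between a VRAM
 * offset and its struct page pfn is a matter of adding or subtracting
 * pgmap.range.start and shifting by PAGE_SHIFT.
 */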
static unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr)
{
	return (addr + adev->kfd.dev->pgmap.range.start) >> PAGE_SHIFT;
}
static void
svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
{
	struct page *page;

	page = pfn_to_page(pfn);
	svm_range_bo_ref(prange->svm_bo);
	page->zone_device_data = prange->svm_bo;
	zone_device_page_init(page);
}
static void
svm_migrate_put_vram_page(struct amdgpu_device *adev, unsigned long addr)
{
	struct page *page;

	page = pfn_to_page(svm_migrate_addr_to_pfn(adev, addr));
	unlock_page(page);
	put_page(page);
}
static unsigned long
svm_migrate_addr(struct amdgpu_device *adev, struct page *page)
{
	unsigned long addr;

	addr = page_to_pfn(page) << PAGE_SHIFT;
	return (addr - adev->kfd.dev->pgmap.range.start);
}
static struct page *
svm_migrate_get_sys_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;

	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
	if (page)
		lock_page(page);

	return page;
}
static void svm_migrate_put_sys_page(unsigned long addr)
{
	struct page *page;

	page = pfn_to_page(addr >> PAGE_SHIFT);
	unlock_page(page);
	put_page(page);
}
static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
{
	unsigned long cpages = 0;
	unsigned long i;

	for (i = 0; i < migrate->npages; i++) {
		if (migrate->src[i] & MIGRATE_PFN_VALID &&
		    migrate->src[i] & MIGRATE_PFN_MIGRATE)
			cpages++;
	}
	return cpages;
}
static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
{
	unsigned long upages = 0;
	unsigned long i;

	for (i = 0; i < migrate->npages; i++) {
		if (migrate->src[i] & MIGRATE_PFN_VALID &&
		    !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
			upages++;
	}
	return upages;
}
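
/*
 * Copy system pages to VRAM: walk the migrate array, batching pages whose
 * VRAM destinations are contiguous, and flush a GART-windowed SDMA copy
 * whenever a hole is hit or the current VRAM cursor chunk runs out.
 */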
static int
svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
			 struct migrate_vma *migrate, struct dma_fence **mfence,
			 dma_addr_t *scratch, uint64_t ttm_res_offset)
{
	uint64_t npages = migrate->npages;
	struct device *dev = adev->dev;
	struct amdgpu_res_cursor cursor;
	dma_addr_t *src;
	uint64_t *dst;
	uint64_t i, j;
	int r;

	pr_debug("svms 0x%p [0x%lx 0x%lx 0x%llx]\n", prange->svms, prange->start,
		 prange->last, ttm_res_offset);

	src = scratch;
	dst = (uint64_t *)(scratch + npages);

	amdgpu_res_first(prange->ttm_res, ttm_res_offset,
			 npages << PAGE_SHIFT, &cursor);
	for (i = j = 0; i < npages; i++) {
		struct page *spage;

		dst[i] = cursor.start + (j << PAGE_SHIFT);
		migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
		svm_migrate_get_vram_page(prange, migrate->dst[i]);
		migrate->dst[i] = migrate_pfn(migrate->dst[i]);

		spage = migrate_pfn_to_page(migrate->src[i]);
		if (spage && !is_zone_device_page(spage)) {
			src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
					      DMA_TO_DEVICE);
			r = dma_mapping_error(dev, src[i]);
			if (r) {
				dev_err(adev->dev, "%s: fail %d dma_map_page\n",
					__func__, r);
				goto out_free_vram_pages;
			}
		} else {
			if (j) {
				r = svm_migrate_copy_memory_gart(
						adev, src + i - j,
						dst + i - j, j,
						FROM_RAM_TO_VRAM,
						mfence);
				if (r)
					goto out_free_vram_pages;
				amdgpu_res_next(&cursor, (j + 1) << PAGE_SHIFT);
				j = 0;
			} else {
				amdgpu_res_next(&cursor, PAGE_SIZE);
			}
			continue;
		}

		pr_debug_ratelimited("dma mapping src to 0x%llx, pfn 0x%lx\n",
				     src[i] >> PAGE_SHIFT, page_to_pfn(spage));

		if (j >= (cursor.size >> PAGE_SHIFT) - 1 && i < npages - 1) {
			r = svm_migrate_copy_memory_gart(adev, src + i - j,
							 dst + i - j, j + 1,
							 FROM_RAM_TO_VRAM,
							 mfence);
			if (r)
				goto out_free_vram_pages;
			amdgpu_res_next(&cursor, (j + 1) * PAGE_SIZE);
			j = 0;
		} else {
			j++;
		}
	}

	r = svm_migrate_copy_memory_gart(adev, src + i - j, dst + i - j, j,
					 FROM_RAM_TO_VRAM, mfence);

out_free_vram_pages:
	if (r) {
		pr_debug("failed %d to copy memory to vram\n", r);
		while (i--) {
			svm_migrate_put_vram_page(adev, dst[i]);
			migrate->dst[i] = 0;
		}
	}

#ifdef DEBUG_FORCE_MIXED_DOMAINS
	for (i = 0, j = 0; i < npages; i += 4, j++) {
		if (j & 1)
			continue;
		svm_migrate_put_vram_page(adev, dst[i]);
		migrate->dst[i] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 1]);
		migrate->dst[i + 1] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 2]);
		migrate->dst[i + 2] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 3]);
		migrate->dst[i + 3] = 0;
	}
#endif

	return r;
}
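
/*
 * Migrate the pages of one vma-contiguous chunk of the range to VRAM and
 * return the number of pages collected for migration, or a negative error.
 */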
static long
svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
			struct vm_area_struct *vma, uint64_t start,
			uint64_t end, uint32_t trigger, uint64_t ttm_res_offset)
{
	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
	uint64_t npages = (end - start) >> PAGE_SHIFT;
	struct kfd_process_device *pdd;
	struct dma_fence *mfence = NULL;
	struct migrate_vma migrate = { 0 };
	unsigned long cpages = 0;
	dma_addr_t *scratch;
	void *buf;
	int r = -ENOMEM;

	memset(&migrate, 0, sizeof(migrate));
	migrate.vma = vma;
	migrate.start = start;
	migrate.end = end;
	migrate.flags = MIGRATE_VMA_SELECT_SYSTEM;
	migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);

	buf = kvcalloc(npages,
		       2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
		       GFP_KERNEL);
	if (!buf)
		goto out;

	migrate.src = buf;
	migrate.dst = migrate.src + npages;
	scratch = (dma_addr_t *)(migrate.dst + npages);
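	/*
	 * One allocation backs four per-page arrays: the migrate.src and
	 * migrate.dst pfn arrays, the scratch dma_addr_t array for mapped
	 * system pages, and, stacked after scratch, the uint64_t array of
	 * VRAM destination offsets used by svm_migrate_copy_to_vram().
	 */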
	kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid,
				      start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				      0, adev->kfd.dev->node->id, prange->prefetch_loc,
				      prange->preferred_loc, trigger);

	r = migrate_vma_setup(&migrate);
	if (r) {
		dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
			__func__, r, prange->start, prange->last);
		goto out_free;
	}

	cpages = migrate.cpages;
	if (!cpages) {
		pr_debug("failed collect migrate sys pages [0x%lx 0x%lx]\n",
			 prange->start, prange->last);
		goto out_free;
	}
	if (cpages != npages)
		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
			 cpages, npages);
	else
		pr_debug("0x%lx pages migrated\n", cpages);

	r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch, ttm_res_offset);
	migrate_vma_pages(&migrate);

	pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
		 svm_migrate_successful_pages(&migrate), cpages, migrate.npages);

	svm_migrate_copy_done(adev, mfence);
	migrate_vma_finalize(&migrate);

	kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid,
				    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				    0, adev->kfd.dev->node->id, trigger);

	svm_range_dma_unmap(adev->dev, scratch, 0, npages);
	svm_range_free_dma_mappings(prange);

out_free:
	kvfree(buf);
out:
	if (!r && cpages) {
		pdd = svm_range_get_pdd_by_adev(prange, adev);
		if (pdd)
			WRITE_ONCE(pdd->page_in, pdd->page_in + cpages);

		return cpages;
	}
	return r;
}
/**
 * svm_migrate_ram_to_vram - migrate svm range from system to device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @mm: the process mm structure
 * @trigger: reason for the migration
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
			struct mm_struct *mm, uint32_t trigger)
{
	unsigned long addr, start, end;
	struct vm_area_struct *vma;
	struct amdgpu_device *adev;
	uint64_t ttm_res_offset;
	unsigned long cpages = 0;
	long r = 0;

	if (prange->actual_loc == best_loc) {
		pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n",
			 prange->svms, prange->start, prange->last, best_loc);
		return 0;
	}

	adev = svm_range_get_adev_by_id(prange, best_loc);
	if (!adev) {
		pr_debug("failed to get device by id 0x%x\n", best_loc);
		return -ENODEV;
	}

	pr_debug("svms 0x%p [0x%lx 0x%lx] to gpu 0x%x\n", prange->svms,
		 prange->start, prange->last, best_loc);

	start = prange->start << PAGE_SHIFT;
	end = (prange->last + 1) << PAGE_SHIFT;

	r = svm_range_vram_node_new(adev, prange, true);
	if (r) {
		dev_dbg(adev->dev, "fail %ld to alloc vram\n", r);
		return r;
	}
	ttm_res_offset = prange->offset << PAGE_SHIFT;

	for (addr = start; addr < end;) {
		unsigned long next;

		vma = vma_lookup(mm, addr);
		if (!vma)
			break;

		next = min(vma->vm_end, end);
		r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next, trigger, ttm_res_offset);
		if (r < 0) {
			pr_debug("failed %ld to migrate\n", r);
			break;
		} else {
			cpages += r;
		}
		ttm_res_offset += next - addr;
		addr = next;
	}

	if (cpages)
		prange->actual_loc = best_loc;
	else
		svm_range_vram_node_free(prange);

	return r < 0 ? r : 0;
}
static void svm_migrate_page_free(struct page *page)
{
	struct svm_range_bo *svm_bo = page->zone_device_data;

	if (svm_bo) {
		pr_debug_ratelimited("ref: %d\n", kref_read(&svm_bo->kref));
		svm_range_bo_unref_async(svm_bo);
	}
}
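
/*
 * Copy VRAM pages back to freshly allocated system pages: the mirror image
 * of svm_migrate_copy_to_vram(), flushing an SDMA copy whenever the VRAM
 * source addresses stop being contiguous.
 */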
static int
svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
			struct migrate_vma *migrate, struct dma_fence **mfence,
			dma_addr_t *scratch, uint64_t npages)
{
	struct device *dev = adev->dev;
	uint64_t *src;
	dma_addr_t *dst;
	struct page *dpage;
	uint64_t i = 0, j;
	uint64_t addr;
	int r = 0;

	pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
		 prange->last);

	addr = prange->start << PAGE_SHIFT;

	src = (uint64_t *)(scratch + npages);
	dst = scratch;

	for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
		struct page *spage;

		spage = migrate_pfn_to_page(migrate->src[i]);
		if (!spage || !is_zone_device_page(spage)) {
			pr_debug("invalid page. Could be in CPU already svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);
			if (j) {
				r = svm_migrate_copy_memory_gart(adev, dst + i - j,
								 src + i - j, j,
								 FROM_VRAM_TO_RAM,
								 mfence);
				if (r)
					goto out_oom;
				j = 0;
			}
			continue;
		}

		src[i] = svm_migrate_addr(adev, spage);
		if (j > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
			r = svm_migrate_copy_memory_gart(adev, dst + i - j,
							 src + i - j, j,
							 FROM_VRAM_TO_RAM,
							 mfence);
			if (r)
				goto out_oom;
			j = 0;
		}

		dpage = svm_migrate_get_sys_page(migrate->vma, addr);
		if (!dpage) {
			pr_debug("failed get page svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);
			r = -ENOMEM;
			goto out_oom;
		}

		dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		r = dma_mapping_error(dev, dst[i]);
		if (r) {
			dev_err(adev->dev, "%s: fail %d dma_map_page\n", __func__, r);
			goto out_oom;
		}

		pr_debug_ratelimited("dma mapping dst to 0x%llx, pfn 0x%lx\n",
				     dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));

		migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
		j++;
	}

	r = svm_migrate_copy_memory_gart(adev, dst + i - j, src + i - j, j,
					 FROM_VRAM_TO_RAM, mfence);

out_oom:
	if (r) {
		pr_debug("failed %d copy to ram\n", r);
		while (i--) {
			svm_migrate_put_sys_page(dst[i]);
			migrate->dst[i] = 0;
		}
	}

	return r;
}
/**
 * svm_migrate_vma_to_ram - migrate range inside one vma from device to system
 *
 * @adev: amdgpu device to migrate from
 * @prange: svm range structure
 * @vma: vm_area_struct that range [start, end] belongs to
 * @start: range start virtual address in bytes
 * @end: range end virtual address in bytes
 * @trigger: reason for the migration
 * @fault_page: the CPU page being faulted on, or NULL if not called from a
 *              CPU page fault
 *
 * Context: Process context, caller holds mmap read lock, prange->migrate_mutex
 *
 * Return:
 * 0 - success with all pages migrated
 * negative values - indicate error
 * positive values - partial migration, number of pages not migrated
 */
static long
svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
		       struct vm_area_struct *vma, uint64_t start, uint64_t end,
		       uint32_t trigger, struct page *fault_page)
{
	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
	uint64_t npages = (end - start) >> PAGE_SHIFT;
	unsigned long upages = npages;
	unsigned long cpages = 0;
	struct kfd_process_device *pdd;
	struct dma_fence *mfence = NULL;
	struct migrate_vma migrate = { 0 };
	dma_addr_t *scratch;
	void *buf;
	int r = -ENOMEM;

	memset(&migrate, 0, sizeof(migrate));
	migrate.vma = vma;
	migrate.start = start;
	migrate.end = end;
	migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
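	/*
	 * Device pages are MEMORY_DEVICE_COHERENT when VRAM is XGMI-connected
	 * to the CPU and MEMORY_DEVICE_PRIVATE otherwise, so select the
	 * matching source-page type for migrate_vma_setup().
	 */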
	if (adev->gmc.xgmi.connected_to_cpu)
		migrate.flags = MIGRATE_VMA_SELECT_DEVICE_COHERENT;
	else
		migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;

	buf = kvcalloc(npages,
		       2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
		       GFP_KERNEL);
	if (!buf)
		goto out;

	migrate.src = buf;
	migrate.dst = migrate.src + npages;
	migrate.fault_page = fault_page;
	scratch = (dma_addr_t *)(migrate.dst + npages);

	kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid,
				      start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				      adev->kfd.dev->node->id, 0, prange->prefetch_loc,
				      prange->preferred_loc, trigger);

	r = migrate_vma_setup(&migrate);
	if (r) {
		dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
			__func__, r, prange->start, prange->last);
		goto out_free;
	}

	cpages = migrate.cpages;
	if (!cpages) {
		pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
			 prange->start, prange->last);
		upages = svm_migrate_unsuccessful_pages(&migrate);
		goto out_free;
	}
	if (cpages != npages)
		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
			 cpages, npages);
	else
		pr_debug("0x%lx pages migrated\n", cpages);

	r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
				    scratch, npages);
	migrate_vma_pages(&migrate);

	upages = svm_migrate_unsuccessful_pages(&migrate);
	pr_debug("unsuccessful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
		 upages, cpages, migrate.npages);

	svm_migrate_copy_done(adev, mfence);
	migrate_vma_finalize(&migrate);

	kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid,
				    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				    adev->kfd.dev->node->id, 0, trigger);

	svm_range_dma_unmap(adev->dev, scratch, 0, npages);

out_free:
	kvfree(buf);
out:
	if (!r && cpages) {
		pdd = svm_range_get_pdd_by_adev(prange, adev);
		if (pdd)
			WRITE_ONCE(pdd->page_out, pdd->page_out + cpages);
	}

	return r ? r : upages;
}
/**
 * svm_migrate_vram_to_ram - migrate svm range from device to system
 * @prange: range structure
 * @mm: process mm, use current->mm if NULL
 * @trigger: reason for the migration
 * @fault_page: the CPU page being faulted on, or NULL if not called from a
 *              CPU page fault
 *
 * Context: Process context, caller holds mmap read lock, prange->migrate_mutex
 *
 * Return:
 * 0 - OK, otherwise error code
 */
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
			    uint32_t trigger, struct page *fault_page)
{
	struct amdgpu_device *adev;
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long start;
	unsigned long end;
	unsigned long upages = 0;
	long r = 0;

	if (!prange->actual_loc) {
		pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
			 prange->start, prange->last);
		return 0;
	}

	adev = svm_range_get_adev_by_id(prange, prange->actual_loc);
	if (!adev) {
		pr_debug("failed to get device by id 0x%x\n",
			 prange->actual_loc);
		return -ENODEV;
	}

	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n",
		 prange->svms, prange, prange->start, prange->last,
		 prange->actual_loc);

	start = prange->start << PAGE_SHIFT;
	end = (prange->last + 1) << PAGE_SHIFT;

	for (addr = start; addr < end;) {
		unsigned long next;

		vma = vma_lookup(mm, addr);
		if (!vma) {
			pr_debug("failed to find vma for prange %p\n", prange);
			r = -EFAULT;
			break;
		}

		next = min(vma->vm_end, end);
		r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next, trigger,
			fault_page);
		if (r < 0) {
			pr_debug("failed %ld to migrate prange %p\n", r, prange);
			break;
		} else {
			upages += r;
		}
		addr = next;
	}

	if (r >= 0 && !upages) {
		svm_range_vram_node_free(prange);
		prange->actual_loc = 0;
	}

	return r < 0 ? r : 0;
}
/**
 * svm_migrate_vram_to_vram - migrate svm range from device to device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @mm: process mm, use current->mm if NULL
 * @trigger: reason for the migration
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
			 struct mm_struct *mm, uint32_t trigger)
{
	int r, retries = 3;

	/*
	 * TODO: for both devices with PCIe large bar or on same xgmi hive, skip
	 * system memory as migration bridge
	 */

	pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc);

	do {
		r = svm_migrate_vram_to_ram(prange, mm, trigger, NULL);
		if (r)
			return r;
	} while (prange->actual_loc && --retries);

	if (prange->actual_loc)
		return -EDEADLK;

	return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger);
}
int
svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
		    struct mm_struct *mm, uint32_t trigger)
{
	if (!prange->actual_loc)
		return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger);
	else
		return svm_migrate_vram_to_vram(prange, best_loc, mm, trigger);
}
/**
 * svm_migrate_to_ram - CPU page fault handler
 * @vmf: CPU vm fault vma, address
 *
 * Context: vm fault handler, caller holds the mmap read lock
 *
 * Return:
 * 0 - OK
 * VM_FAULT_SIGBUS - notice application to have SIGBUS page fault
 */
static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
{
	unsigned long addr = vmf->address;
	struct svm_range_bo *svm_bo;
	enum svm_work_list_ops op;
	struct svm_range *parent;
	struct svm_range *prange;
	struct kfd_process *p;
	struct mm_struct *mm;
	int r = 0;

	svm_bo = vmf->page->zone_device_data;
	if (!svm_bo) {
		pr_debug("failed get device page at addr 0x%lx\n", addr);
		return VM_FAULT_SIGBUS;
	}
	if (!mmget_not_zero(svm_bo->eviction_fence->mm)) {
		pr_debug("addr 0x%lx of process mm is destroyed\n", addr);
		return VM_FAULT_SIGBUS;
	}

	mm = svm_bo->eviction_fence->mm;
	if (mm != vmf->vma->vm_mm)
		pr_debug("addr 0x%lx is COW mapping in child process\n", addr);

	p = kfd_lookup_process_by_mm(mm);
	if (!p) {
		pr_debug("failed find process at fault address 0x%lx\n", addr);
		r = VM_FAULT_SIGBUS;
		goto out_mmput;
	}
	if (READ_ONCE(p->svms.faulting_task) == current) {
		pr_debug("skipping ram migration\n");
		r = 0;
		goto out_unref_process;
	}

	pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr);
	addr >>= PAGE_SHIFT;

	mutex_lock(&p->svms.lock);

	prange = svm_range_from_addr(&p->svms, addr, &parent);
	if (!prange) {
		pr_debug("failed get range svms 0x%p addr 0x%lx\n", &p->svms, addr);
		r = -EFAULT;
		goto out_unlock_svms;
	}

	mutex_lock(&parent->migrate_mutex);
	if (prange != parent)
		mutex_lock_nested(&prange->migrate_mutex, 1);

	if (!prange->actual_loc)
		goto out_unlock_prange;

	svm_range_lock(parent);
	if (prange != parent)
		mutex_lock_nested(&prange->lock, 1);
	r = svm_range_split_by_granularity(p, mm, addr, parent, prange);
	if (prange != parent)
		mutex_unlock(&prange->lock);
	svm_range_unlock(parent);
	if (r) {
		pr_debug("failed %d to split range by granularity\n", r);
		goto out_unlock_prange;
	}

	r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm,
				    KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
				    vmf->page);
	if (r)
		pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n",
			 r, prange->svms, prange, prange->start, prange->last);

	/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
	if (p->xnack_enabled && parent == prange)
		op = SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP;
	else
		op = SVM_OP_UPDATE_RANGE_NOTIFIER;
	svm_range_add_list_work(&p->svms, parent, mm, op);
	schedule_deferred_list_work(&p->svms);

out_unlock_prange:
	if (prange != parent)
		mutex_unlock(&prange->migrate_mutex);
	mutex_unlock(&parent->migrate_mutex);
out_unlock_svms:
	mutex_unlock(&p->svms.lock);
out_unref_process:
	pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr);
	kfd_unref_process(p);
out_mmput:
	mmput(mm);
	return r ? VM_FAULT_SIGBUS : 0;
}
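
/*
 * dev_pagemap callbacks for device pages: page_free drops the svm_bo
 * reference when a VRAM page is released, and migrate_to_ram services CPU
 * faults on pages that currently live in VRAM.
 */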
static const struct dev_pagemap_ops svm_migrate_pgmap_ops = {
	.page_free		= svm_migrate_page_free,
	.migrate_to_ram		= svm_migrate_to_ram,
};
/* Each VRAM page uses sizeof(struct page) on system memory */
#define SVM_HMM_PAGE_STRUCT_SIZE(size) ((size)/PAGE_SIZE * sizeof(struct page))
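/*
 * For example, assuming 4 KiB pages and the common 64-byte struct page,
 * a 16 GiB VRAM aperture costs 16 GiB / 4 KiB * 64 B = 256 MiB of system
 * memory in page structs.
 */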
int svm_migrate_init(struct amdgpu_device *adev)
{
	struct kfd_dev *kfddev = adev->kfd.dev;
	struct dev_pagemap *pgmap;
	struct resource *res = NULL;
	unsigned long size;
	void *r;

	/* Page migration works on Vega10 or newer */
	if (!KFD_IS_SOC15(kfddev))
		return -EINVAL;

	pgmap = &kfddev->pgmap;
	memset(pgmap, 0, sizeof(*pgmap));

	/* TODO: register all vram to HMM for now.
	 * should remove reserved size
	 */
	size = ALIGN(adev->gmc.real_vram_size, 2ULL << 20);
	if (adev->gmc.xgmi.connected_to_cpu) {
		pgmap->range.start = adev->gmc.aper_base;
		pgmap->range.end = adev->gmc.aper_base + adev->gmc.aper_size - 1;
		pgmap->type = MEMORY_DEVICE_COHERENT;
	} else {
		res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
		if (IS_ERR(res))
			return PTR_ERR(res);
		pgmap->range.start = res->start;
		pgmap->range.end = res->end;
		pgmap->type = MEMORY_DEVICE_PRIVATE;
	}

	pgmap->nr_range = 1;
	pgmap->ops = &svm_migrate_pgmap_ops;
	pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev);
	pgmap->flags = 0;

	/* Device manager releases device-specific resources, memory region and
	 * pgmap when driver disconnects from device.
	 */
	r = devm_memremap_pages(adev->dev, pgmap);
	if (IS_ERR(r)) {
		pr_err("failed to register HMM device memory\n");
		if (pgmap->type == MEMORY_DEVICE_PRIVATE)
			devm_release_mem_region(adev->dev, res->start,
						resource_size(res));
		/* Disable SVM support capability */
		pgmap->type = 0;
		return PTR_ERR(r);
	}

	pr_debug("reserve %ldMB system memory for VRAM pages struct\n",
		 SVM_HMM_PAGE_STRUCT_SIZE(size) >> 20);

	amdgpu_amdkfd_reserve_system_mem(SVM_HMM_PAGE_STRUCT_SIZE(size));

	svm_range_set_max_pages(adev);

	pr_info("HMM registered %ldMB device memory\n", size >> 20);

	return 0;
}