/*
 * Copyright 2014-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/dma-buf.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include "amdgpu_object.h"
#include "amdgpu_gem.h"
#include "amdgpu_vm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
#include <uapi/linux/kfd_ioctl.h>
#include "amdgpu_xgmi.h"
/* Userptr restore delay, just long enough to allow consecutive VM
 * changes to accumulate
 */
#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
/* Impose limit on how much memory KFD can use */
static struct {
	uint64_t max_system_mem_limit;
	uint64_t max_ttm_mem_limit;
	int64_t system_mem_used;
	int64_t ttm_mem_used;
	spinlock_t mem_limit_lock;
} kfd_mem_limit;
/* Struct used for amdgpu_amdkfd_bo_validate */
struct amdgpu_vm_parser {
	uint32_t	domain;
	bool		wait;
};

static const char * const domain_bit_to_string[] = {
		"CPU",
		"GTT",
		"VRAM",
		"GDS",
		"GWS",
		"OA"
};

#define domain_string(domain) domain_bit_to_string[ffs(domain)-1]

static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}
static bool kfd_mem_is_attached(struct amdgpu_vm *avm,
				struct kgd_mem *mem)
{
	struct kfd_mem_attachment *entry;

	list_for_each_entry(entry, &mem->attachments, list)
		if (entry->bo_va->base.vm == avm)
			return true;

	return false;
}
/* Set memory usage limits. Currently, the limits are
 *  System (TTM + userptr) memory - 15/16th System RAM
 *  TTM memory - 3/8th System RAM
 */
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
	struct sysinfo si;
	uint64_t mem;

	si_meminfo(&si);
	mem = si.freeram - si.freehigh;
	mem *= si.mem_unit;

	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
	kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
	kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
		(kfd_mem_limit.max_system_mem_limit >> 20),
		(kfd_mem_limit.max_ttm_mem_limit >> 20));
}
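/* For example, with 16 GiB of usable system RAM the limits initialized
 * above work out to 16 - 1 = 15 GiB (15/16) of system memory and
 * 8 - 2 = 6 GiB (1/2 - 1/8 = 3/8) of TTM memory.
 */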
void amdgpu_amdkfd_reserve_system_mem(uint64_t size)
{
	kfd_mem_limit.system_mem_used += size;
}
/* Estimate page table size needed to represent a given memory size
 *
 * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
 * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
 * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
 * for 2MB pages for TLB efficiency. However, small allocations and
 * fragmented system memory still need some 4KB pages. We choose a
 * compromise that should work in most cases without reserving too
 * much memory for page tables unnecessarily (factor 16K, >> 14).
 */
#define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
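/* For example, ESTIMATE_PT_SIZE(64 GiB) = 2^36 >> 14 = 4 MiB reserved
 * for page tables.
 */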
static size_t amdgpu_amdkfd_acc_size(uint64_t size)
{
	size >>= PAGE_SHIFT;
	size *= sizeof(dma_addr_t) + sizeof(void *);

	return __roundup_pow_of_two(sizeof(struct amdgpu_bo)) +
		__roundup_pow_of_two(sizeof(struct ttm_tt)) +
		PAGE_ALIGN(size);
}
static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 domain, bool sg)
{
	uint64_t reserved_for_pt =
		ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
	size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
	int ret = 0;

	acc_size = amdgpu_amdkfd_acc_size(size);

	vram_needed = 0;
	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		/* TTM GTT memory */
		system_mem_needed = acc_size + size;
		ttm_mem_needed = acc_size + size;
	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
		/* Userptr */
		system_mem_needed = acc_size + size;
		ttm_mem_needed = acc_size;
	} else {
		/* VRAM and SG */
		system_mem_needed = acc_size;
		ttm_mem_needed = acc_size;
		if (domain == AMDGPU_GEM_DOMAIN_VRAM)
			vram_needed = size;
	}

	spin_lock(&kfd_mem_limit.mem_limit_lock);

	if (kfd_mem_limit.system_mem_used + system_mem_needed >
	    kfd_mem_limit.max_system_mem_limit)
		pr_debug("Set no_system_mem_limit=1 if using shared memory\n");

	if ((kfd_mem_limit.system_mem_used + system_mem_needed >
	     kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
	    (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
	     kfd_mem_limit.max_ttm_mem_limit) ||
	    (adev->kfd.vram_used + vram_needed >
	     adev->gmc.real_vram_size - reserved_for_pt)) {
		ret = -ENOMEM;
	} else {
		kfd_mem_limit.system_mem_used += system_mem_needed;
		kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
		adev->kfd.vram_used += vram_needed;
	}

	spin_unlock(&kfd_mem_limit.mem_limit_lock);
	return ret;
}
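/* unreserve_mem_limit must mirror the accounting branches in
 * amdgpu_amdkfd_reserve_mem_limit above; if the two ever disagree, the
 * usage counters drift and the WARN_ONCE checks below will fire.
 */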
static void unreserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 domain, bool sg)
{
	size_t acc_size;

	acc_size = amdgpu_amdkfd_acc_size(size);

	spin_lock(&kfd_mem_limit.mem_limit_lock);
	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		kfd_mem_limit.system_mem_used -= (acc_size + size);
		kfd_mem_limit.ttm_mem_used -= (acc_size + size);
	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
		kfd_mem_limit.system_mem_used -= (acc_size + size);
		kfd_mem_limit.ttm_mem_used -= acc_size;
	} else {
		kfd_mem_limit.system_mem_used -= acc_size;
		kfd_mem_limit.ttm_mem_used -= acc_size;
		if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
			adev->kfd.vram_used -= size;
			WARN_ONCE(adev->kfd.vram_used < 0,
				  "kfd VRAM memory accounting unbalanced");
		}
	}
	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
		  "kfd system memory accounting unbalanced");
	WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
		  "kfd TTM memory accounting unbalanced");

	spin_unlock(&kfd_mem_limit.mem_limit_lock);
}
void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	u32 domain = bo->preferred_domains;
	bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);

	if (bo->flags & AMDGPU_AMDKFD_CREATE_USERPTR_BO) {
		domain = AMDGPU_GEM_DOMAIN_CPU;
		sg = false;
	}

	unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
}
/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
 *  reservation object.
 *
 * @bo: [IN] Remove eviction fence(s) from this BO
 * @ef: [IN] This eviction fence is removed if it
 *  is present in the shared list.
 *
 * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
 */
static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
					struct amdgpu_amdkfd_fence *ef)
{
	struct dma_resv *resv = bo->tbo.base.resv;
	struct dma_resv_list *old, *new;
	unsigned int i, j, k;

	if (!ef)
		return -EINVAL;

	old = dma_resv_get_list(resv);
	if (!old)
		return 0;

	new = kmalloc(struct_size(new, shared, old->shared_max), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	/* Go through all the shared fences in the reservation object and sort
	 * the interesting ones to the end of the list.
	 */
	for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
		struct dma_fence *f;

		f = rcu_dereference_protected(old->shared[i],
					      dma_resv_held(resv));

		if (f->context == ef->base.context)
			RCU_INIT_POINTER(new->shared[--j], f);
		else
			RCU_INIT_POINTER(new->shared[k++], f);
	}
	new->shared_max = old->shared_max;
	new->shared_count = k;

	/* Install the new fence list, seqcount provides the barriers */
	write_seqcount_begin(&resv->seq);
	RCU_INIT_POINTER(resv->fence, new);
	write_seqcount_end(&resv->seq);

	/* Drop the references to the removed fences or move them to ef_list */
	for (i = j; i < old->shared_count; ++i) {
		struct dma_fence *f;

		f = rcu_dereference_protected(new->shared[i],
					      dma_resv_held(resv));
		dma_fence_put(f);
	}
	kfree_rcu(old, rcu);

	return 0;
}
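/* amdgpu_amdkfd_remove_fence_on_pt_pd_bos - Remove the KFD eviction
 * fence from a page table/directory BO, looked up through its root PD.
 * Presumably used when amdgpu releases PT/PD BOs, so that freeing them
 * does not signal the process eviction fence.
 */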
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
{
	struct amdgpu_bo *root = bo;
	struct amdgpu_vm_bo_base *vm_bo;
	struct amdgpu_vm *vm;
	struct amdkfd_process_info *info;
	struct amdgpu_amdkfd_fence *ef;
	int ret;

	/* we can always get vm_bo from root PD bo.*/
	while (root->parent)
		root = root->parent;

	vm_bo = root->vm_bo;
	if (!vm_bo)
		return 0;

	vm = vm_bo->vm;
	if (!vm)
		return 0;

	info = vm->process_info;
	if (!info || !info->eviction_fence)
		return 0;

	ef = container_of(dma_fence_get(&info->eviction_fence->base),
			struct amdgpu_amdkfd_fence, base);

	BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
	ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
	dma_resv_unlock(bo->tbo.base.resv);

	dma_fence_put(&ef->base);
	return ret;
}
static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
				     bool wait)
{
	struct ttm_operation_ctx ctx = { false, false };
	int ret;

	if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
		 "Called with userptr BO"))
		return -EINVAL;

	amdgpu_bo_placement_from_domain(bo, domain);

	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		goto validate_fail;
	if (wait)
		amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);

validate_fail:
	return ret;
}

static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_vm_parser *p = param;

	return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
}
/* vm_validate_pt_pd_bos - Validate page table and directory BOs
 *
 * Page directories are not updated here because huge page handling
 * during page table updates can invalidate page directory entries
 * again. Page directories are only updated after updating page
 * tables.
 */
static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
{
	struct amdgpu_bo *pd = vm->root.base.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	struct amdgpu_vm_parser param;
	int ret;

	param.domain = AMDGPU_GEM_DOMAIN_VRAM;
	param.wait = false;

	ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
					&param);
	if (ret) {
		pr_err("failed to validate PT BOs\n");
		return ret;
	}

	ret = amdgpu_amdkfd_validate(&param, pd);
	if (ret) {
		pr_err("failed to validate PD\n");
		return ret;
	}

	vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);

	if (vm->use_cpu_for_update) {
		ret = amdgpu_bo_kmap(pd, NULL);
		if (ret) {
			pr_err("failed to kmap PD, ret=%d\n", ret);
			return ret;
		}
	}

	return 0;
}
static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
	struct amdgpu_bo *pd = vm->root.base.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	int ret;

	ret = amdgpu_vm_update_pdes(adev, vm, false);
	if (ret)
		return ret;

	return amdgpu_sync_fence(sync, vm->last_update);
}
static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
{
	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
	bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
	bool uncached = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED;
	uint32_t mapping_flags;
	uint64_t pte_flags;
	bool snoop = false;

	mapping_flags = AMDGPU_VM_PAGE_READABLE;
	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
		mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;

	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
			if (bo_adev == adev)
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
			else
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		} else {
			mapping_flags |= coherent ?
				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		}
		break;
	case CHIP_ALDEBARAN:
		if (coherent && uncached) {
			if (adev->gmc.xgmi.connected_to_cpu ||
			    !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM))
				snoop = true;
			mapping_flags |= AMDGPU_VM_MTYPE_UC;
		} else if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
			if (bo_adev == adev) {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
				if (adev->gmc.xgmi.connected_to_cpu)
					snoop = true;
			} else {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
				if (amdgpu_xgmi_same_hive(adev, bo_adev))
					snoop = true;
			}
		} else {
			snoop = true;
			mapping_flags |= coherent ?
				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		}
		break;
	default:
		mapping_flags |= coherent ?
			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
	}

	pte_flags = amdgpu_gem_va_map_flags(adev, mapping_flags);
	pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;

	return pte_flags;
}
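/* kfd_mem_dmamap_userptr - Build an sg_table that shares the original
 * userptr BO's pages and DMA-map it for the attachment's device, so
 * that a peer GPU can access the same system memory pages.
 */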
static int
kfd_mem_dmamap_userptr(struct kgd_mem *mem,
		       struct kfd_mem_attachment *attachment)
{
	enum dma_data_direction direction =
		mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	struct ttm_operation_ctx ctx = {.interruptible = true};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
	struct amdgpu_device *adev = attachment->adev;
	struct ttm_tt *src_ttm = mem->bo->tbo.ttm;
	struct ttm_tt *ttm = bo->tbo.ttm;
	int ret;

	ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL);
	if (unlikely(!ttm->sg))
		return -ENOMEM;

	if (WARN_ON(ttm->num_pages != src_ttm->num_pages))
		return -EINVAL;

	/* Same sequence as in amdgpu_ttm_tt_pin_userptr */
	ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages,
					ttm->num_pages, 0,
					(u64)ttm->num_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (unlikely(ret))
		goto free_sg;

	ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
	if (unlikely(ret))
		goto release_sg;

	drm_prime_sg_to_dma_addr_array(ttm->sg, ttm->dma_address,
				       ttm->num_pages);

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		goto unmap_sg;

	return 0;

unmap_sg:
	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
release_sg:
	pr_err("DMA map userptr failed: %d\n", ret);
	sg_free_table(ttm->sg);
free_sg:
	kfree(ttm->sg);
	ttm->sg = NULL;
	return ret;
}
static int
kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment)
{
	struct ttm_operation_ctx ctx = {.interruptible = true};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}

static int
kfd_mem_dmamap_attachment(struct kgd_mem *mem,
			  struct kfd_mem_attachment *attachment)
{
	switch (attachment->type) {
	case KFD_MEM_ATT_SHARED:
		return 0;
	case KFD_MEM_ATT_USERPTR:
		return kfd_mem_dmamap_userptr(mem, attachment);
	case KFD_MEM_ATT_DMABUF:
		return kfd_mem_dmamap_dmabuf(attachment);
	default:
		WARN_ON_ONCE(1);
	}
	return -EINVAL;
}
static void
kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
			 struct kfd_mem_attachment *attachment)
{
	enum dma_data_direction direction =
		mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	struct ttm_operation_ctx ctx = {.interruptible = false};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
	struct amdgpu_device *adev = attachment->adev;
	struct ttm_tt *ttm = bo->tbo.ttm;

	if (unlikely(!ttm->sg))
		return;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
	sg_free_table(ttm->sg);
	ttm->sg = NULL;
}

static void
kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment)
{
	struct ttm_operation_ctx ctx = {.interruptible = true};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}

static void
kfd_mem_dmaunmap_attachment(struct kgd_mem *mem,
			    struct kfd_mem_attachment *attachment)
{
	switch (attachment->type) {
	case KFD_MEM_ATT_SHARED:
		break;
	case KFD_MEM_ATT_USERPTR:
		kfd_mem_dmaunmap_userptr(mem, attachment);
		break;
	case KFD_MEM_ATT_DMABUF:
		kfd_mem_dmaunmap_dmabuf(attachment);
		break;
	default:
		WARN_ON_ONCE(1);
	}
}
static int
kfd_mem_attach_userptr(struct amdgpu_device *adev, struct kgd_mem *mem,
		       struct amdgpu_bo **bo)
{
	unsigned long bo_size = mem->bo->tbo.base.size;
	struct drm_gem_object *gobj;
	int ret;

	ret = amdgpu_bo_reserve(mem->bo, false);
	if (ret)
		return ret;

	ret = amdgpu_gem_object_create(adev, bo_size, 1,
				       AMDGPU_GEM_DOMAIN_CPU,
				       AMDGPU_GEM_CREATE_PREEMPTIBLE,
				       ttm_bo_type_sg, mem->bo->tbo.base.resv,
				       &gobj);
	amdgpu_bo_unreserve(mem->bo);
	if (ret)
		return ret;

	*bo = gem_to_amdgpu_bo(gobj);
	(*bo)->parent = amdgpu_bo_ref(mem->bo);

	return 0;
}
static int
kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
		      struct amdgpu_bo **bo)
{
	struct drm_gem_object *gobj;
	int ret;

	if (!mem->dmabuf) {
		mem->dmabuf = amdgpu_gem_prime_export(&mem->bo->tbo.base,
			mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
				DRM_RDWR : 0);
		if (IS_ERR(mem->dmabuf)) {
			ret = PTR_ERR(mem->dmabuf);
			mem->dmabuf = NULL;
			return ret;
		}
	}

	gobj = amdgpu_gem_prime_import(&adev->ddev, mem->dmabuf);
	if (IS_ERR(gobj))
		return PTR_ERR(gobj);

	/* Import takes an extra reference on the dmabuf. Drop it now to
	 * avoid leaking it. We only need the one reference in
	 * kgd_mem->dmabuf.
	 */
	dma_buf_put(mem->dmabuf);

	*bo = gem_to_amdgpu_bo(gobj);
	(*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;
	(*bo)->parent = amdgpu_bo_ref(mem->bo);

	return 0;
}
/* kfd_mem_attach - Add a BO to a VM
 *
 * Everything that needs to be done only once when a BO is first added
 * to a VM. It can later be mapped and unmapped many times without
 * repeating these steps.
 *
 * 0. Create BO for DMA mapping, if needed
 * 1. Allocate and initialize BO VA entry data structure
 * 2. Add BO to the VM
 * 3. Determine ASIC-specific PTE flags
 * 4. Alloc page tables and directories if needed
 * 4a. Validate new page tables and directories
 */
static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
		struct amdgpu_vm *vm, bool is_aql)
{
	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
	unsigned long bo_size = mem->bo->tbo.base.size;
	uint64_t va = mem->va;
	struct kfd_mem_attachment *attachment[2] = {NULL, NULL};
	struct amdgpu_bo *bo[2] = {NULL, NULL};
	int i, ret;

	if (!va) {
		pr_err("Invalid VA when adding BO to VM\n");
		return -EINVAL;
	}

	for (i = 0; i <= is_aql; i++) {
		attachment[i] = kzalloc(sizeof(*attachment[i]), GFP_KERNEL);
		if (unlikely(!attachment[i])) {
			ret = -ENOMEM;
			goto unwind;
		}

		pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
			 va + bo_size, vm);

		if (adev == bo_adev || (mem->domain == AMDGPU_GEM_DOMAIN_VRAM &&
					amdgpu_xgmi_same_hive(adev, bo_adev))) {
			/* Mappings on the local GPU and VRAM mappings in the
			 * local hive share the original BO
			 */
			attachment[i]->type = KFD_MEM_ATT_SHARED;
			bo[i] = mem->bo;
			drm_gem_object_get(&bo[i]->tbo.base);
		} else if (i > 0) {
			/* Multiple mappings on the same GPU share the BO */
			attachment[i]->type = KFD_MEM_ATT_SHARED;
			bo[i] = bo[0];
			drm_gem_object_get(&bo[i]->tbo.base);
		} else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
			/* Create an SG BO to DMA-map userptrs on other GPUs */
			attachment[i]->type = KFD_MEM_ATT_USERPTR;
			ret = kfd_mem_attach_userptr(adev, mem, &bo[i]);
			if (ret)
				goto unwind;
		} else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT &&
			   mem->bo->tbo.type != ttm_bo_type_sg) {
			/* GTT BOs use DMA-mapping ability of dynamic-attach
			 * DMA bufs. TODO: The same should work for VRAM on
			 * large-BAR GPUs.
			 */
			attachment[i]->type = KFD_MEM_ATT_DMABUF;
			ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]);
			if (ret)
				goto unwind;
		} else {
			/* FIXME: Need to DMA-map other BO types:
			 * large-BAR VRAM, doorbells, MMIO remap
			 */
			attachment[i]->type = KFD_MEM_ATT_SHARED;
			bo[i] = mem->bo;
			drm_gem_object_get(&bo[i]->tbo.base);
		}

		/* Add BO to VM internal data structures */
		attachment[i]->bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
		if (unlikely(!attachment[i]->bo_va)) {
			ret = -ENOMEM;
			pr_err("Failed to add BO object to VM. ret == %d\n",
			       ret);
			goto unwind;
		}

		attachment[i]->va = va;
		attachment[i]->pte_flags = get_pte_flags(adev, mem);
		attachment[i]->adev = adev;
		list_add(&attachment[i]->list, &mem->attachments);

		va += bo_size;
	}

	return 0;

unwind:
	for (; i >= 0; i--) {
		if (!attachment[i])
			continue;
		if (attachment[i]->bo_va) {
			amdgpu_vm_bo_rmv(adev, attachment[i]->bo_va);
			list_del(&attachment[i]->list);
		}
		if (bo[i])
			drm_gem_object_put(&bo[i]->tbo.base);
		kfree(attachment[i]);
	}
	return ret;
}
static void kfd_mem_detach(struct kfd_mem_attachment *attachment)
{
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;

	pr_debug("\t remove VA 0x%llx in entry %p\n",
			attachment->va, attachment);
	amdgpu_vm_bo_rmv(attachment->adev, attachment->bo_va);
	drm_gem_object_put(&bo->tbo.base);
	list_del(&attachment->list);
	kfree(attachment);
}
static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
				struct amdkfd_process_info *process_info,
				bool userptr)
{
	struct ttm_validate_buffer *entry = &mem->validate_list;
	struct amdgpu_bo *bo = mem->bo;

	INIT_LIST_HEAD(&entry->head);
	entry->num_shared = 1;
	entry->bo = &bo->tbo;
	mutex_lock(&process_info->lock);
	if (userptr)
		list_add_tail(&entry->head, &process_info->userptr_valid_list);
	else
		list_add_tail(&entry->head, &process_info->kfd_bo_list);
	mutex_unlock(&process_info->lock);
}

static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
		struct amdkfd_process_info *process_info)
{
	struct ttm_validate_buffer *bo_list_entry;

	bo_list_entry = &mem->validate_list;
	mutex_lock(&process_info->lock);
	list_del(&bo_list_entry->head);
	mutex_unlock(&process_info->lock);
}
/* Initializes user pages. It registers the MMU notifier and validates
 * the userptr BO in the GTT domain.
 *
 * The BO must already be on the userptr_valid_list. Otherwise an
 * eviction and restore may happen that leaves the new BO unmapped
 * with the user mode queues running.
 *
 * Takes the process_info->lock to protect against concurrent restore
 * workers.
 *
 * Returns 0 for success, negative errno for errors.
 */
static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	struct amdgpu_bo *bo = mem->bo;
	struct ttm_operation_ctx ctx = { true, false };
	int ret = 0;

	mutex_lock(&process_info->lock);

	ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
	if (ret) {
		pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
		goto out;
	}

	ret = amdgpu_mn_register(bo, user_addr);
	if (ret) {
		pr_err("%s: Failed to register MMU notifier: %d\n",
		       __func__, ret);
		goto out;
	}

	ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
	if (ret) {
		pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
		goto unregister_out;
	}

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		pr_err("%s: Failed to reserve BO\n", __func__);
		goto release_out;
	}
	amdgpu_bo_placement_from_domain(bo, mem->domain);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		pr_err("%s: failed to validate BO\n", __func__);
	amdgpu_bo_unreserve(bo);

release_out:
	amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
unregister_out:
	if (ret)
		amdgpu_mn_unregister(bo);
out:
	mutex_unlock(&process_info->lock);
	return ret;
}
/* Reserving a BO and its page table BOs must happen atomically to
 * avoid deadlocks. Some operations update multiple VMs at once. Track
 * all the reservation info in a context structure. Optionally a sync
 * object can track VM updates.
 */
struct bo_vm_reservation_context {
	struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
	unsigned int n_vms;		    /* Number of VMs reserved	    */
	struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
	struct ww_acquire_ctx ticket;	    /* Reservation ticket	    */
	struct list_head list, duplicates;  /* BO lists			    */
	struct amdgpu_sync *sync;	    /* Pointer to sync object	    */
	bool reserved;			    /* Whether BOs are reserved	    */
};

enum bo_vm_match {
	BO_VM_NOT_MAPPED = 0,	/* Match VMs where a BO is not mapped */
	BO_VM_MAPPED,		/* Match VMs where a BO is mapped     */
	BO_VM_ALL,		/* Match all VMs a BO was added to    */
};
/**
 * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
 * @mem: KFD BO structure.
 * @vm: the VM to reserve.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 */
static int reserve_bo_and_vm(struct kgd_mem *mem,
			      struct amdgpu_vm *vm,
			      struct bo_vm_reservation_context *ctx)
{
	struct amdgpu_bo *bo = mem->bo;
	int ret;

	WARN_ON(!vm);

	ctx->reserved = false;
	ctx->n_vms = 1;
	ctx->sync = &mem->sync;

	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->duplicates);

	ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
	if (!ctx->vm_pd)
		return -ENOMEM;

	ctx->kfd_bo.priority = 0;
	ctx->kfd_bo.tv.bo = &bo->tbo;
	ctx->kfd_bo.tv.num_shared = 1;
	list_add(&ctx->kfd_bo.tv.head, &ctx->list);

	amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);

	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
				     false, &ctx->duplicates);
	if (ret) {
		pr_err("Failed to reserve buffers in ttm.\n");
		kfree(ctx->vm_pd);
		ctx->vm_pd = NULL;
		return ret;
	}

	ctx->reserved = true;
	return 0;
}
/**
 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
 * @mem: KFD BO structure.
 * @vm: the VM to reserve. If NULL, then all VMs associated with the BO
 *  are used. Otherwise, a single VM associated with the BO.
 * @map_type: the mapping status that will be used to filter the VMs.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 *
 * Returns 0 for success, negative for failure.
 */
static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
				struct amdgpu_vm *vm, enum bo_vm_match map_type,
				struct bo_vm_reservation_context *ctx)
{
	struct amdgpu_bo *bo = mem->bo;
	struct kfd_mem_attachment *entry;
	unsigned int i;
	int ret;

	ctx->reserved = false;
	ctx->n_vms = 0;
	ctx->vm_pd = NULL;
	ctx->sync = &mem->sync;

	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->duplicates);

	list_for_each_entry(entry, &mem->attachments, list) {
		if ((vm && vm != entry->bo_va->base.vm) ||
			(entry->is_mapped != map_type
			&& map_type != BO_VM_ALL))
			continue;

		ctx->n_vms++;
	}

	if (ctx->n_vms != 0) {
		ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
				     GFP_KERNEL);
		if (!ctx->vm_pd)
			return -ENOMEM;
	}

	ctx->kfd_bo.priority = 0;
	ctx->kfd_bo.tv.bo = &bo->tbo;
	ctx->kfd_bo.tv.num_shared = 1;
	list_add(&ctx->kfd_bo.tv.head, &ctx->list);

	i = 0;
	list_for_each_entry(entry, &mem->attachments, list) {
		if ((vm && vm != entry->bo_va->base.vm) ||
			(entry->is_mapped != map_type
			&& map_type != BO_VM_ALL))
			continue;

		amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
				&ctx->vm_pd[i]);
		i++;
	}

	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
				     false, &ctx->duplicates);
	if (ret) {
		pr_err("Failed to reserve buffers in ttm.\n");
		kfree(ctx->vm_pd);
		ctx->vm_pd = NULL;
		return ret;
	}

	ctx->reserved = true;
	return 0;
}
/**
 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
 * @ctx: Reservation context to unreserve
 * @wait: Optionally wait for a sync object representing pending VM updates
 * @intr: Whether the wait is interruptible
 *
 * Also frees any resources allocated in
 * reserve_bo_and_(cond_)vm(s). Returns the status from
 * amdgpu_sync_wait.
 */
static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
				 bool wait, bool intr)
{
	int ret = 0;

	if (wait)
		ret = amdgpu_sync_wait(ctx->sync, intr);

	if (ctx->reserved)
		ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
	kfree(ctx->vm_pd);

	ctx->sync = NULL;

	ctx->reserved = false;
	ctx->vm_pd = NULL;

	return ret;
}
static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
				struct kfd_mem_attachment *entry,
				struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = entry->bo_va;
	struct amdgpu_device *adev = entry->adev;
	struct amdgpu_vm *vm = bo_va->base.vm;

	amdgpu_vm_bo_unmap(adev, bo_va, entry->va);

	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);

	amdgpu_sync_fence(sync, bo_va->last_pt_update);

	kfd_mem_dmaunmap_attachment(mem, entry);
}
static int update_gpuvm_pte(struct kgd_mem *mem,
			    struct kfd_mem_attachment *entry,
			    struct amdgpu_sync *sync,
			    bool *table_freed)
{
	struct amdgpu_bo_va *bo_va = entry->bo_va;
	struct amdgpu_device *adev = entry->adev;
	int ret;

	ret = kfd_mem_dmamap_attachment(mem, entry);
	if (ret)
		return ret;

	/* Update the page tables */
	ret = amdgpu_vm_bo_update(adev, bo_va, false, table_freed);
	if (ret) {
		pr_err("amdgpu_vm_bo_update failed\n");
		return ret;
	}

	return amdgpu_sync_fence(sync, bo_va->last_pt_update);
}
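/* map_bo_to_gpuvm records the VA mapping and, unless no_update_pte is
 * set (invalid userptr BOs, whose PTEs are filled in later by the
 * restore worker), immediately updates the page tables via
 * update_gpuvm_pte().
 */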
static int map_bo_to_gpuvm(struct kgd_mem *mem,
			   struct kfd_mem_attachment *entry,
			   struct amdgpu_sync *sync,
			   bool no_update_pte,
			   bool *table_freed)
{
	int ret;

	/* Set virtual address for the allocation */
	ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0,
			       amdgpu_bo_size(entry->bo_va->base.bo),
			       entry->pte_flags);
	if (ret) {
		pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
				entry->va, ret);
		return ret;
	}

	if (no_update_pte)
		return 0;

	ret = update_gpuvm_pte(mem, entry, sync, table_freed);
	if (ret) {
		pr_err("update_gpuvm_pte() failed\n");
		goto update_gpuvm_pte_failed;
	}

	return 0;

update_gpuvm_pte_failed:
	unmap_bo_from_gpuvm(mem, entry, sync);
	return ret;
}
static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
{
	struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);

	if (!sg)
		return NULL;
	if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
		kfree(sg);
		return NULL;
	}
	sg->sgl->dma_address = addr;
	sg->sgl->length = size;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
	sg->sgl->dma_length = size;
#endif
	return sg;
}
static int process_validate_vms(struct amdkfd_process_info *process_info)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		ret = vm_validate_pt_pd_bos(peer_vm);
		if (ret)
			return ret;
	}

	return 0;
}

static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
				 struct amdgpu_sync *sync)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		struct amdgpu_bo *pd = peer_vm->root.base.bo;

		ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
				       AMDGPU_SYNC_NE_OWNER,
				       AMDGPU_FENCE_OWNER_KFD);
		if (ret)
			return ret;
	}

	return 0;
}

static int process_update_pds(struct amdkfd_process_info *process_info,
			      struct amdgpu_sync *sync)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		ret = vm_update_pds(peer_vm, sync);
		if (ret)
			return ret;
	}

	return 0;
}
static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
		       struct dma_fence **ef)
{
	struct amdkfd_process_info *info = NULL;
	int ret;

	if (!*process_info) {
		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		mutex_init(&info->lock);
		INIT_LIST_HEAD(&info->vm_list_head);
		INIT_LIST_HEAD(&info->kfd_bo_list);
		INIT_LIST_HEAD(&info->userptr_valid_list);
		INIT_LIST_HEAD(&info->userptr_inval_list);

		info->eviction_fence =
			amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
						   current->mm,
						   NULL);
		if (!info->eviction_fence) {
			pr_err("Failed to create eviction fence\n");
			ret = -ENOMEM;
			goto create_evict_fence_fail;
		}

		info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
		atomic_set(&info->evicted_bos, 0);
		INIT_DELAYED_WORK(&info->restore_userptr_work,
				  amdgpu_amdkfd_restore_userptr_worker);

		*process_info = info;
		*ef = dma_fence_get(&info->eviction_fence->base);
	}

	vm->process_info = *process_info;

	/* Validate page directory and attach eviction fence */
	ret = amdgpu_bo_reserve(vm->root.base.bo, true);
	if (ret)
		goto reserve_pd_fail;
	ret = vm_validate_pt_pd_bos(vm);
	if (ret) {
		pr_err("validate_pt_pd_bos() failed\n");
		goto validate_pd_fail;
	}
	ret = amdgpu_bo_sync_wait(vm->root.base.bo,
				  AMDGPU_FENCE_OWNER_KFD, false);
	if (ret)
		goto wait_pd_fail;
	ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
	if (ret)
		goto reserve_shared_fail;
	amdgpu_bo_fence(vm->root.base.bo,
			&vm->process_info->eviction_fence->base, true);
	amdgpu_bo_unreserve(vm->root.base.bo);

	/* Update process info */
	mutex_lock(&vm->process_info->lock);
	list_add_tail(&vm->vm_list_node,
			&(vm->process_info->vm_list_head));
	vm->process_info->n_vms++;
	mutex_unlock(&vm->process_info->lock);

	return 0;

reserve_shared_fail:
wait_pd_fail:
validate_pd_fail:
	amdgpu_bo_unreserve(vm->root.base.bo);
reserve_pd_fail:
	vm->process_info = NULL;
	if (info) {
		/* Two fence references: one in info and one in *ef */
		dma_fence_put(&info->eviction_fence->base);
		dma_fence_put(*ef);
		*ef = NULL;
		*process_info = NULL;
		put_pid(info->pid);
create_evict_fence_fail:
		mutex_destroy(&info->lock);
		kfree(info);
	}
	return ret;
}
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
					   struct file *filp, u32 pasid,
					   void **process_info,
					   struct dma_fence **ef)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_fpriv *drv_priv;
	struct amdgpu_vm *avm;
	int ret;

	ret = amdgpu_file_to_fpriv(filp, &drv_priv);
	if (ret)
		return ret;
	avm = &drv_priv->vm;

	/* Already a compute VM? */
	if (avm->process_info)
		return -EINVAL;

	/* Convert VM into a compute VM */
	ret = amdgpu_vm_make_compute(adev, avm, pasid);
	if (ret)
		return ret;

	/* Initialize KFD part of the VM and process info */
	ret = init_kfd_vm(avm, process_info, ef);
	if (ret)
		return ret;

	amdgpu_vm_set_task_info(avm);

	return 0;
}
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
	struct amdkfd_process_info *process_info = vm->process_info;
	struct amdgpu_bo *pd = vm->root.base.bo;

	if (!process_info)
		return;

	/* Release eviction fence from PD */
	amdgpu_bo_reserve(pd, false);
	amdgpu_bo_fence(pd, NULL, false);
	amdgpu_bo_unreserve(pd);

	/* Update process info */
	mutex_lock(&process_info->lock);
	process_info->n_vms--;
	list_del(&vm->vm_list_node);
	mutex_unlock(&process_info->lock);

	vm->process_info = NULL;

	/* Release per-process resources when last compute VM is destroyed */
	if (!process_info->n_vms) {
		WARN_ON(!list_empty(&process_info->kfd_bo_list));
		WARN_ON(!list_empty(&process_info->userptr_valid_list));
		WARN_ON(!list_empty(&process_info->userptr_inval_list));

		dma_fence_put(&process_info->eviction_fence->base);
		cancel_delayed_work_sync(&process_info->restore_userptr_work);
		put_pid(process_info->pid);
		mutex_destroy(&process_info->lock);
		kfree(process_info);
	}
}
void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm;

	if (WARN_ON(!kgd || !drm_priv))
		return;

	avm = drm_priv_to_vm(drm_priv);

	pr_debug("Releasing process vm %p\n", avm);

	/* The original pasid of the amdgpu vm was already released when
	 * it was converted to a compute vm. The current pasid is managed
	 * by kfd and will be released on kfd process destroy. Set the
	 * amdgpu pasid to 0 to avoid a duplicate release.
	 */
	amdgpu_vm_release_compute(adev, avm);
}
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
{
	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
	struct amdgpu_bo *pd = avm->root.base.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);

	if (adev->asic_type < CHIP_VEGA10)
		return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
	return avm->pd_phys_addr;
}
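/* Note: ASICs older than Vega10 program the page directory base as a
 * GPU page number, hence the shift above; newer ASICs consume the
 * address returned by amdgpu_gmc_pd_addr() as-is.
 */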
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
		struct kgd_dev *kgd, uint64_t va, uint64_t size,
		void *drm_priv, struct kgd_mem **mem,
		uint64_t *offset, uint32_t flags)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct sg_table *sg = NULL;
	uint64_t user_addr = 0;
	struct amdgpu_bo *bo;
	struct drm_gem_object *gobj;
	u32 domain, alloc_domain;
	u64 alloc_flags;
	int ret;

	/*
	 * Check on which domain to allocate BO
	 */
	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
		alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
		alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
			AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE;
	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
		domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
		alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE;
		if (!offset || !*offset)
			return -EINVAL;
		user_addr = untagged_addr(*offset);
	} else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
			KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
		domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
		bo_type = ttm_bo_type_sg;
		alloc_flags = 0;
		if (size > UINT_MAX)
			return -EINVAL;
		sg = create_doorbell_sg(*offset, size);
		if (!sg)
			return -ENOMEM;
	} else {
		return -EINVAL;
	}

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem) {
		ret = -ENOMEM;
		goto err;
	}
	INIT_LIST_HEAD(&(*mem)->attachments);
	mutex_init(&(*mem)->lock);
	(*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);

	/* Workaround for AQL queue wraparound bug. Map the same
	 * memory twice. That means we only actually allocate half
	 * the memory.
	 */
	if ((*mem)->aql_queue)
		size = size >> 1;

	(*mem)->alloc_flags = flags;

	amdgpu_sync_create(&(*mem)->sync);

	ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
	if (ret) {
		pr_debug("Insufficient memory\n");
		goto err_reserve_limit;
	}

	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
			va, size, domain_string(alloc_domain));

	ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags,
				       bo_type, NULL, &gobj);
	if (ret) {
		pr_debug("Failed to create BO on domain %s. ret %d\n",
			 domain_string(alloc_domain), ret);
		goto err_bo_create;
	}
	ret = drm_vma_node_allow(&gobj->vma_node, drm_priv);
	if (ret) {
		pr_debug("Failed to allow vma node access. ret %d\n", ret);
		goto err_node_allow;
	}
	bo = gem_to_amdgpu_bo(gobj);
	if (bo_type == ttm_bo_type_sg) {
		bo->tbo.sg = sg;
		bo->tbo.ttm->sg = sg;
	}
	bo->kfd_bo = *mem;
	(*mem)->bo = bo;
	if (user_addr)
		bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO;

	(*mem)->va = va;
	(*mem)->domain = domain;
	(*mem)->mapped_to_gpu_memory = 0;
	(*mem)->process_info = avm->process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);

	if (user_addr) {
		ret = init_user_pages(*mem, user_addr);
		if (ret)
			goto allocate_init_user_pages_failed;
	}

	if (offset)
		*offset = amdgpu_bo_mmap_offset(bo);

	return 0;

allocate_init_user_pages_failed:
	remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
	drm_vma_node_revoke(&gobj->vma_node, drm_priv);
err_node_allow:
	amdgpu_bo_unref(&bo);
	/* Don't unreserve system mem limit twice */
	goto err_reserve_limit;
err_bo_create:
	unreserve_mem_limit(adev, size, alloc_domain, !!sg);
err_reserve_limit:
	mutex_destroy(&(*mem)->lock);
	kfree(*mem);
err:
	if (sg) {
		sg_free_table(sg);
		kfree(sg);
	}
	return ret;
}
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv,
		uint64_t *size)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	unsigned long bo_size = mem->bo->tbo.base.size;
	struct kfd_mem_attachment *entry, *tmp;
	struct bo_vm_reservation_context ctx;
	struct ttm_validate_buffer *bo_list_entry;
	unsigned int mapped_to_gpu_memory;
	int ret;
	bool is_imported = false;

	mutex_lock(&mem->lock);
	mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
	is_imported = mem->is_imported;
	mutex_unlock(&mem->lock);
	/* lock is not needed after this, since mem is unused and will
	 * be freed anyway
	 */

	if (mapped_to_gpu_memory > 0) {
		pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
				mem->va, bo_size);
		return -EBUSY;
	}

	/* Make sure restore workers don't access the BO any more */
	bo_list_entry = &mem->validate_list;
	mutex_lock(&process_info->lock);
	list_del(&bo_list_entry->head);
	mutex_unlock(&process_info->lock);

	/* No more MMU notifiers */
	amdgpu_mn_unregister(mem->bo);

	ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
	if (unlikely(ret))
		return ret;

	/* The eviction fence should be removed by the last unmap.
	 * TODO: Log an error condition if the bo still has the eviction fence
	 * attached
	 */
	amdgpu_amdkfd_remove_eviction_fence(mem->bo,
					process_info->eviction_fence);
	pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
		mem->va + bo_size * (1 + mem->aql_queue));

	ret = unreserve_bo_and_vms(&ctx, false, false);

	/* Remove from VM internal data structures */
	list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
		kfd_mem_detach(entry);

	/* Free the sync object */
	amdgpu_sync_free(&mem->sync);

	/* If the SG is not NULL, it's one we created for a doorbell or mmio
	 * remap BO. We need to free it.
	 */
	if (mem->bo->tbo.sg) {
		sg_free_table(mem->bo->tbo.sg);
		kfree(mem->bo->tbo.sg);
	}

	/* Update the size of the BO being freed if it was allocated from
	 * VRAM and is not imported.
	 */
	if (size) {
		if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
		    (!is_imported))
			*size = bo_size;
		else
			*size = 0;
	}

	/* Free the BO */
	drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
	if (mem->dmabuf)
		dma_buf_put(mem->dmabuf);
	drm_gem_object_put(&mem->bo->tbo.base);
	mutex_destroy(&mem->lock);
	kfree(mem);

	return ret;
}
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem,
		void *drm_priv, bool *table_freed)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
	int ret;
	struct amdgpu_bo *bo;
	uint32_t domain;
	struct kfd_mem_attachment *entry;
	struct bo_vm_reservation_context ctx;
	unsigned long bo_size;
	bool is_invalid_userptr = false;

	bo = mem->bo;
	if (!bo) {
		pr_err("Invalid BO when mapping memory to GPU\n");
		return -EINVAL;
	}

	/* Make sure restore is not running concurrently. Since we
	 * don't map invalid userptr BOs, we rely on the next restore
	 * worker to do the mapping
	 */
	mutex_lock(&mem->process_info->lock);

	/* Lock mmap-sem. If we find an invalid userptr BO, we can be
	 * sure that the MMU notifier is no longer running
	 * concurrently and the queues are actually stopped
	 */
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		mmap_write_lock(current->mm);
		is_invalid_userptr = atomic_read(&mem->invalid);
		mmap_write_unlock(current->mm);
	}

	mutex_lock(&mem->lock);

	domain = mem->domain;
	bo_size = bo->tbo.base.size;

	pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
			mem->va,
			mem->va + bo_size * (1 + mem->aql_queue),
			avm, domain_string(domain));

	if (!kfd_mem_is_attached(avm, mem)) {
		ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue);
		if (ret)
			goto out;
	}

	ret = reserve_bo_and_vm(mem, avm, &ctx);
	if (unlikely(ret))
		goto out;

	/* Userptr can be marked as "not invalid", but not actually be
	 * validated yet (still in the system domain). In that case
	 * the queues are still stopped and we can leave mapping for
	 * the next restore worker
	 */
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
	    bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
		is_invalid_userptr = true;

	ret = vm_validate_pt_pd_bos(avm);
	if (unlikely(ret))
		goto out_unreserve;

	if (mem->mapped_to_gpu_memory == 0 &&
	    !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		/* Validate BO only once. The eviction fence gets added to BO
		 * the first time it is mapped. Validate will wait for all
		 * background evictions to complete.
		 */
		ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
		if (ret) {
			pr_debug("Validate failed\n");
			goto out_unreserve;
		}
	}

	list_for_each_entry(entry, &mem->attachments, list) {
		if (entry->bo_va->base.vm != avm || entry->is_mapped)
			continue;

		pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
			 entry->va, entry->va + bo_size, entry);

		ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
				      is_invalid_userptr, table_freed);
		if (ret) {
			pr_err("Failed to map bo to gpuvm\n");
			goto out_unreserve;
		}

		ret = vm_update_pds(avm, ctx.sync);
		if (ret) {
			pr_err("Failed to update page directories\n");
			goto out_unreserve;
		}

		entry->is_mapped = true;
		mem->mapped_to_gpu_memory++;
		pr_debug("\t INC mapping count %d\n",
			 mem->mapped_to_gpu_memory);
	}

	if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
		amdgpu_bo_fence(bo,
				&avm->process_info->eviction_fence->base,
				true);
	ret = unreserve_bo_and_vms(&ctx, false, false);

	goto out;

out_unreserve:
	unreserve_bo_and_vms(&ctx, false, false);
out:
	mutex_unlock(&mem->process_info->lock);
	mutex_unlock(&mem->lock);
	return ret;
}
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
{
	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
	struct amdkfd_process_info *process_info = avm->process_info;
	unsigned long bo_size = mem->bo->tbo.base.size;
	struct kfd_mem_attachment *entry;
	struct bo_vm_reservation_context ctx;
	int ret;

	mutex_lock(&mem->lock);

	ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx);
	if (unlikely(ret))
		goto out;
	/* If no VMs were reserved, it means the BO wasn't actually mapped */
	if (ctx.n_vms == 0) {
		ret = -EINVAL;
		goto unreserve_out;
	}

	ret = vm_validate_pt_pd_bos(avm);
	if (unlikely(ret))
		goto unreserve_out;

	pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
		mem->va,
		mem->va + bo_size * (1 + mem->aql_queue),
		avm);

	list_for_each_entry(entry, &mem->attachments, list) {
		if (entry->bo_va->base.vm != avm || !entry->is_mapped)
			continue;

		pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
			 entry->va, entry->va + bo_size, entry);

		unmap_bo_from_gpuvm(mem, entry, ctx.sync);
		entry->is_mapped = false;

		mem->mapped_to_gpu_memory--;
		pr_debug("\t DEC mapping count %d\n",
			 mem->mapped_to_gpu_memory);
	}

	/* If BO is unmapped from all VMs, unfence it. It can be evicted if
	 * required.
	 */
	if (mem->mapped_to_gpu_memory == 0 &&
	    !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) &&
	    !mem->bo->tbo.pin_count)
		amdgpu_amdkfd_remove_eviction_fence(mem->bo,
						process_info->eviction_fence);

unreserve_out:
	unreserve_bo_and_vms(&ctx, false, false);
out:
	mutex_unlock(&mem->lock);
	return ret;
}
int amdgpu_amdkfd_gpuvm_sync_memory(
		struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
{
	struct amdgpu_sync sync;
	int ret;

	amdgpu_sync_create(&sync);

	mutex_lock(&mem->lock);
	amdgpu_sync_clone(&mem->sync, &sync);
	mutex_unlock(&mem->lock);

	ret = amdgpu_sync_wait(&sync, intr);
	amdgpu_sync_free(&sync);
	return ret;
}
int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
		struct kgd_mem *mem, void **kptr, uint64_t *size)
{
	int ret;
	struct amdgpu_bo *bo = mem->bo;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		pr_err("userptr can't be mapped to kernel\n");
		return -EINVAL;
	}

	/* delete kgd_mem from kfd_bo_list to avoid re-validating
	 * this BO in BO's restoring after eviction.
	 */
	mutex_lock(&mem->process_info->lock);

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		pr_err("Failed to reserve bo. ret %d\n", ret);
		goto bo_reserve_failed;
	}

	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (ret) {
		pr_err("Failed to pin bo. ret %d\n", ret);
		goto bo_pin_failed;
	}

	ret = amdgpu_bo_kmap(bo, kptr);
	if (ret) {
		pr_err("Failed to map bo to kernel. ret %d\n", ret);
		goto bo_kmap_failed;
	}

	amdgpu_amdkfd_remove_eviction_fence(
		bo, mem->process_info->eviction_fence);
	list_del_init(&mem->validate_list.head);

	if (size)
		*size = amdgpu_bo_size(bo);

	amdgpu_bo_unreserve(bo);

	mutex_unlock(&mem->process_info->lock);
	return 0;

bo_kmap_failed:
	amdgpu_bo_unpin(bo);
bo_pin_failed:
	amdgpu_bo_unreserve(bo);
bo_reserve_failed:
	mutex_unlock(&mem->process_info->lock);

	return ret;
}
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
					      struct kfd_vm_fault_info *mem)
{
	struct amdgpu_device *adev;

	adev = (struct amdgpu_device *)kgd;
	if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
		*mem = *adev->gmc.vm_fault_info;
		mb();
		atomic_set(&adev->gmc.vm_fault_info_updated, 0);
	}
	return 0;
}
int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
				      struct dma_buf *dma_buf,
				      uint64_t va, void *drm_priv,
				      struct kgd_mem **mem, uint64_t *size,
				      uint64_t *mmap_offset)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	int ret;

	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		/* Can't handle non-graphics buffers */
		return -EINVAL;

	obj = dma_buf->priv;
	if (drm_to_adev(obj->dev) != adev)
		/* Can't handle buffers from other devices */
		return -EINVAL;

	bo = gem_to_amdgpu_bo(obj);
	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT)))
		/* Only VRAM and GTT BOs are supported */
		return -EINVAL;

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem)
		return -ENOMEM;

	ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
	if (ret) {
		kfree(*mem);
		return ret;
	}

	if (size)
		*size = amdgpu_bo_size(bo);

	if (mmap_offset)
		*mmap_offset = amdgpu_bo_mmap_offset(bo);

	INIT_LIST_HEAD(&(*mem)->attachments);
	mutex_init(&(*mem)->lock);

	(*mem)->alloc_flags =
		((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
		KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
		| KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
		| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;

	drm_gem_object_get(&bo->tbo.base);
	(*mem)->bo = bo;
	(*mem)->va = va;
	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
		AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
	(*mem)->mapped_to_gpu_memory = 0;
	(*mem)->process_info = avm->process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
	amdgpu_sync_create(&(*mem)->sync);
	(*mem)->is_imported = true;

	return 0;
}
/* Evict a userptr BO by stopping the queues if necessary
 *
 * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
 * cannot do any memory allocations, and cannot take any locks that
 * are held elsewhere while allocating memory. Therefore this is as
 * simple as possible, using atomic counters.
 *
 * It doesn't do anything to the BO itself. The real work happens in
 * restore, where we get updated page addresses. This function only
 * ensures that GPU access to the BO is stopped.
 */
int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
				struct mm_struct *mm)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	int evicted_bos;
	int r = 0;

	atomic_inc(&mem->invalid);
	evicted_bos = atomic_inc_return(&process_info->evicted_bos);
	if (evicted_bos == 1) {
		/* First eviction, stop the queues */
		r = kgd2kfd_quiesce_mm(mm);
		if (r)
			pr_err("Failed to quiesce KFD\n");
		schedule_delayed_work(&process_info->restore_userptr_work,
			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
	}

	return r;
}
/* Update invalid userptr BOs
 *
 * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
 * userptr_inval_list and updates user pages for all BOs that have
 * been invalidated since their last update.
 */
static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
				     struct mm_struct *mm)
{
	struct kgd_mem *mem, *tmp_mem;
	struct amdgpu_bo *bo;
	struct ttm_operation_ctx ctx = { false, false };
	int invalid, ret;

	/* Move all invalidated BOs to the userptr_inval_list and
	 * release their user pages by migration to the CPU domain
	 */
	list_for_each_entry_safe(mem, tmp_mem,
				 &process_info->userptr_valid_list,
				 validate_list.head) {
		if (!atomic_read(&mem->invalid))
			continue; /* BO is still valid */

		bo = mem->bo;

		if (amdgpu_bo_reserve(bo, true))
			return -EAGAIN;
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (ret) {
			pr_err("%s: Failed to invalidate userptr BO\n",
			       __func__);
			return -EAGAIN;
		}

		list_move_tail(&mem->validate_list.head,
			       &process_info->userptr_inval_list);
	}

	if (list_empty(&process_info->userptr_inval_list))
		return 0; /* All evicted userptr BOs were freed */

	/* Go through userptr_inval_list and update any invalid user_pages */
	list_for_each_entry(mem, &process_info->userptr_inval_list,
			    validate_list.head) {
		invalid = atomic_read(&mem->invalid);
		if (!invalid)
			/* BO hasn't been invalidated since the last
			 * revalidation attempt. Keep its BO list.
			 */
			continue;

		bo = mem->bo;

		/* Get updated user pages */
		ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
		if (ret) {
			pr_debug("%s: Failed to get user pages: %d\n",
				__func__, ret);

			/* Return error -EBUSY or -ENOMEM, retry restore */
			return ret;
		}

		/*
		 * FIXME: Cannot ignore the return code, must hold
		 * notifier_lock
		 */
		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);

		/* Mark the BO as valid unless it was invalidated
		 * again concurrently.
		 */
		if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
			return -EAGAIN;
	}

	return 0;
}
/* Validate invalid userptr BOs
 *
 * Validates BOs on the userptr_inval_list, and moves them back to the
 * userptr_valid_list. Also updates GPUVM page tables with new page
 * addresses and waits for the page table updates to complete.
 */
static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
{
	struct amdgpu_bo_list_entry *pd_bo_list_entries;
	struct list_head resv_list, duplicates;
	struct ww_acquire_ctx ticket;
	struct amdgpu_sync sync;

	struct amdgpu_vm *peer_vm;
	struct kgd_mem *mem, *tmp_mem;
	struct amdgpu_bo *bo;
	struct ttm_operation_ctx ctx = { false, false };
	int i = 0, ret;

	pd_bo_list_entries = kcalloc(process_info->n_vms,
				     sizeof(struct amdgpu_bo_list_entry),
				     GFP_KERNEL);
	if (!pd_bo_list_entries) {
		pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
		ret = -ENOMEM;
		goto out_no_mem;
	}

	INIT_LIST_HEAD(&resv_list);
	INIT_LIST_HEAD(&duplicates);

	/* Get all the page directory BOs that need to be reserved */
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node)
		amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
				    &pd_bo_list_entries[i++]);
	/* Add the userptr_inval_list entries to resv_list */
	list_for_each_entry(mem, &process_info->userptr_inval_list,
			    validate_list.head) {
		list_add_tail(&mem->resv_list.head, &resv_list);
		mem->resv_list.bo = mem->validate_list.bo;
		mem->resv_list.num_shared = mem->validate_list.num_shared;
	}

	/* Reserve all BOs and page tables for validation */
	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
	WARN(!list_empty(&duplicates), "Duplicates should be empty");
	if (ret)
		goto out_free;

	amdgpu_sync_create(&sync);

	ret = process_validate_vms(process_info);
	if (ret)
		goto unreserve_out;

	/* Validate BOs and update GPUVM page tables */
	list_for_each_entry_safe(mem, tmp_mem,
				 &process_info->userptr_inval_list,
				 validate_list.head) {
		struct kfd_mem_attachment *attachment;

		bo = mem->bo;

		/* Validate the BO if we got user pages */
		if (bo->tbo.ttm->pages[0]) {
			amdgpu_bo_placement_from_domain(bo, mem->domain);
			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (ret) {
				pr_err("%s: failed to validate BO\n", __func__);
				goto unreserve_out;
			}
		}

		list_move_tail(&mem->validate_list.head,
			       &process_info->userptr_valid_list);

		/* Update mapping. If the BO was not validated
		 * (because we couldn't get user pages), this will
		 * clear the page table entries, which will result in
		 * VM faults if the GPU tries to access the invalid
		 * memory.
		 */
		list_for_each_entry(attachment, &mem->attachments, list) {
			if (!attachment->is_mapped)
				continue;

			kfd_mem_dmaunmap_attachment(mem, attachment);
			ret = update_gpuvm_pte(mem, attachment, &sync, NULL);
			if (ret) {
				pr_err("%s: update PTE failed\n", __func__);
				/* make sure this gets validated again */
				atomic_inc(&mem->invalid);
				goto unreserve_out;
			}
		}
	}

	/* Update page directories */
	ret = process_update_pds(process_info, &sync);

unreserve_out:
	ttm_eu_backoff_reservation(&ticket, &resv_list);
	amdgpu_sync_wait(&sync, false);
	amdgpu_sync_free(&sync);
out_free:
	kfree(pd_bo_list_entries);
out_no_mem:

	return ret;
}
/* Worker callback to restore evicted userptr BOs
 *
 * Tries to update and validate all userptr BOs. If successful and no
 * concurrent evictions happened, the queues are restarted. Otherwise,
 * reschedule for another attempt later.
 */
static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct amdkfd_process_info *process_info =
		container_of(dwork, struct amdkfd_process_info,
			     restore_userptr_work);
	struct task_struct *usertask;
	struct mm_struct *mm;
	int evicted_bos;

	evicted_bos = atomic_read(&process_info->evicted_bos);
	if (!evicted_bos)
		return;

	/* Reference task and mm in case of concurrent process termination */
	usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
	if (!usertask)
		return;
	mm = get_task_mm(usertask);
	if (!mm) {
		put_task_struct(usertask);
		return;
	}

	mutex_lock(&process_info->lock);

	if (update_invalid_user_pages(process_info, mm))
		goto unlock_out;
	/* userptr_inval_list can be empty if all evicted userptr BOs
	 * have been freed. In that case there is nothing to validate
	 * and we can just restart the queues.
	 */
	if (!list_empty(&process_info->userptr_inval_list)) {
		if (atomic_read(&process_info->evicted_bos) != evicted_bos)
			goto unlock_out; /* Concurrent eviction, try again */

		if (validate_invalid_user_pages(process_info))
			goto unlock_out;
	}
	/* Final check for concurrent eviction and atomic update. If
	 * another eviction happens after successful update, it will
	 * be a first eviction that calls quiesce_mm. The eviction
	 * reference counting inside KFD will handle this case.
	 */
	if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
	    evicted_bos)
		goto unlock_out;
	evicted_bos = 0;
	if (kgd2kfd_resume_mm(mm)) {
		pr_err("%s: Failed to resume KFD\n", __func__);
		/* No recovery from this failure. Probably the CP is
		 * hanging. No point trying again.
		 */
	}

unlock_out:
	mutex_unlock(&process_info->lock);
	mmput(mm);
	put_task_struct(usertask);

	/* If validation failed, reschedule another attempt */
	if (evicted_bos)
		schedule_delayed_work(&process_info->restore_userptr_work,
			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
}
/** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
 *   KFD process identified by process_info
 *
 * @process_info: amdkfd_process_info of the KFD process
 *
 * After memory eviction, restore thread calls this function. The function
 * should be called when the Process is still valid. BO restore involves -
 *
 * 1.  Release old eviction fence and create new one
 * 2.  Get two copies of PD BO list from all the VMs. Keep one copy as pd_list.
 * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
 *     BOs that need to be reserved.
 * 4.  Reserve all the BOs
 * 5.  Validate PD and PT BOs.
 * 6.  Validate all KFD BOs using kfd_bo_list and Map them and add new fence
 * 7.  Add fence to all PD and PT BOs.
 * 8.  Unreserve all BOs
 */
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
{
	struct amdgpu_bo_list_entry *pd_bo_list;
	struct amdkfd_process_info *process_info = info;
	struct amdgpu_vm *peer_vm;
	struct kgd_mem *mem;
	struct bo_vm_reservation_context ctx;
	struct amdgpu_amdkfd_fence *new_fence;
	int ret = 0, i;
	struct list_head duplicate_save;
	struct amdgpu_sync sync_obj;
	unsigned long failed_size = 0;
	unsigned long total_size = 0;

	INIT_LIST_HEAD(&duplicate_save);
	INIT_LIST_HEAD(&ctx.list);
	INIT_LIST_HEAD(&ctx.duplicates);

	pd_bo_list = kcalloc(process_info->n_vms,
			     sizeof(struct amdgpu_bo_list_entry),
			     GFP_KERNEL);
	if (!pd_bo_list)
		return -ENOMEM;

	i = 0;
	mutex_lock(&process_info->lock);
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			vm_list_node)
		amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);

	/* Reserve all BOs and page tables/directory. Add all BOs from
	 * kfd_bo_list to ctx.list
	 */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head) {

		list_add_tail(&mem->resv_list.head, &ctx.list);
		mem->resv_list.bo = mem->validate_list.bo;
		mem->resv_list.num_shared = mem->validate_list.num_shared;
	}

	ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
				     false, &duplicate_save);
	if (ret) {
		pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
		goto ttm_reserve_fail;
	}

	amdgpu_sync_create(&sync_obj);

	/* Validate PDs and PTs */
	ret = process_validate_vms(process_info);
	if (ret)
		goto validate_map_fail;

	ret = process_sync_pds_resv(process_info, &sync_obj);
	if (ret) {
		pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
		goto validate_map_fail;
	}

	/* Validate BOs and map them to GPUVM (update VM page tables). */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head) {

		struct amdgpu_bo *bo = mem->bo;
		uint32_t domain = mem->domain;
		struct kfd_mem_attachment *attachment;

		total_size += amdgpu_bo_size(bo);

		ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
		if (ret) {
			pr_debug("Memory eviction: Validate BOs failed\n");
			failed_size += amdgpu_bo_size(bo);
			ret = amdgpu_amdkfd_bo_validate(bo,
						AMDGPU_GEM_DOMAIN_GTT, false);
			if (ret) {
				pr_debug("Memory eviction: Try again\n");
				goto validate_map_fail;
			}
		}
		ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving);
		if (ret) {
			pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
			goto validate_map_fail;
		}
		list_for_each_entry(attachment, &mem->attachments, list) {
			if (!attachment->is_mapped)
				continue;

			kfd_mem_dmaunmap_attachment(mem, attachment);
			ret = update_gpuvm_pte(mem, attachment, &sync_obj, NULL);
			if (ret) {
				pr_debug("Memory eviction: update PTE failed. Try again\n");
				goto validate_map_fail;
			}
		}
	}

	if (failed_size)
		pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);

	/* Update page directories */
	ret = process_update_pds(process_info, &sync_obj);
	if (ret) {
		pr_debug("Memory eviction: update PDs failed. Try again\n");
		goto validate_map_fail;
	}

	/* Wait for validate and PT updates to finish */
	amdgpu_sync_wait(&sync_obj, false);

	/* Release old eviction fence and create new one, because fence only
	 * goes from unsignaled to signaled, fence cannot be reused.
	 * Use context and mm from the old fence.
	 */
	new_fence = amdgpu_amdkfd_fence_create(
				process_info->eviction_fence->base.context,
				process_info->eviction_fence->mm,
				NULL);
	if (!new_fence) {
		pr_err("Failed to create eviction fence\n");
		ret = -ENOMEM;
		goto validate_map_fail;
	}
	dma_fence_put(&process_info->eviction_fence->base);
	process_info->eviction_fence = new_fence;
	*ef = dma_fence_get(&new_fence->base);

	/* Attach new eviction fence to all BOs */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
		validate_list.head)
		amdgpu_bo_fence(mem->bo,
			&process_info->eviction_fence->base, true);

	/* Attach eviction fence to PD / PT BOs */
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		struct amdgpu_bo *bo = peer_vm->root.base.bo;

		amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
	}

validate_map_fail:
	ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
	amdgpu_sync_free(&sync_obj);
ttm_reserve_fail:
	mutex_unlock(&process_info->lock);
	kfree(pd_bo_list);
	return ret;
}
int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
{
	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
	struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
	int ret;

	if (!info || !gws)
		return -EINVAL;

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem)
		return -ENOMEM;

	mutex_init(&(*mem)->lock);
	INIT_LIST_HEAD(&(*mem)->attachments);
	(*mem)->bo = amdgpu_bo_ref(gws_bo);
	(*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
	(*mem)->process_info = process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
	amdgpu_sync_create(&(*mem)->sync);

	/* Validate gws bo the first time it is added to process */
	mutex_lock(&(*mem)->process_info->lock);
	ret = amdgpu_bo_reserve(gws_bo, false);
	if (unlikely(ret)) {
		pr_err("Reserve gws bo failed %d\n", ret);
		goto bo_reservation_failure;
	}

	ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
	if (ret) {
		pr_err("GWS BO validate failed %d\n", ret);
		goto bo_validation_failure;
	}
	/* GWS resource is shared b/t amdgpu and amdkfd
	 * Add process eviction fence to bo so they can
	 * evict each other.
	 */
	ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
	if (ret)
		goto reserve_shared_fail;
	amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
	amdgpu_bo_unreserve(gws_bo);
	mutex_unlock(&(*mem)->process_info->lock);

	return ret;

reserve_shared_fail:
bo_validation_failure:
	amdgpu_bo_unreserve(gws_bo);
bo_reservation_failure:
	mutex_unlock(&(*mem)->process_info->lock);
	amdgpu_sync_free(&(*mem)->sync);
	remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
	amdgpu_bo_unref(&gws_bo);
	mutex_destroy(&(*mem)->lock);
	kfree(*mem);
	*mem = NULL;
	return ret;
}
int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
{
	int ret;
	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
	struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
	struct amdgpu_bo *gws_bo = kgd_mem->bo;

	/* Remove BO from process's validate list so restore worker won't touch
	 * it anymore
	 */
	remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);

	ret = amdgpu_bo_reserve(gws_bo, false);
	if (unlikely(ret)) {
		pr_err("Reserve gws bo failed %d\n", ret);
		//TODO add BO back to validate_list?
		return ret;
	}
	amdgpu_amdkfd_remove_eviction_fence(gws_bo,
			process_info->eviction_fence);
	amdgpu_bo_unreserve(gws_bo);
	amdgpu_sync_free(&kgd_mem->sync);
	amdgpu_bo_unref(&gws_bo);
	mutex_destroy(&kgd_mem->lock);
	kfree(kgd_mem);
	return 0;
}
/* Returns GPU-specific tiling mode information */
int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
				struct tile_config *config)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	config->gb_addr_config = adev->gfx.config.gb_addr_config;
	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
	config->num_tile_configs =
			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	config->macro_tile_config_ptr =
			adev->gfx.config.macrotile_mode_array;
	config->num_macro_tile_configs =
			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

	/* Those values are not set from GFX9 onwards */
	config->num_banks = adev->gfx.config.num_banks;
	config->num_ranks = adev->gfx.config.num_ranks;

	return 0;
}