/*
 * Copyright 2014-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-buf.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
#include <uapi/linux/kfd_ioctl.h>

/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)

/* Userptr restore delay, just long enough to allow consecutive VM
 * changes to accumulate
 */
#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1

/* Impose limit on how much memory KFD can use */
static struct {
	uint64_t max_system_mem_limit;
	uint64_t max_ttm_mem_limit;
	int64_t system_mem_used;
	int64_t ttm_mem_used;
	spinlock_t mem_limit_lock;
} kfd_mem_limit;

/* Struct used for amdgpu_amdkfd_bo_validate */
struct amdgpu_vm_parser {
	uint32_t        domain;
	bool            wait;
};

static const char * const domain_bit_to_string[] = {
		"CPU",
		"GTT",
		"VRAM",
		"GDS",
		"GWS",
		"OA"
};

#define domain_string(domain) domain_bit_to_string[ffs(domain)-1]

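/* For example, AMDGPU_GEM_DOMAIN_VRAM is bit 2 (0x4), so ffs() returns 3
 * and domain_string(AMDGPU_GEM_DOMAIN_VRAM) is "VRAM".
 */
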
static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
		struct kgd_mem *mem)
{
	struct kfd_bo_va_list *entry;

	list_for_each_entry(entry, &mem->bo_va_list, bo_list)
		if (entry->bo_va->base.vm == avm)
			return false;

	return true;
}

/* Set memory usage limits. Currently, the limits are
 *  System (TTM + userptr) memory - 15/16th System RAM
 *  TTM memory - 3/8th System RAM
 */
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
	struct sysinfo si;
	uint64_t mem;

	si_meminfo(&si);
	mem = si.totalram - si.totalhigh;
	mem *= si.mem_unit;

	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
	kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
	kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
		(kfd_mem_limit.max_system_mem_limit >> 20),
		(kfd_mem_limit.max_ttm_mem_limit >> 20));
}

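/* Worked example, assuming 16 GiB of usable system RAM:
 *	max_system_mem_limit = 16 GiB - 1 GiB = 15 GiB	(15/16th)
 *	max_ttm_mem_limit    =  8 GiB - 2 GiB =  6 GiB	(3/8th)
 */
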
/* Estimate page table size needed to represent a given memory size
 *
 * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
 * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
 * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
 * for 2MB pages for TLB efficiency. However, small allocations and
 * fragmented system memory still need some 4KB pages. We choose a
 * compromise that should work in most cases without reserving too
 * much memory for page tables unnecessarily (factor 16K, >> 14).
 */
#define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)

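/* For example, managing 64 GiB of memory reserves
 * ESTIMATE_PT_SIZE(64 GiB) = 64 GiB >> 14 = 4 MiB for page tables.
 */
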
static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 domain, bool sg)
{
	uint64_t reserved_for_pt =
		ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
	size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
	int ret = 0;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	vram_needed = 0;
	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		/* TTM GTT memory */
		system_mem_needed = acc_size + size;
		ttm_mem_needed = acc_size + size;
	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
		/* Userptr */
		system_mem_needed = acc_size + size;
		ttm_mem_needed = acc_size;
	} else {
		/* VRAM and SG */
		system_mem_needed = acc_size;
		ttm_mem_needed = acc_size;
		if (domain == AMDGPU_GEM_DOMAIN_VRAM)
			vram_needed = size;
	}

	spin_lock(&kfd_mem_limit.mem_limit_lock);

	if ((kfd_mem_limit.system_mem_used + system_mem_needed >
	     kfd_mem_limit.max_system_mem_limit) ||
	    (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
	     kfd_mem_limit.max_ttm_mem_limit) ||
	    (adev->kfd.vram_used + vram_needed >
	     adev->gmc.real_vram_size - reserved_for_pt)) {
		ret = -ENOMEM;
	} else {
		kfd_mem_limit.system_mem_used += system_mem_needed;
		kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
		adev->kfd.vram_used += vram_needed;
	}

	spin_unlock(&kfd_mem_limit.mem_limit_lock);
	return ret;
}

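/* Accounting summary for the checks above ("acc" is the TTM accounting
 * size of the BO structure itself):
 *
 *	domain		system_mem	ttm_mem		vram
 *	GTT		acc + size	acc + size	0
 *	CPU (userptr)	acc + size	acc		0
 *	VRAM		acc		acc		size
 *	SG (doorbell)	acc		acc		0
 */
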
static void unreserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 domain, bool sg)
{
	size_t acc_size;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	spin_lock(&kfd_mem_limit.mem_limit_lock);
	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		kfd_mem_limit.system_mem_used -= (acc_size + size);
		kfd_mem_limit.ttm_mem_used -= (acc_size + size);
	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
		kfd_mem_limit.system_mem_used -= (acc_size + size);
		kfd_mem_limit.ttm_mem_used -= acc_size;
	} else {
		kfd_mem_limit.system_mem_used -= acc_size;
		kfd_mem_limit.ttm_mem_used -= acc_size;
		if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
			adev->kfd.vram_used -= size;
			WARN_ONCE(adev->kfd.vram_used < 0,
				  "kfd VRAM memory accounting unbalanced");
		}
	}
	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
		  "kfd system memory accounting unbalanced");
	WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
		  "kfd TTM memory accounting unbalanced");

	spin_unlock(&kfd_mem_limit.mem_limit_lock);
}

void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	u32 domain = bo->preferred_domains;
	bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);

	if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
		domain = AMDGPU_GEM_DOMAIN_CPU;
		sg = false;
	}

	unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
}

/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
 *  reservation object.
 *
 * @bo: [IN] Remove eviction fence(s) from this BO
 * @ef: [IN] This eviction fence is removed if it
 *  is present in the shared list.
 *
 * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
 */
static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
					struct amdgpu_amdkfd_fence *ef)
{
	struct dma_resv *resv = bo->tbo.base.resv;
	struct dma_resv_list *old, *new;
	unsigned int i, j, k;

	if (!ef)
		return -EINVAL;

	old = dma_resv_get_list(resv);
	if (!old)
		return 0;

	new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
		      GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	/* Go through all the shared fences in the reservation object and sort
	 * the interesting ones to the end of the list.
	 */
	for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
		struct dma_fence *f;

		f = rcu_dereference_protected(old->shared[i],
					      dma_resv_held(resv));

		if (f->context == ef->base.context)
			RCU_INIT_POINTER(new->shared[--j], f);
		else
			RCU_INIT_POINTER(new->shared[k++], f);
	}
	new->shared_max = old->shared_max;
	new->shared_count = k;

	/* Install the new fence list, seqcount provides the barriers */
	write_seqcount_begin(&resv->seq);
	RCU_INIT_POINTER(resv->fence, new);
	write_seqcount_end(&resv->seq);

	/* Drop the references to the removed fences */
	for (i = j, k = 0; i < old->shared_count; ++i) {
		struct dma_fence *f;

		f = rcu_dereference_protected(new->shared[i],
					      dma_resv_held(resv));
		dma_fence_put(f);
	}
	kfree_rcu(old, rcu);

	return 0;
}

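/* Example of the partitioning done above: a shared list [A, E1, B, E2],
 * where E1 and E2 use the eviction fence context, becomes [A, B, E2, E1]
 * with shared_count = 2, after which the two trailing eviction fence
 * references are dropped.
 */
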
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
{
	struct amdgpu_bo *root = bo;
	struct amdgpu_vm_bo_base *vm_bo;
	struct amdgpu_vm *vm;
	struct amdkfd_process_info *info;
	struct amdgpu_amdkfd_fence *ef;
	int ret;

	/* we can always get vm_bo from root PD bo.*/
	while (root->parent)
		root = root->parent;

	vm_bo = root->vm_bo;
	if (!vm_bo)
		return 0;

	vm = vm_bo->vm;
	if (!vm)
		return 0;

	info = vm->process_info;
	if (!info || !info->eviction_fence)
		return 0;

	ef = container_of(dma_fence_get(&info->eviction_fence->base),
			struct amdgpu_amdkfd_fence, base);

	BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
	ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
	dma_resv_unlock(bo->tbo.base.resv);

	dma_fence_put(&ef->base);
	return ret;
}

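/* Move a regular (non-userptr) BO into the given memory domain and,
 * if requested, wait for background evictions and moves on the BO to
 * complete.
 */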
static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
				     bool wait)
{
	struct ttm_operation_ctx ctx = { false, false };
	int ret;

	if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
		 "Called with userptr BO"))
		return -EINVAL;

	amdgpu_bo_placement_from_domain(bo, domain);

	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		goto validate_fail;
	if (wait)
		amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);

validate_fail:
	return ret;
}

static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_vm_parser *p = param;

	return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
}

/* vm_validate_pt_pd_bos - Validate page table and directory BOs
 *
 * Page directories are not updated here because huge page handling
 * during page table updates can invalidate page directory entries
 * again. Page directories are only updated after updating page
 * tables.
 */
static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
{
	struct amdgpu_bo *pd = vm->root.base.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	struct amdgpu_vm_parser param;
	int ret;

	param.domain = AMDGPU_GEM_DOMAIN_VRAM;
	param.wait = false;

	ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
					&param);
	if (ret) {
		pr_err("failed to validate PT BOs\n");
		return ret;
	}

	ret = amdgpu_amdkfd_validate(&param, pd);
	if (ret) {
		pr_err("failed to validate PD\n");
		return ret;
	}

	vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);

	if (vm->use_cpu_for_update) {
		ret = amdgpu_bo_kmap(pd, NULL);
		if (ret) {
			pr_err("failed to kmap PD, ret=%d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
	struct amdgpu_bo *pd = vm->root.base.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	int ret;

	ret = amdgpu_vm_update_pdes(adev, vm, false);
	if (ret)
		return ret;

	return amdgpu_sync_fence(sync, vm->last_update);
}

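/* Translate the KFD allocation flags and the ASIC type into GPUVM PTE
 * flags: access permissions and memory type (MTYPE).
 */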
static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
{
	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
	bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
	uint32_t mapping_flags;

	mapping_flags = AMDGPU_VM_PAGE_READABLE;
	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
		mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;

	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
			if (bo_adev == adev)
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
			else
				mapping_flags |= AMDGPU_VM_MTYPE_UC;
		} else {
			mapping_flags |= coherent ?
				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		}
		break;
	default:
		mapping_flags |= coherent ?
			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
	}

	return amdgpu_gem_va_map_flags(adev, mapping_flags);
}

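/* For example, on the default path above a coherent allocation is mapped
 * uncached (MTYPE_UC) while a non-coherent one is mapped non-coherent
 * cached (MTYPE_NC), in addition to the R/W/X permission bits.
 */
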
/* add_bo_to_vm - Add a BO to a VM
 *
 * Everything that needs to be done only once when a BO is first added
 * to a VM. It can later be mapped and unmapped many times without
 * repeating these steps.
 *
 * 1. Allocate and initialize BO VA entry data structure
 * 2. Add BO to the VM
 * 3. Determine ASIC-specific PTE flags
 * 4. Alloc page tables and directories if needed
 * 4a. Validate new page tables and directories
 */
static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
		struct amdgpu_vm *vm, bool is_aql,
		struct kfd_bo_va_list **p_bo_va_entry)
{
	int ret;
	struct kfd_bo_va_list *bo_va_entry;
	struct amdgpu_bo *bo = mem->bo;
	uint64_t va = mem->va;
	struct list_head *list_bo_va = &mem->bo_va_list;
	unsigned long bo_size = bo->tbo.mem.size;

	if (!va) {
		pr_err("Invalid VA when adding BO to VM\n");
		return -EINVAL;
	}

	if (is_aql)
		va += bo_size;

	bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
	if (!bo_va_entry)
		return -ENOMEM;

	pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
			va + bo_size, vm);

	/* Add BO to VM internal data structures*/
	bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
	if (!bo_va_entry->bo_va) {
		ret = -EINVAL;
		pr_err("Failed to add BO object to VM. ret == %d\n",
				ret);
		goto err_vmadd;
	}

	bo_va_entry->va = va;
	bo_va_entry->pte_flags = get_pte_flags(adev, mem);
	bo_va_entry->kgd_dev = (void *)adev;
	list_add(&bo_va_entry->bo_list, list_bo_va);

	if (p_bo_va_entry)
		*p_bo_va_entry = bo_va_entry;

	/* Allocate and validate page tables if needed */
	ret = vm_validate_pt_pd_bos(vm);
	if (ret) {
		pr_err("validate_pt_pd_bos() failed\n");
		goto err_alloc_pts;
	}

	return 0;

err_alloc_pts:
	amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
	list_del(&bo_va_entry->bo_list);
err_vmadd:
	kfree(bo_va_entry);
	return ret;
}

static void remove_bo_from_vm(struct amdgpu_device *adev,
		struct kfd_bo_va_list *entry, unsigned long size)
{
	pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
			entry->va,
			entry->va + size, entry);
	amdgpu_vm_bo_rmv(adev, entry->bo_va);
	list_del(&entry->bo_list);
	kfree(entry);
}

static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
				struct amdkfd_process_info *process_info,
				bool userptr)
{
	struct ttm_validate_buffer *entry = &mem->validate_list;
	struct amdgpu_bo *bo = mem->bo;

	INIT_LIST_HEAD(&entry->head);
	entry->num_shared = 1;
	entry->bo = &bo->tbo;
	mutex_lock(&process_info->lock);
	if (userptr)
		list_add_tail(&entry->head, &process_info->userptr_valid_list);
	else
		list_add_tail(&entry->head, &process_info->kfd_bo_list);
	mutex_unlock(&process_info->lock);
}

static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
		struct amdkfd_process_info *process_info)
{
	struct ttm_validate_buffer *bo_list_entry;

	bo_list_entry = &mem->validate_list;
	mutex_lock(&process_info->lock);
	list_del(&bo_list_entry->head);
	mutex_unlock(&process_info->lock);
}

/* Initializes user pages. It registers the MMU notifier and validates
 * the userptr BO in the GTT domain.
 *
 * The BO must already be on the userptr_valid_list. Otherwise an
 * eviction and restore may happen that leaves the new BO unmapped
 * with the user mode queues running.
 *
 * Takes the process_info->lock to protect against concurrent restore
 * workers.
 *
 * Returns 0 for success, negative errno for errors.
 */
static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	struct amdgpu_bo *bo = mem->bo;
	struct ttm_operation_ctx ctx = { true, false };
	int ret = 0;

	mutex_lock(&process_info->lock);

	ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0);
	if (ret) {
		pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
		goto out;
	}

	ret = amdgpu_mn_register(bo, user_addr);
	if (ret) {
		pr_err("%s: Failed to register MMU notifier: %d\n",
		       __func__, ret);
		goto out;
	}

	ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
	if (ret) {
		pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
		goto unregister_out;
	}

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		pr_err("%s: Failed to reserve BO\n", __func__);
		goto release_out;
	}
	amdgpu_bo_placement_from_domain(bo, mem->domain);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		pr_err("%s: failed to validate BO\n", __func__);
	amdgpu_bo_unreserve(bo);

release_out:
	amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
unregister_out:
	if (ret)
		amdgpu_mn_unregister(bo);
out:
	mutex_unlock(&process_info->lock);
	return ret;
}

/* Reserving a BO and its page table BOs must happen atomically to
 * avoid deadlocks. Some operations update multiple VMs at once. Track
 * all the reservation info in a context structure. Optionally a sync
 * object can track VM updates.
 */
struct bo_vm_reservation_context {
	struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
	unsigned int n_vms;		    /* Number of VMs reserved	    */
	struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
	struct ww_acquire_ctx ticket;	    /* Reservation ticket	    */
	struct list_head list, duplicates;  /* BO lists			    */
	struct amdgpu_sync *sync;	    /* Pointer to sync object	    */
	bool reserved;			    /* Whether BOs are reserved	    */
};

enum bo_vm_match {
	BO_VM_NOT_MAPPED = 0,	/* Match VMs where a BO is not mapped */
	BO_VM_MAPPED,		/* Match VMs where a BO is mapped     */
	BO_VM_ALL,		/* Match all VMs a BO was added to    */
};

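/* Typical use of the reservation context, as in
 * amdgpu_amdkfd_gpuvm_map_memory_to_gpu() below:
 *
 *	struct bo_vm_reservation_context ctx;
 *
 *	ret = reserve_bo_and_vm(mem, vm, &ctx);
 *	if (unlikely(ret))
 *		goto out;
 *	... map/unmap, accumulating PT update fences in ctx.sync ...
 *	ret = unreserve_bo_and_vms(&ctx, false, false);
 */
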
/**
 * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
 * @mem: KFD BO structure.
 * @vm: the VM to reserve.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 */
static int reserve_bo_and_vm(struct kgd_mem *mem,
			      struct amdgpu_vm *vm,
			      struct bo_vm_reservation_context *ctx)
{
	struct amdgpu_bo *bo = mem->bo;
	int ret;

	WARN_ON(!vm);

	ctx->reserved = false;
	ctx->n_vms = 1;
	ctx->sync = &mem->sync;

	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->duplicates);

	ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
	if (!ctx->vm_pd)
		return -ENOMEM;

	ctx->kfd_bo.priority = 0;
	ctx->kfd_bo.tv.bo = &bo->tbo;
	ctx->kfd_bo.tv.num_shared = 1;
	list_add(&ctx->kfd_bo.tv.head, &ctx->list);

	amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);

	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
				     false, &ctx->duplicates);
	if (ret) {
		pr_err("Failed to reserve buffers in ttm.\n");
		kfree(ctx->vm_pd);
		ctx->vm_pd = NULL;
		return ret;
	}

	ctx->reserved = true;
	return 0;
}

/**
 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
 * @mem: KFD BO structure.
 * @vm: the VM to reserve. If NULL, then all VMs associated with the BO
 * are used. Otherwise, a single VM associated with the BO.
 * @map_type: the mapping status that will be used to filter the VMs.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 *
 * Returns 0 for success, negative for failure.
 */
static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
				struct amdgpu_vm *vm, enum bo_vm_match map_type,
				struct bo_vm_reservation_context *ctx)
{
	struct amdgpu_bo *bo = mem->bo;
	struct kfd_bo_va_list *entry;
	unsigned int i;
	int ret;

	ctx->reserved = false;
	ctx->n_vms = 0;
	ctx->vm_pd = NULL;
	ctx->sync = &mem->sync;

	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->duplicates);

	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if ((vm && vm != entry->bo_va->base.vm) ||
			(entry->is_mapped != map_type
			&& map_type != BO_VM_ALL))
			continue;

		ctx->n_vms++;
	}

	if (ctx->n_vms != 0) {
		ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
				     GFP_KERNEL);
		if (!ctx->vm_pd)
			return -ENOMEM;
	}

	ctx->kfd_bo.priority = 0;
	ctx->kfd_bo.tv.bo = &bo->tbo;
	ctx->kfd_bo.tv.num_shared = 1;
	list_add(&ctx->kfd_bo.tv.head, &ctx->list);

	i = 0;
	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if ((vm && vm != entry->bo_va->base.vm) ||
			(entry->is_mapped != map_type
			&& map_type != BO_VM_ALL))
			continue;

		amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
				&ctx->vm_pd[i]);
		i++;
	}

	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
				     false, &ctx->duplicates);
	if (ret) {
		pr_err("Failed to reserve buffers in ttm.\n");
		kfree(ctx->vm_pd);
		ctx->vm_pd = NULL;
		return ret;
	}

	ctx->reserved = true;
	return 0;
}

/**
 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
 * @ctx: Reservation context to unreserve
 * @wait: Optionally wait for a sync object representing pending VM updates
 * @intr: Whether the wait is interruptible
 *
 * Also frees any resources allocated in
 * reserve_bo_and_(cond_)vm(s). Returns the status from
 * amdgpu_sync_wait.
 */
static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
				 bool wait, bool intr)
{
	int ret = 0;

	if (wait)
		ret = amdgpu_sync_wait(ctx->sync, intr);

	if (ctx->reserved)
		ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
	kfree(ctx->vm_pd);

	ctx->sync = NULL;

	ctx->reserved = false;
	ctx->vm_pd = NULL;

	return ret;
}

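/* Remove the BO's VA mapping from the VM, clear the freed page table
 * entries and add the resulting update fence to the sync object.
 */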
static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
				struct kfd_bo_va_list *entry,
				struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = entry->bo_va;
	struct amdgpu_vm *vm = bo_va->base.vm;

	amdgpu_vm_bo_unmap(adev, bo_va, entry->va);

	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);

	amdgpu_sync_fence(sync, bo_va->last_pt_update);

	return 0;
}

static int update_gpuvm_pte(struct amdgpu_device *adev,
		struct kfd_bo_va_list *entry,
		struct amdgpu_sync *sync)
{
	int ret;
	struct amdgpu_bo_va *bo_va = entry->bo_va;

	/* Update the page tables */
	ret = amdgpu_vm_bo_update(adev, bo_va, false);
	if (ret) {
		pr_err("amdgpu_vm_bo_update failed\n");
		return ret;
	}

	return amdgpu_sync_fence(sync, bo_va->last_pt_update);
}

static int map_bo_to_gpuvm(struct amdgpu_device *adev,
		struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
		bool no_update_pte)
{
	int ret;

	/* Set virtual address for the allocation */
	ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
			       amdgpu_bo_size(entry->bo_va->base.bo),
			       entry->pte_flags);
	if (ret) {
		pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
				entry->va, ret);
		return ret;
	}

	if (no_update_pte)
		return 0;

	ret = update_gpuvm_pte(adev, entry, sync);
	if (ret) {
		pr_err("update_gpuvm_pte() failed\n");
		goto update_gpuvm_pte_failed;
	}

	return 0;

update_gpuvm_pte_failed:
	unmap_bo_from_gpuvm(adev, entry, sync);
	return ret;
}

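/* Build a single-entry scatter-gather table describing a doorbell or
 * MMIO-remap page at the given DMA address; such BOs have no CPU pages
 * behind them.
 */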
static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
{
	struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);

	if (!sg)
		return NULL;
	if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
		kfree(sg);
		return NULL;
	}
	sg->sgl->dma_address = addr;
	sg->sgl->length = size;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
	sg->sgl->dma_length = size;
#endif
	return sg;
}

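/* Validate the page directory and page table BOs of every VM belonging
 * to the process.
 */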
static int process_validate_vms(struct amdkfd_process_info *process_info)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		ret = vm_validate_pt_pd_bos(peer_vm);
		if (ret)
			return ret;
	}

	return 0;
}

static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
				 struct amdgpu_sync *sync)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		struct amdgpu_bo *pd = peer_vm->root.base.bo;

		ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
				       AMDGPU_SYNC_NE_OWNER,
				       AMDGPU_FENCE_OWNER_KFD);
		if (ret)
			return ret;
	}

	return 0;
}

static int process_update_pds(struct amdkfd_process_info *process_info,
			      struct amdgpu_sync *sync)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		ret = vm_update_pds(peer_vm, sync);
		if (ret)
			return ret;
	}

	return 0;
}

static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
		       struct dma_fence **ef)
{
	struct amdkfd_process_info *info = NULL;
	int ret;

	if (!*process_info) {
		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		mutex_init(&info->lock);
		INIT_LIST_HEAD(&info->vm_list_head);
		INIT_LIST_HEAD(&info->kfd_bo_list);
		INIT_LIST_HEAD(&info->userptr_valid_list);
		INIT_LIST_HEAD(&info->userptr_inval_list);

		info->eviction_fence =
			amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
						   current->mm);
		if (!info->eviction_fence) {
			pr_err("Failed to create eviction fence\n");
			ret = -ENOMEM;
			goto create_evict_fence_fail;
		}

		info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
		atomic_set(&info->evicted_bos, 0);
		INIT_DELAYED_WORK(&info->restore_userptr_work,
				  amdgpu_amdkfd_restore_userptr_worker);

		*process_info = info;
		*ef = dma_fence_get(&info->eviction_fence->base);
	}

	vm->process_info = *process_info;

	/* Validate page directory and attach eviction fence */
	ret = amdgpu_bo_reserve(vm->root.base.bo, true);
	if (ret)
		goto reserve_pd_fail;
	ret = vm_validate_pt_pd_bos(vm);
	if (ret) {
		pr_err("validate_pt_pd_bos() failed\n");
		goto validate_pd_fail;
	}
	ret = amdgpu_bo_sync_wait(vm->root.base.bo,
				  AMDGPU_FENCE_OWNER_KFD, false);
	if (ret)
		goto wait_pd_fail;
	ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
	if (ret)
		goto reserve_shared_fail;
	amdgpu_bo_fence(vm->root.base.bo,
			&vm->process_info->eviction_fence->base, true);
	amdgpu_bo_unreserve(vm->root.base.bo);

	/* Update process info */
	mutex_lock(&vm->process_info->lock);
	list_add_tail(&vm->vm_list_node,
			&(vm->process_info->vm_list_head));
	vm->process_info->n_vms++;
	mutex_unlock(&vm->process_info->lock);

	return 0;

reserve_shared_fail:
wait_pd_fail:
validate_pd_fail:
	amdgpu_bo_unreserve(vm->root.base.bo);
reserve_pd_fail:
	vm->process_info = NULL;
	if (info) {
		/* Two fence references: one in info and one in *ef */
		dma_fence_put(&info->eviction_fence->base);
		dma_fence_put(*ef);
		*ef = NULL;
		*process_info = NULL;
		put_pid(info->pid);
create_evict_fence_fail:
		mutex_destroy(&info->lock);
		kfree(info);
	}
	return ret;
}

int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid,
					  void **vm, void **process_info,
					  struct dma_fence **ef)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *new_vm;
	int ret;

	new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
	if (!new_vm)
		return -ENOMEM;

	/* Initialize AMDGPU part of the VM */
	ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid);
	if (ret) {
		pr_err("Failed init vm ret %d\n", ret);
		goto amdgpu_vm_init_fail;
	}

	/* Initialize KFD part of the VM and process info */
	ret = init_kfd_vm(new_vm, process_info, ef);
	if (ret)
		goto init_kfd_vm_fail;

	*vm = (void *) new_vm;

	return 0;

init_kfd_vm_fail:
	amdgpu_vm_fini(adev, new_vm);
amdgpu_vm_init_fail:
	kfree(new_vm);
	return ret;
}

int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
					   struct file *filp, unsigned int pasid,
					   void **vm, void **process_info,
					   struct dma_fence **ef)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct drm_file *drm_priv = filp->private_data;
	struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
	struct amdgpu_vm *avm = &drv_priv->vm;
	int ret;

	/* Already a compute VM? */
	if (avm->process_info)
		return -EINVAL;

	/* Convert VM into a compute VM */
	ret = amdgpu_vm_make_compute(adev, avm, pasid);
	if (ret)
		return ret;

	/* Initialize KFD part of the VM and process info */
	ret = init_kfd_vm(avm, process_info, ef);
	if (ret)
		return ret;

	*vm = (void *)avm;

	return 0;
}

void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
	struct amdkfd_process_info *process_info = vm->process_info;
	struct amdgpu_bo *pd = vm->root.base.bo;

	if (!process_info)
		return;

	/* Release eviction fence from PD */
	amdgpu_bo_reserve(pd, false);
	amdgpu_bo_fence(pd, NULL, false);
	amdgpu_bo_unreserve(pd);

	/* Update process info */
	mutex_lock(&process_info->lock);
	process_info->n_vms--;
	list_del(&vm->vm_list_node);
	mutex_unlock(&process_info->lock);

	vm->process_info = NULL;

	/* Release per-process resources when last compute VM is destroyed */
	if (!process_info->n_vms) {
		WARN_ON(!list_empty(&process_info->kfd_bo_list));
		WARN_ON(!list_empty(&process_info->userptr_valid_list));
		WARN_ON(!list_empty(&process_info->userptr_inval_list));

		dma_fence_put(&process_info->eviction_fence->base);
		cancel_delayed_work_sync(&process_info->restore_userptr_work);
		put_pid(process_info->pid);
		mutex_destroy(&process_info->lock);
		kfree(process_info);
	}
}

void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

	if (WARN_ON(!kgd || !vm))
		return;

	pr_debug("Destroying process vm %p\n", vm);

	/* Release the VM context */
	amdgpu_vm_fini(adev, avm);
	kfree(vm);
}

void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

	if (WARN_ON(!kgd || !vm))
		return;

	pr_debug("Releasing process vm %p\n", vm);

	/* The original pasid of the amdgpu vm has already been
	 * released when the vm was converted to a compute vm.
	 * The current pasid is managed by kfd and will be
	 * released on kfd process destroy. Set amdgpu pasid
	 * to 0 to avoid duplicate release.
	 */
	amdgpu_vm_release_compute(adev, avm);
}

uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
{
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
	struct amdgpu_bo *pd = avm->root.base.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);

	if (adev->asic_type < CHIP_VEGA10)
		return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
	return avm->pd_phys_addr;
}

int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
		struct kgd_dev *kgd, uint64_t va, uint64_t size,
		void *vm, struct kgd_mem **mem,
		uint64_t *offset, uint32_t flags)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct sg_table *sg = NULL;
	uint64_t user_addr = 0;
	struct amdgpu_bo *bo;
	struct amdgpu_bo_param bp;
	u32 domain, alloc_domain;
	u64 alloc_flags;
	int ret;

	/*
	 * Check on which domain to allocate BO
	 */
	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
		alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
		alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
			AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_flags = 0;
	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
		domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
		alloc_flags = 0;
		if (!offset || !*offset)
			return -EINVAL;
		user_addr = untagged_addr(*offset);
	} else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
			KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
		domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
		bo_type = ttm_bo_type_sg;
		alloc_flags = 0;
		if (size > UINT_MAX)
			return -EINVAL;
		sg = create_doorbell_sg(*offset, size);
		if (!sg)
			return -ENOMEM;
	} else {
		return -EINVAL;
	}

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem) {
		ret = -ENOMEM;
		goto err;
	}
	INIT_LIST_HEAD(&(*mem)->bo_va_list);
	mutex_init(&(*mem)->lock);
	(*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);

	/* Workaround for AQL queue wraparound bug. Map the same
	 * memory twice. That means we only actually allocate half
	 * the memory.
	 */
	if ((*mem)->aql_queue)
		size = size >> 1;

	(*mem)->alloc_flags = flags;

	amdgpu_sync_create(&(*mem)->sync);

	ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
	if (ret) {
		pr_debug("Insufficient system memory\n");
		goto err_reserve_limit;
	}

	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
			va, size, domain_string(alloc_domain));

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = 1;
	bp.domain = alloc_domain;
	bp.flags = alloc_flags;
	bp.type = bo_type;
	bp.resv = NULL;
	ret = amdgpu_bo_create(adev, &bp, &bo);
	if (ret) {
		pr_debug("Failed to create BO on domain %s. ret %d\n",
				domain_string(alloc_domain), ret);
		goto err_bo_create;
	}
	if (bo_type == ttm_bo_type_sg) {
		bo->tbo.sg = sg;
		bo->tbo.ttm->sg = sg;
	}
	bo->kfd_bo = *mem;
	(*mem)->bo = bo;
	if (user_addr)
		bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;

	(*mem)->va = va;
	(*mem)->domain = domain;
	(*mem)->mapped_to_gpu_memory = 0;
	(*mem)->process_info = avm->process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);

	if (user_addr) {
		ret = init_user_pages(*mem, user_addr);
		if (ret)
			goto allocate_init_user_pages_failed;
	}

	if (offset)
		*offset = amdgpu_bo_mmap_offset(bo);

	return 0;

allocate_init_user_pages_failed:
	remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
	amdgpu_bo_unref(&bo);
	/* Don't unreserve system mem limit twice */
	goto err_reserve_limit;
err_bo_create:
	unreserve_mem_limit(adev, size, alloc_domain, !!sg);
err_reserve_limit:
	mutex_destroy(&(*mem)->lock);
	kfree(*mem);
err:
	if (sg) {
		sg_free_table(sg);
		kfree(sg);
	}
	return ret;
}

int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	unsigned long bo_size = mem->bo->tbo.mem.size;
	struct kfd_bo_va_list *entry, *tmp;
	struct bo_vm_reservation_context ctx;
	struct ttm_validate_buffer *bo_list_entry;
	unsigned int mapped_to_gpu_memory;
	int ret;
	bool is_imported = false;

	mutex_lock(&mem->lock);
	mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
	is_imported = mem->is_imported;
	mutex_unlock(&mem->lock);
	/* lock is not needed after this, since mem is unused and will
	 * be freed anyway
	 */

	if (mapped_to_gpu_memory > 0) {
		pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
				mem->va, bo_size);
		return -EBUSY;
	}

	/* Make sure restore workers don't access the BO any more */
	bo_list_entry = &mem->validate_list;
	mutex_lock(&process_info->lock);
	list_del(&bo_list_entry->head);
	mutex_unlock(&process_info->lock);

	/* No more MMU notifiers */
	amdgpu_mn_unregister(mem->bo);

	ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
	if (unlikely(ret))
		return ret;

	/* The eviction fence should be removed by the last unmap.
	 * TODO: Log an error condition if the bo still has the eviction fence
	 * attached
	 */
	amdgpu_amdkfd_remove_eviction_fence(mem->bo,
					process_info->eviction_fence);
	pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
		mem->va + bo_size * (1 + mem->aql_queue));

	/* Remove from VM internal data structures */
	list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
		remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
				entry, bo_size);

	ret = unreserve_bo_and_vms(&ctx, false, false);

	/* Free the sync object */
	amdgpu_sync_free(&mem->sync);

	/* If the SG is not NULL, it's one we created for a doorbell or mmio
	 * remap BO. We need to free it.
	 */
	if (mem->bo->tbo.sg) {
		sg_free_table(mem->bo->tbo.sg);
		kfree(mem->bo->tbo.sg);
	}

	/* Update the size of the BO being freed if it was allocated from
	 * VRAM and is not imported.
	 */
	if (size) {
		if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
		    (!is_imported))
			*size = bo_size;
		else
			*size = 0;
	}

	/* Free the BO*/
	drm_gem_object_put(&mem->bo->tbo.base);
	mutex_destroy(&mem->lock);
	kfree(mem);

	return ret;
}

int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
	int ret;
	struct amdgpu_bo *bo;
	uint32_t domain;
	struct kfd_bo_va_list *entry;
	struct bo_vm_reservation_context ctx;
	struct kfd_bo_va_list *bo_va_entry = NULL;
	struct kfd_bo_va_list *bo_va_entry_aql = NULL;
	unsigned long bo_size;
	bool is_invalid_userptr = false;

	bo = mem->bo;
	if (!bo) {
		pr_err("Invalid BO when mapping memory to GPU\n");
		return -EINVAL;
	}

	/* Make sure restore is not running concurrently. Since we
	 * don't map invalid userptr BOs, we rely on the next restore
	 * worker to do the mapping
	 */
	mutex_lock(&mem->process_info->lock);

	/* Lock mmap-sem. If we find an invalid userptr BO, we can be
	 * sure that the MMU notifier is no longer running
	 * concurrently and the queues are actually stopped
	 */
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		mmap_write_lock(current->mm);
		is_invalid_userptr = atomic_read(&mem->invalid);
		mmap_write_unlock(current->mm);
	}

	mutex_lock(&mem->lock);

	domain = mem->domain;
	bo_size = bo->tbo.mem.size;

	pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
			mem->va,
			mem->va + bo_size * (1 + mem->aql_queue),
			vm, domain_string(domain));

	ret = reserve_bo_and_vm(mem, vm, &ctx);
	if (unlikely(ret))
		goto out;

	/* Userptr can be marked as "not invalid", but not actually be
	 * validated yet (still in the system domain). In that case
	 * the queues are still stopped and we can leave mapping for
	 * the next restore worker
	 */
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
	    bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
		is_invalid_userptr = true;

	if (check_if_add_bo_to_vm(avm, mem)) {
		ret = add_bo_to_vm(adev, mem, avm, false,
				&bo_va_entry);
		if (ret)
			goto add_bo_to_vm_failed;
		if (mem->aql_queue) {
			ret = add_bo_to_vm(adev, mem, avm,
					true, &bo_va_entry_aql);
			if (ret)
				goto add_bo_to_vm_failed_aql;
		}
	} else {
		ret = vm_validate_pt_pd_bos(avm);
		if (unlikely(ret))
			goto add_bo_to_vm_failed;
	}

	if (mem->mapped_to_gpu_memory == 0 &&
	    !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		/* Validate BO only once. The eviction fence gets added to BO
		 * the first time it is mapped. Validate will wait for all
		 * background evictions to complete.
		 */
		ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
		if (ret) {
			pr_debug("Validate failed\n");
			goto map_bo_to_gpuvm_failed;
		}
	}

	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
			pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
					entry->va, entry->va + bo_size,
					entry);

			ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
					      is_invalid_userptr);
			if (ret) {
				pr_err("Failed to map bo to gpuvm\n");
				goto map_bo_to_gpuvm_failed;
			}

			ret = vm_update_pds(vm, ctx.sync);
			if (ret) {
				pr_err("Failed to update page directories\n");
				goto map_bo_to_gpuvm_failed;
			}

			entry->is_mapped = true;
			mem->mapped_to_gpu_memory++;
			pr_debug("\t INC mapping count %d\n",
					mem->mapped_to_gpu_memory);
		}
	}

	if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
		amdgpu_bo_fence(bo,
				&avm->process_info->eviction_fence->base,
				true);
	ret = unreserve_bo_and_vms(&ctx, false, false);

	goto out;

map_bo_to_gpuvm_failed:
	if (bo_va_entry_aql)
		remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
add_bo_to_vm_failed_aql:
	if (bo_va_entry)
		remove_bo_from_vm(adev, bo_va_entry, bo_size);
add_bo_to_vm_failed:
	unreserve_bo_and_vms(&ctx, false, false);
out:
	mutex_unlock(&mem->process_info->lock);
	mutex_unlock(&mem->lock);
	return ret;
}

int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdkfd_process_info *process_info =
		((struct amdgpu_vm *)vm)->process_info;
	unsigned long bo_size = mem->bo->tbo.mem.size;
	struct kfd_bo_va_list *entry;
	struct bo_vm_reservation_context ctx;
	int ret;

	mutex_lock(&mem->lock);

	ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
	if (unlikely(ret))
		goto out;
	/* If no VMs were reserved, it means the BO wasn't actually mapped */
	if (ctx.n_vms == 0) {
		ret = -EINVAL;
		goto unreserve_out;
	}

	ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
	if (unlikely(ret))
		goto unreserve_out;

	pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
		mem->va,
		mem->va + bo_size * (1 + mem->aql_queue),
		vm);

	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if (entry->bo_va->base.vm == vm && entry->is_mapped) {
			pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
					entry->va,
					entry->va + bo_size,
					entry);

			ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
			if (ret == 0) {
				entry->is_mapped = false;
			} else {
				pr_err("failed to unmap VA 0x%llx\n",
						mem->va);
				goto unreserve_out;
			}

			mem->mapped_to_gpu_memory--;
			pr_debug("\t DEC mapping count %d\n",
					mem->mapped_to_gpu_memory);
		}
	}

	/* If BO is unmapped from all VMs, unfence it. It can be evicted if
	 * required.
	 */
	if (mem->mapped_to_gpu_memory == 0 &&
	    !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
		amdgpu_amdkfd_remove_eviction_fence(mem->bo,
						process_info->eviction_fence);

unreserve_out:
	unreserve_bo_and_vms(&ctx, false, false);
out:
	mutex_unlock(&mem->lock);
	return ret;
}

int amdgpu_amdkfd_gpuvm_sync_memory(
		struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
{
	struct amdgpu_sync sync;
	int ret;

	amdgpu_sync_create(&sync);

	mutex_lock(&mem->lock);
	amdgpu_sync_clone(&mem->sync, &sync);
	mutex_unlock(&mem->lock);

	ret = amdgpu_sync_wait(&sync, intr);
	amdgpu_sync_free(&sync);
	return ret;
}

int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
		struct kgd_mem *mem, void **kptr, uint64_t *size)
{
	int ret;
	struct amdgpu_bo *bo = mem->bo;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		pr_err("userptr can't be mapped to kernel\n");
		return -EINVAL;
	}

	/* delete kgd_mem from kfd_bo_list to avoid re-validating
	 * this BO in BO's restoring after eviction.
	 */
	mutex_lock(&mem->process_info->lock);

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		pr_err("Failed to reserve bo. ret %d\n", ret);
		goto bo_reserve_failed;
	}

	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (ret) {
		pr_err("Failed to pin bo. ret %d\n", ret);
		goto pin_failed;
	}

	ret = amdgpu_bo_kmap(bo, kptr);
	if (ret) {
		pr_err("Failed to map bo to kernel. ret %d\n", ret);
		goto kmap_failed;
	}

	amdgpu_amdkfd_remove_eviction_fence(
		bo, mem->process_info->eviction_fence);
	list_del_init(&mem->validate_list.head);

	if (size)
		*size = amdgpu_bo_size(bo);

	amdgpu_bo_unreserve(bo);

	mutex_unlock(&mem->process_info->lock);
	return 0;

kmap_failed:
	amdgpu_bo_unpin(bo);
pin_failed:
	amdgpu_bo_unreserve(bo);
bo_reserve_failed:
	mutex_unlock(&mem->process_info->lock);

	return ret;
}

int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
					      struct kfd_vm_fault_info *mem)
{
	struct amdgpu_device *adev;

	adev = (struct amdgpu_device *)kgd;
	if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
		*mem = *adev->gmc.vm_fault_info;
		mb();
		atomic_set(&adev->gmc.vm_fault_info_updated, 0);
	}
	return 0;
}

int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
				      struct dma_buf *dma_buf,
				      uint64_t va, void *vm,
				      struct kgd_mem **mem, uint64_t *size,
				      uint64_t *mmap_offset)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		/* Can't handle non-graphics buffers */
		return -EINVAL;

	obj = dma_buf->priv;
	if (obj->dev->dev_private != adev)
		/* Can't handle buffers from other devices */
		return -EINVAL;

	bo = gem_to_amdgpu_bo(obj);
	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT)))
		/* Only VRAM and GTT BOs are supported */
		return -EINVAL;

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem)
		return -ENOMEM;

	if (size)
		*size = amdgpu_bo_size(bo);

	if (mmap_offset)
		*mmap_offset = amdgpu_bo_mmap_offset(bo);

	INIT_LIST_HEAD(&(*mem)->bo_va_list);
	mutex_init(&(*mem)->lock);

	(*mem)->alloc_flags =
		((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
		KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
		| KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
		| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;

	drm_gem_object_get(&bo->tbo.base);
	(*mem)->bo = bo;
	(*mem)->va = va;
	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
		AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
	(*mem)->mapped_to_gpu_memory = 0;
	(*mem)->process_info = avm->process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
	amdgpu_sync_create(&(*mem)->sync);
	(*mem)->is_imported = true;

	return 0;
}

/* Evict a userptr BO by stopping the queues if necessary
 *
 * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
 * cannot do any memory allocations, and cannot take any locks that
 * are held elsewhere while allocating memory. Therefore this is as
 * simple as possible, using atomic counters.
 *
 * It doesn't do anything to the BO itself. The real work happens in
 * restore, where we get updated page addresses. This function only
 * ensures that GPU access to the BO is stopped.
 */
int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
				struct mm_struct *mm)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	int evicted_bos;
	int r = 0;

	atomic_inc(&mem->invalid);
	evicted_bos = atomic_inc_return(&process_info->evicted_bos);
	if (evicted_bos == 1) {
		/* First eviction, stop the queues */
		r = kgd2kfd_quiesce_mm(mm);
		if (r)
			pr_err("Failed to quiesce KFD\n");
		schedule_delayed_work(&process_info->restore_userptr_work,
			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
	}

	return r;
}

/* Update invalid userptr BOs
 *
 * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
 * userptr_inval_list and updates user pages for all BOs that have
 * been invalidated since their last update.
 */
static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
				     struct mm_struct *mm)
{
	struct kgd_mem *mem, *tmp_mem;
	struct amdgpu_bo *bo;
	struct ttm_operation_ctx ctx = { false, false };
	int invalid, ret;

	/* Move all invalidated BOs to the userptr_inval_list and
	 * release their user pages by migration to the CPU domain
	 */
	list_for_each_entry_safe(mem, tmp_mem,
				 &process_info->userptr_valid_list,
				 validate_list.head) {
		if (!atomic_read(&mem->invalid))
			continue; /* BO is still valid */

		bo = mem->bo;

		if (amdgpu_bo_reserve(bo, true))
			return -EAGAIN;
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (ret) {
			pr_err("%s: Failed to invalidate userptr BO\n",
			       __func__);
			return -EAGAIN;
		}

		list_move_tail(&mem->validate_list.head,
			       &process_info->userptr_inval_list);
	}

	if (list_empty(&process_info->userptr_inval_list))
		return 0; /* All evicted userptr BOs were freed */

	/* Go through userptr_inval_list and update any invalid user_pages */
	list_for_each_entry(mem, &process_info->userptr_inval_list,
			    validate_list.head) {
		invalid = atomic_read(&mem->invalid);
		if (!invalid)
			/* BO hasn't been invalidated since the last
			 * revalidation attempt. Keep its BO list.
			 */
			continue;

		bo = mem->bo;

		/* Get updated user pages */
		ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
		if (ret) {
			pr_debug("%s: Failed to get user pages: %d\n",
				__func__, ret);

			/* Return error -EBUSY or -ENOMEM, retry restore */
			return ret;
		}

		/*
		 * FIXME: Cannot ignore the return code, must hold
		 * notifier_lock
		 */
		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);

		/* Mark the BO as valid unless it was invalidated
		 * again concurrently.
		 */
		if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
			return -EAGAIN;
	}

	return 0;
}

/* Validate invalid userptr BOs
 *
 * Validates BOs on the userptr_inval_list, and moves them back to the
 * userptr_valid_list. Also updates GPUVM page tables with new page
 * addresses and waits for the page table updates to complete.
 */
static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
{
	struct amdgpu_bo_list_entry *pd_bo_list_entries;
	struct list_head resv_list, duplicates;
	struct ww_acquire_ctx ticket;
	struct amdgpu_sync sync;

	struct amdgpu_vm *peer_vm;
	struct kgd_mem *mem, *tmp_mem;
	struct amdgpu_bo *bo;
	struct ttm_operation_ctx ctx = { false, false };
	int i, ret;

	pd_bo_list_entries = kcalloc(process_info->n_vms,
				     sizeof(struct amdgpu_bo_list_entry),
				     GFP_KERNEL);
	if (!pd_bo_list_entries) {
		pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
		ret = -ENOMEM;
		goto out_no_mem;
	}

	INIT_LIST_HEAD(&resv_list);
	INIT_LIST_HEAD(&duplicates);

	/* Get all the page directory BOs that need to be reserved */
	i = 0;
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node)
		amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
				    &pd_bo_list_entries[i++]);
	/* Add the userptr_inval_list entries to resv_list */
	list_for_each_entry(mem, &process_info->userptr_inval_list,
			    validate_list.head) {
		list_add_tail(&mem->resv_list.head, &resv_list);
		mem->resv_list.bo = mem->validate_list.bo;
		mem->resv_list.num_shared = mem->validate_list.num_shared;
	}

	/* Reserve all BOs and page tables for validation */
	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
	WARN(!list_empty(&duplicates), "Duplicates should be empty");
	if (ret)
		goto out_free;

	amdgpu_sync_create(&sync);

	ret = process_validate_vms(process_info);
	if (ret)
		goto unreserve_out;

	/* Validate BOs and update GPUVM page tables */
	list_for_each_entry_safe(mem, tmp_mem,
				 &process_info->userptr_inval_list,
				 validate_list.head) {
		struct kfd_bo_va_list *bo_va_entry;

		bo = mem->bo;

		/* Validate the BO if we got user pages */
		if (bo->tbo.ttm->pages[0]) {
			amdgpu_bo_placement_from_domain(bo, mem->domain);
			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (ret) {
				pr_err("%s: failed to validate BO\n", __func__);
				goto unreserve_out;
			}
		}

		list_move_tail(&mem->validate_list.head,
			       &process_info->userptr_valid_list);

		/* Update mapping. If the BO was not validated
		 * (because we couldn't get user pages), this will
		 * clear the page table entries, which will result in
		 * VM faults if the GPU tries to access the invalid
		 * memory.
		 */
		list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
			if (!bo_va_entry->is_mapped)
				continue;

			ret = update_gpuvm_pte((struct amdgpu_device *)
					       bo_va_entry->kgd_dev,
					       bo_va_entry, &sync);
			if (ret) {
				pr_err("%s: update PTE failed\n", __func__);
				/* make sure this gets validated again */
				atomic_inc(&mem->invalid);
				goto unreserve_out;
			}
		}
	}

	/* Update page directories */
	ret = process_update_pds(process_info, &sync);

unreserve_out:
	ttm_eu_backoff_reservation(&ticket, &resv_list);
	amdgpu_sync_wait(&sync, false);
	amdgpu_sync_free(&sync);
out_free:
	kfree(pd_bo_list_entries);
out_no_mem:

	return ret;
}

/* Worker callback to restore evicted userptr BOs
 *
 * Tries to update and validate all userptr BOs. If successful and no
 * concurrent evictions happened, the queues are restarted. Otherwise,
 * reschedule for another attempt later.
 */
static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct amdkfd_process_info *process_info =
		container_of(dwork, struct amdkfd_process_info,
			     restore_userptr_work);
	struct task_struct *usertask;
	struct mm_struct *mm;
	int evicted_bos;

	evicted_bos = atomic_read(&process_info->evicted_bos);
	if (!evicted_bos)
		return;

	/* Reference task and mm in case of concurrent process termination */
	usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
	if (!usertask)
		return;
	mm = get_task_mm(usertask);
	if (!mm) {
		put_task_struct(usertask);
		return;
	}

	mutex_lock(&process_info->lock);

	if (update_invalid_user_pages(process_info, mm))
		goto unlock_out;
	/* userptr_inval_list can be empty if all evicted userptr BOs
	 * have been freed. In that case there is nothing to validate
	 * and we can just restart the queues.
	 */
	if (!list_empty(&process_info->userptr_inval_list)) {
		if (atomic_read(&process_info->evicted_bos) != evicted_bos)
			goto unlock_out; /* Concurrent eviction, try again */

		if (validate_invalid_user_pages(process_info))
			goto unlock_out;
	}
	/* Final check for concurrent eviction and atomic update. If
	 * another eviction happens after successful update, it will
	 * be a first eviction that calls quiesce_mm. The eviction
	 * reference counting inside KFD will handle this case.
	 */
	if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
	    evicted_bos)
		goto unlock_out;
	evicted_bos = 0;
	if (kgd2kfd_resume_mm(mm)) {
		pr_err("%s: Failed to resume KFD\n", __func__);
		/* No recovery from this failure. Probably the CP is
		 * hanging. No point trying again.
		 */
	}

unlock_out:
	mutex_unlock(&process_info->lock);
	mmput(mm);
	put_task_struct(usertask);

	/* If validation failed, reschedule another attempt */
	if (evicted_bos)
		schedule_delayed_work(&process_info->restore_userptr_work,
			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
}

/** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
 *   KFD process identified by process_info
 *
 * @process_info: amdkfd_process_info of the KFD process
 *
 * After memory eviction, restore thread calls this function. The function
 * should be called when the Process is still valid. BO restore involves -
 *
 * 1.  Release old eviction fence and create new one
 * 2.  Get two copies of PD BO list from all the VMs. Keep one copy as pd_list.
 * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
 *     BOs that need to be reserved.
 * 4.  Reserve all the BOs
 * 5.  Validate PD and PT BOs.
 * 6.  Validate all KFD BOs using kfd_bo_list, map them and add a new fence
 * 7.  Add fence to all PD and PT BOs.
 * 8.  Unreserve all BOs
 */
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
{
	struct amdgpu_bo_list_entry *pd_bo_list;
	struct amdkfd_process_info *process_info = info;
	struct amdgpu_vm *peer_vm;
	struct kgd_mem *mem;
	struct bo_vm_reservation_context ctx;
	struct amdgpu_amdkfd_fence *new_fence;
	int ret = 0, i;
	struct list_head duplicate_save;
	struct amdgpu_sync sync_obj;

	INIT_LIST_HEAD(&duplicate_save);
	INIT_LIST_HEAD(&ctx.list);
	INIT_LIST_HEAD(&ctx.duplicates);

	pd_bo_list = kcalloc(process_info->n_vms,
			     sizeof(struct amdgpu_bo_list_entry),
			     GFP_KERNEL);
	if (!pd_bo_list)
		return -ENOMEM;

	i = 0;
	mutex_lock(&process_info->lock);
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			vm_list_node)
		amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);

	/* Reserve all BOs and page tables/directory. Add all BOs from
	 * kfd_bo_list to ctx.list
	 */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head) {

		list_add_tail(&mem->resv_list.head, &ctx.list);
		mem->resv_list.bo = mem->validate_list.bo;
		mem->resv_list.num_shared = mem->validate_list.num_shared;
	}

	ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
				     false, &duplicate_save);
	if (ret) {
		pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
		goto ttm_reserve_fail;
	}

	amdgpu_sync_create(&sync_obj);

	/* Validate PDs and PTs */
	ret = process_validate_vms(process_info);
	if (ret)
		goto validate_map_fail;

	ret = process_sync_pds_resv(process_info, &sync_obj);
	if (ret) {
		pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
		goto validate_map_fail;
	}

	/* Validate BOs and map them to GPUVM (update VM page tables). */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head) {

		struct amdgpu_bo *bo = mem->bo;
		uint32_t domain = mem->domain;
		struct kfd_bo_va_list *bo_va_entry;

		ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
		if (ret) {
			pr_debug("Memory eviction: Validate BOs failed. Try again\n");
			goto validate_map_fail;
		}
		ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving);
		if (ret) {
			pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
			goto validate_map_fail;
		}
		list_for_each_entry(bo_va_entry, &mem->bo_va_list,
				    bo_list) {
			ret = update_gpuvm_pte((struct amdgpu_device *)
					      bo_va_entry->kgd_dev,
					      bo_va_entry,
					      &sync_obj);
			if (ret) {
				pr_debug("Memory eviction: update PTE failed. Try again\n");
				goto validate_map_fail;
			}
		}
	}

	/* Update page directories */
	ret = process_update_pds(process_info, &sync_obj);
	if (ret) {
		pr_debug("Memory eviction: update PDs failed. Try again\n");
		goto validate_map_fail;
	}

	/* Wait for validate and PT updates to finish */
	amdgpu_sync_wait(&sync_obj, false);

	/* Release old eviction fence and create new one, because fence only
	 * goes from unsignaled to signaled, fence cannot be reused.
	 * Use context and mm from the old fence.
	 */
	new_fence = amdgpu_amdkfd_fence_create(
				process_info->eviction_fence->base.context,
				process_info->eviction_fence->mm);
	if (!new_fence) {
		pr_err("Failed to create eviction fence\n");
		ret = -ENOMEM;
		goto validate_map_fail;
	}
	dma_fence_put(&process_info->eviction_fence->base);
	process_info->eviction_fence = new_fence;
	*ef = dma_fence_get(&new_fence->base);

	/* Attach new eviction fence to all BOs */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
		validate_list.head)
		amdgpu_bo_fence(mem->bo,
			&process_info->eviction_fence->base, true);

	/* Attach eviction fence to PD / PT BOs */
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		struct amdgpu_bo *bo = peer_vm->root.base.bo;

		amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
	}

validate_map_fail:
	ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
	amdgpu_sync_free(&sync_obj);
ttm_reserve_fail:
	mutex_unlock(&process_info->lock);
	kfree(pd_bo_list);
	return ret;
}

int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
{
	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
	struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
	int ret;

	if (!info || !gws)
		return -EINVAL;

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem)
		return -ENOMEM;

	mutex_init(&(*mem)->lock);
	INIT_LIST_HEAD(&(*mem)->bo_va_list);
	(*mem)->bo = amdgpu_bo_ref(gws_bo);
	(*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
	(*mem)->process_info = process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
	amdgpu_sync_create(&(*mem)->sync);

	/* Validate gws bo the first time it is added to process */
	mutex_lock(&(*mem)->process_info->lock);
	ret = amdgpu_bo_reserve(gws_bo, false);
	if (unlikely(ret)) {
		pr_err("Reserve gws bo failed %d\n", ret);
		goto bo_reservation_failure;
	}

	ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
	if (ret) {
		pr_err("GWS BO validate failed %d\n", ret);
		goto bo_validation_failure;
	}
	/* GWS resource is shared between amdgpu and amdkfd.
	 * Add process eviction fence to bo so they can
	 * evict each other.
	 */
	ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
	if (ret)
		goto reserve_shared_fail;
	amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
	amdgpu_bo_unreserve(gws_bo);
	mutex_unlock(&(*mem)->process_info->lock);

	return ret;

reserve_shared_fail:
bo_validation_failure:
	amdgpu_bo_unreserve(gws_bo);
bo_reservation_failure:
	mutex_unlock(&(*mem)->process_info->lock);
	amdgpu_sync_free(&(*mem)->sync);
	remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
	amdgpu_bo_unref(&gws_bo);
	mutex_destroy(&(*mem)->lock);
	kfree(*mem);
	*mem = NULL;
	return ret;
}

int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
{
	int ret;
	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
	struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
	struct amdgpu_bo *gws_bo = kgd_mem->bo;

	/* Remove BO from process's validate list so restore worker won't touch
	 * it anymore
	 */
	remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);

	ret = amdgpu_bo_reserve(gws_bo, false);
	if (unlikely(ret)) {
		pr_err("Reserve gws bo failed %d\n", ret);
		//TODO add BO back to validate_list?
		return ret;
	}
	amdgpu_amdkfd_remove_eviction_fence(gws_bo,
			process_info->eviction_fence);
	amdgpu_bo_unreserve(gws_bo);
	amdgpu_sync_free(&kgd_mem->sync);
	amdgpu_bo_unref(&gws_bo);
	mutex_destroy(&kgd_mem->lock);
	kfree(kgd_mem);
	return 0;
}

/* Returns GPU-specific tiling mode information */
int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
				struct tile_config *config)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	config->gb_addr_config = adev->gfx.config.gb_addr_config;
	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
	config->num_tile_configs =
			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	config->macro_tile_config_ptr =
			adev->gfx.config.macrotile_mode_array;
	config->num_macro_tile_configs =
			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

	/* Those values are not set from GFX9 onwards */
	config->num_banks = adev->gfx.config.num_banks;
	config->num_ranks = adev->gfx.config.num_ranks;

	return 0;
}