2 * Copyright 2014-2018 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
23 #define pr_fmt(fmt) "kfd2kgd: " fmt
25 #include <linux/list.h>
26 #include <linux/pagemap.h>
27 #include <linux/sched/mm.h>
28 #include <linux/dma-buf.h>
30 #include "amdgpu_object.h"
31 #include "amdgpu_vm.h"
32 #include "amdgpu_amdkfd.h"
34 /* Special VM and GART address alignment needed for VI pre-Fiji due to
37 #define VI_BO_SIZE_ALIGN (0x8000)
39 /* BO flag to indicate a KFD userptr BO */
40 #define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
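/* Bit 63 sits well above the AMDGPU_GEM_CREATE_* flag bits, so this
 * KFD-internal marker should not collide with regular BO creation flags.
 */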
42 /* Userptr restore delay, just long enough to allow consecutive VM
43 * changes to accumulate
45 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
47 /* Impose limit on how much memory KFD can use */
49 uint64_t max_system_mem_limit;
50 uint64_t max_ttm_mem_limit;
51 int64_t system_mem_used;
53 spinlock_t mem_limit_lock;
56 /* Struct used for amdgpu_amdkfd_bo_validate */
57 struct amdgpu_vm_parser {
62 static const char * const domain_bit_to_string[] = {
71 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
73 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
76 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
78 return (struct amdgpu_device *)kgd;
81 static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
84 struct kfd_bo_va_list *entry;
86 list_for_each_entry(entry, &mem->bo_va_list, bo_list)
87 if (entry->bo_va->base.vm == avm)
93 /* Set memory usage limits. Currently, the limits are
94 * System (TTM + userptr) memory - 3/4th System RAM
95 * TTM memory - 3/8th System RAM
97 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
103 mem = si.totalram - si.totalhigh;
106 spin_lock_init(&kfd_mem_limit.mem_limit_lock);
107 kfd_mem_limit.max_system_mem_limit = (mem >> 1) + (mem >> 2);
108 kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
109 pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
110 (kfd_mem_limit.max_system_mem_limit >> 20),
111 (kfd_mem_limit.max_ttm_mem_limit >> 20));
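/* Worked example (illustrative): with 16 GiB of (non-highmem) system RAM,
 * max_system_mem_limit = 8 GiB + 4 GiB = 12 GiB (3/4 of RAM) and
 * max_ttm_mem_limit = 8 GiB - 2 GiB = 6 GiB (3/8 of RAM).
 */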
114 static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
115 uint64_t size, u32 domain, bool sg)
117 size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
118 uint64_t reserved_for_pt = amdgpu_amdkfd_total_mem_size >> 9;
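/* total_mem_size >> 9 reserves 1/512th of all memory for page tables;
 * it is subtracted from usable VRAM in the limit check below.
 */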
121 acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
122 sizeof(struct amdgpu_bo));
125 if (domain == AMDGPU_GEM_DOMAIN_GTT) {
127 system_mem_needed = acc_size + size;
128 ttm_mem_needed = acc_size + size;
129 } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
131 system_mem_needed = acc_size + size;
132 ttm_mem_needed = acc_size;
135 system_mem_needed = acc_size;
136 ttm_mem_needed = acc_size;
137 if (domain == AMDGPU_GEM_DOMAIN_VRAM)
141 spin_lock(&kfd_mem_limit.mem_limit_lock);
143 if ((kfd_mem_limit.system_mem_used + system_mem_needed >
144 kfd_mem_limit.max_system_mem_limit) ||
145 (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
146 kfd_mem_limit.max_ttm_mem_limit) ||
147 (adev->kfd.vram_used + vram_needed >
148 adev->gmc.real_vram_size - reserved_for_pt)) {
151 kfd_mem_limit.system_mem_used += system_mem_needed;
152 kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
153 adev->kfd.vram_used += vram_needed;
156 spin_unlock(&kfd_mem_limit.mem_limit_lock);
160 static void unreserve_mem_limit(struct amdgpu_device *adev,
161 uint64_t size, u32 domain, bool sg)
165 acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
166 sizeof(struct amdgpu_bo));
168 spin_lock(&kfd_mem_limit.mem_limit_lock);
169 if (domain == AMDGPU_GEM_DOMAIN_GTT) {
170 kfd_mem_limit.system_mem_used -= (acc_size + size);
171 kfd_mem_limit.ttm_mem_used -= (acc_size + size);
172 } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
173 kfd_mem_limit.system_mem_used -= (acc_size + size);
174 kfd_mem_limit.ttm_mem_used -= acc_size;
176 kfd_mem_limit.system_mem_used -= acc_size;
177 kfd_mem_limit.ttm_mem_used -= acc_size;
178 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
179 adev->kfd.vram_used -= size;
180 WARN_ONCE(adev->kfd.vram_used < 0,
181 "kfd VRAM memory accounting unbalanced");
184 WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
185 "kfd system memory accounting unbalanced");
186 WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
187 "kfd TTM memory accounting unbalanced");
189 spin_unlock(&kfd_mem_limit.mem_limit_lock);
192 void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
194 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
195 u32 domain = bo->preferred_domains;
196 bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
198 if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
199 domain = AMDGPU_GEM_DOMAIN_CPU;
203 unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
207 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
208 * reservation object.
210 * @bo: [IN] Remove eviction fence(s) from this BO
211 * @ef: [IN] This eviction fence is removed if it
212 * is present in the shared list.
214 * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
216 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
217 struct amdgpu_amdkfd_fence *ef)
219 struct reservation_object *resv = bo->tbo.resv;
220 struct reservation_object_list *old, *new;
221 unsigned int i, j, k;
226 old = reservation_object_get_list(resv);
230 new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
235 /* Go through all the shared fences in the reservation object and sort
236 * the interesting ones to the end of the list.
238 for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
241 f = rcu_dereference_protected(old->shared[i],
242 reservation_object_held(resv));
244 if (f->context == ef->base.context)
245 RCU_INIT_POINTER(new->shared[--j], f);
247 RCU_INIT_POINTER(new->shared[k++], f);
249 new->shared_max = old->shared_max;
250 new->shared_count = k;
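/* At this point new->shared[0..k-1] holds the fences to keep and
 * new->shared[j..old->shared_count-1] holds the eviction fences being
 * removed; the loop above guarantees k == j.
 */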
252 /* Install the new fence list, seqcount provides the barriers */
254 write_seqcount_begin(&resv->seq);
255 RCU_INIT_POINTER(resv->fence, new);
256 write_seqcount_end(&resv->seq);
259 /* Drop the references to the removed fences or move them to ef_list */
260 for (i = j, k = 0; i < old->shared_count; ++i) {
263 f = rcu_dereference_protected(new->shared[i],
264 reservation_object_held(resv));
272 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
275 struct ttm_operation_ctx ctx = { false, false };
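/* The { false, false } initializer sets the first two ttm_operation_ctx
 * fields: interruptible = false and no_wait_gpu = false.
 */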
278 if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
279 "Called with userptr BO"))
282 amdgpu_bo_placement_from_domain(bo, domain);
284 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
288 amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
294 static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
296 struct amdgpu_vm_parser *p = param;
298 return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
301 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
303 * Page directories are not updated here because huge page handling
304 * during page table updates can invalidate page directory entries
305 * again. Page directories are only updated after updating page
308 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
310 struct amdgpu_bo *pd = vm->root.base.bo;
311 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
312 struct amdgpu_vm_parser param;
315 param.domain = AMDGPU_GEM_DOMAIN_VRAM;
318 ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
321 pr_err("amdgpu: failed to validate PT BOs\n");
325 ret = amdgpu_amdkfd_validate(&param, pd);
327 pr_err("amdgpu: failed to validate PD\n");
331 vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
333 if (vm->use_cpu_for_update) {
334 ret = amdgpu_bo_kmap(pd, NULL);
336 pr_err("amdgpu: failed to kmap PD, ret=%d\n", ret);
344 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
346 struct amdgpu_bo *pd = vm->root.base.bo;
347 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
350 ret = amdgpu_vm_update_directories(adev, vm);
354 return amdgpu_sync_fence(NULL, sync, vm->last_update, false);
357 /* add_bo_to_vm - Add a BO to a VM
359 * Everything that needs to be done only once when a BO is first added
360 * to a VM. It can later be mapped and unmapped many times without
361 * repeating these steps.
363 * 1. Allocate and initialize BO VA entry data structure
364 * 2. Add BO to the VM
365 * 3. Determine ASIC-specific PTE flags
366 * 4. Alloc page tables and directories if needed
367 * 4a. Validate new page tables and directories
369 static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
370 struct amdgpu_vm *vm, bool is_aql,
371 struct kfd_bo_va_list **p_bo_va_entry)
374 struct kfd_bo_va_list *bo_va_entry;
375 struct amdgpu_bo *bo = mem->bo;
376 uint64_t va = mem->va;
377 struct list_head *list_bo_va = &mem->bo_va_list;
378 unsigned long bo_size = bo->tbo.mem.size;
381 pr_err("Invalid VA when adding BO to VM\n");
388 bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
392 pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
395 /* Add BO to VM internal data structures*/
396 bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
397 if (!bo_va_entry->bo_va) {
399 pr_err("Failed to add BO object to VM. ret == %d\n",
404 bo_va_entry->va = va;
405 bo_va_entry->pte_flags = amdgpu_gmc_get_pte_flags(adev,
407 bo_va_entry->kgd_dev = (void *)adev;
408 list_add(&bo_va_entry->bo_list, list_bo_va);
411 *p_bo_va_entry = bo_va_entry;
413 /* Allocate validate page tables if needed */
414 ret = vm_validate_pt_pd_bos(vm);
416 pr_err("validate_pt_pd_bos() failed\n");
423 amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
424 list_del(&bo_va_entry->bo_list);
430 static void remove_bo_from_vm(struct amdgpu_device *adev,
431 struct kfd_bo_va_list *entry, unsigned long size)
433 pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
435 entry->va + size, entry);
436 amdgpu_vm_bo_rmv(adev, entry->bo_va);
437 list_del(&entry->bo_list);
441 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
442 struct amdkfd_process_info *process_info,
445 struct ttm_validate_buffer *entry = &mem->validate_list;
446 struct amdgpu_bo *bo = mem->bo;
448 INIT_LIST_HEAD(&entry->head);
449 entry->num_shared = 1;
450 entry->bo = &bo->tbo;
451 mutex_lock(&process_info->lock);
453 list_add_tail(&entry->head, &process_info->userptr_valid_list);
455 list_add_tail(&entry->head, &process_info->kfd_bo_list);
456 mutex_unlock(&process_info->lock);
459 /* Initializes user pages. It registers the MMU notifier and validates
460 * the userptr BO in the GTT domain.
462 * The BO must already be on the userptr_valid_list. Otherwise an
463 * eviction and restore may happen that leaves the new BO unmapped
464 * with the user mode queues running.
466 * Takes the process_info->lock to protect against concurrent restore
469 * Returns 0 for success, negative errno for errors.
471 static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
474 struct amdkfd_process_info *process_info = mem->process_info;
475 struct amdgpu_bo *bo = mem->bo;
476 struct ttm_operation_ctx ctx = { true, false };
479 mutex_lock(&process_info->lock);
481 ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0);
483 pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
487 ret = amdgpu_mn_register(bo, user_addr);
489 pr_err("%s: Failed to register MMU notifier: %d\n",
494 ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, bo->tbo.ttm->pages);
496 pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
500 ret = amdgpu_bo_reserve(bo, true);
502 pr_err("%s: Failed to reserve BO\n", __func__);
505 amdgpu_bo_placement_from_domain(bo, mem->domain);
506 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
508 pr_err("%s: failed to validate BO\n", __func__);
509 amdgpu_bo_unreserve(bo);
512 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
515 amdgpu_mn_unregister(bo);
517 mutex_unlock(&process_info->lock);
521 /* Reserving a BO and its page table BOs must happen atomically to
522 * avoid deadlocks. Some operations update multiple VMs at once. Track
523 * all the reservation info in a context structure. Optionally a sync
524 * object can track VM updates.
526 struct bo_vm_reservation_context {
527 struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
528 unsigned int n_vms; /* Number of VMs reserved */
529 struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries */
530 struct ww_acquire_ctx ticket; /* Reservation ticket */
531 struct list_head list, duplicates; /* BO lists */
532 struct amdgpu_sync *sync; /* Pointer to sync object */
533 bool reserved; /* Whether BOs are reserved */
537 BO_VM_NOT_MAPPED = 0, /* Match VMs where a BO is not mapped */
538 BO_VM_MAPPED, /* Match VMs where a BO is mapped */
539 BO_VM_ALL, /* Match all VMs a BO was added to */
543 * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
544 * @mem: KFD BO structure.
545 * @vm: the VM to reserve.
546 * @ctx: the struct that will be used in unreserve_bo_and_vms().
548 static int reserve_bo_and_vm(struct kgd_mem *mem,
549 struct amdgpu_vm *vm,
550 struct bo_vm_reservation_context *ctx)
552 struct amdgpu_bo *bo = mem->bo;
557 ctx->reserved = false;
559 ctx->sync = &mem->sync;
561 INIT_LIST_HEAD(&ctx->list);
562 INIT_LIST_HEAD(&ctx->duplicates);
564 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
568 ctx->kfd_bo.priority = 0;
569 ctx->kfd_bo.tv.bo = &bo->tbo;
570 ctx->kfd_bo.tv.num_shared = 1;
571 list_add(&ctx->kfd_bo.tv.head, &ctx->list);
573 amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
575 ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
576 false, &ctx->duplicates);
578 ctx->reserved = true;
580 pr_err("Failed to reserve buffers in ttm\n");
589 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
590 * @mem: KFD BO structure.
591 * @vm: the VM to reserve. If NULL, then all VMs associated with the BO
592 * are reserved. Otherwise, only the given VM is reserved.
593 * @map_type: the mapping status that will be used to filter the VMs.
594 * @ctx: the struct that will be used in unreserve_bo_and_vms().
596 * Returns 0 for success, negative for failure.
598 static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
599 struct amdgpu_vm *vm, enum bo_vm_match map_type,
600 struct bo_vm_reservation_context *ctx)
602 struct amdgpu_bo *bo = mem->bo;
603 struct kfd_bo_va_list *entry;
607 ctx->reserved = false;
610 ctx->sync = &mem->sync;
612 INIT_LIST_HEAD(&ctx->list);
613 INIT_LIST_HEAD(&ctx->duplicates);
615 list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
616 if ((vm && vm != entry->bo_va->base.vm) ||
617 (entry->is_mapped != map_type
618 && map_type != BO_VM_ALL))
624 if (ctx->n_vms != 0) {
625 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
631 ctx->kfd_bo.priority = 0;
632 ctx->kfd_bo.tv.bo = &bo->tbo;
633 ctx->kfd_bo.tv.num_shared = 1;
634 list_add(&ctx->kfd_bo.tv.head, &ctx->list);
637 list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
638 if ((vm && vm != entry->bo_va->base.vm) ||
639 (entry->is_mapped != map_type
640 && map_type != BO_VM_ALL))
643 amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
648 ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
649 false, &ctx->duplicates);
651 ctx->reserved = true;
653 pr_err("Failed to reserve buffers in ttm.\n");
664 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
665 * @ctx: Reservation context to unreserve
666 * @wait: Optionally wait for a sync object representing pending VM updates
667 * @intr: Whether the wait is interruptible
669 * Also frees any resources allocated in
670 * reserve_bo_and_(cond_)vm(s). Returns the status from
673 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
674 bool wait, bool intr)
679 ret = amdgpu_sync_wait(ctx->sync, intr);
682 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
687 ctx->reserved = false;
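/* Illustrative usage sketch (not part of the original code): callers in
 * this file pair the reserve and unreserve helpers roughly like this:
 *
 *	struct bo_vm_reservation_context ctx;
 *	int ret = reserve_bo_and_vm(mem, vm, &ctx);
 *	if (ret)
 *		return ret;
 *	... update mappings, adding fences to ctx.sync ...
 *	ret = unreserve_bo_and_vms(&ctx, false, false);
 */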
693 static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
694 struct kfd_bo_va_list *entry,
695 struct amdgpu_sync *sync)
697 struct amdgpu_bo_va *bo_va = entry->bo_va;
698 struct amdgpu_vm *vm = bo_va->base.vm;
700 amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
702 amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
704 amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
709 static int update_gpuvm_pte(struct amdgpu_device *adev,
710 struct kfd_bo_va_list *entry,
711 struct amdgpu_sync *sync)
714 struct amdgpu_bo_va *bo_va = entry->bo_va;
716 /* Update the page tables */
717 ret = amdgpu_vm_bo_update(adev, bo_va, false);
719 pr_err("amdgpu_vm_bo_update failed\n");
723 return amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
726 static int map_bo_to_gpuvm(struct amdgpu_device *adev,
727 struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
732 /* Set virtual address for the allocation */
733 ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
734 amdgpu_bo_size(entry->bo_va->base.bo),
737 pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
745 ret = update_gpuvm_pte(adev, entry, sync);
747 pr_err("update_gpuvm_pte() failed\n");
748 goto update_gpuvm_pte_failed;
753 update_gpuvm_pte_failed:
754 unmap_bo_from_gpuvm(adev, entry, sync);
758 static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
760 struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
764 if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
768 sg->sgl->dma_address = addr;
769 sg->sgl->length = size;
770 #ifdef CONFIG_NEED_SG_DMA_LENGTH
771 sg->sgl->dma_length = size;
776 static int process_validate_vms(struct amdkfd_process_info *process_info)
778 struct amdgpu_vm *peer_vm;
781 list_for_each_entry(peer_vm, &process_info->vm_list_head,
783 ret = vm_validate_pt_pd_bos(peer_vm);
791 static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
792 struct amdgpu_sync *sync)
794 struct amdgpu_vm *peer_vm;
797 list_for_each_entry(peer_vm, &process_info->vm_list_head,
799 struct amdgpu_bo *pd = peer_vm->root.base.bo;
801 ret = amdgpu_sync_resv(NULL,
803 AMDGPU_FENCE_OWNER_UNDEFINED, false);
811 static int process_update_pds(struct amdkfd_process_info *process_info,
812 struct amdgpu_sync *sync)
814 struct amdgpu_vm *peer_vm;
817 list_for_each_entry(peer_vm, &process_info->vm_list_head,
819 ret = vm_update_pds(peer_vm, sync);
827 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
828 struct dma_fence **ef)
830 struct amdkfd_process_info *info = NULL;
833 if (!*process_info) {
834 info = kzalloc(sizeof(*info), GFP_KERNEL);
838 mutex_init(&info->lock);
839 INIT_LIST_HEAD(&info->vm_list_head);
840 INIT_LIST_HEAD(&info->kfd_bo_list);
841 INIT_LIST_HEAD(&info->userptr_valid_list);
842 INIT_LIST_HEAD(&info->userptr_inval_list);
844 info->eviction_fence =
845 amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
847 if (!info->eviction_fence) {
848 pr_err("Failed to create eviction fence\n");
850 goto create_evict_fence_fail;
853 info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
854 atomic_set(&info->evicted_bos, 0);
855 INIT_DELAYED_WORK(&info->restore_userptr_work,
856 amdgpu_amdkfd_restore_userptr_worker);
858 *process_info = info;
859 *ef = dma_fence_get(&info->eviction_fence->base);
862 vm->process_info = *process_info;
864 /* Validate page directory and attach eviction fence */
865 ret = amdgpu_bo_reserve(vm->root.base.bo, true);
867 goto reserve_pd_fail;
868 ret = vm_validate_pt_pd_bos(vm);
870 pr_err("validate_pt_pd_bos() failed\n");
871 goto validate_pd_fail;
873 ret = amdgpu_bo_sync_wait(vm->root.base.bo,
874 AMDGPU_FENCE_OWNER_KFD, false);
877 amdgpu_bo_fence(vm->root.base.bo,
878 &vm->process_info->eviction_fence->base, true);
879 amdgpu_bo_unreserve(vm->root.base.bo);
881 /* Update process info */
882 mutex_lock(&vm->process_info->lock);
883 list_add_tail(&vm->vm_list_node,
884 &(vm->process_info->vm_list_head));
885 vm->process_info->n_vms++;
886 mutex_unlock(&vm->process_info->lock);
892 amdgpu_bo_unreserve(vm->root.base.bo);
894 vm->process_info = NULL;
896 /* Two fence references: one in info and one in *ef */
897 dma_fence_put(&info->eviction_fence->base);
900 *process_info = NULL;
902 create_evict_fence_fail:
903 mutex_destroy(&info->lock);
909 int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid,
910 void **vm, void **process_info,
911 struct dma_fence **ef)
913 struct amdgpu_device *adev = get_amdgpu_device(kgd);
914 struct amdgpu_vm *new_vm;
917 new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
921 /* Initialize AMDGPU part of the VM */
922 ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid);
924 pr_err("Failed init vm ret %d\n", ret);
925 goto amdgpu_vm_init_fail;
928 /* Initialize KFD part of the VM and process info */
929 ret = init_kfd_vm(new_vm, process_info, ef);
931 goto init_kfd_vm_fail;
933 *vm = (void *) new_vm;
938 amdgpu_vm_fini(adev, new_vm);
944 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
945 struct file *filp, unsigned int pasid,
946 void **vm, void **process_info,
947 struct dma_fence **ef)
949 struct amdgpu_device *adev = get_amdgpu_device(kgd);
950 struct drm_file *drm_priv = filp->private_data;
951 struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
952 struct amdgpu_vm *avm = &drv_priv->vm;
955 /* Already a compute VM? */
956 if (avm->process_info)
959 /* Convert VM into a compute VM */
960 ret = amdgpu_vm_make_compute(adev, avm, pasid);
964 /* Initialize KFD part of the VM and process info */
965 ret = init_kfd_vm(avm, process_info, ef);
974 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
975 struct amdgpu_vm *vm)
977 struct amdkfd_process_info *process_info = vm->process_info;
978 struct amdgpu_bo *pd = vm->root.base.bo;
983 /* Release eviction fence from PD */
984 amdgpu_bo_reserve(pd, false);
985 amdgpu_bo_fence(pd, NULL, false);
986 amdgpu_bo_unreserve(pd);
988 /* Update process info */
989 mutex_lock(&process_info->lock);
990 process_info->n_vms--;
991 list_del(&vm->vm_list_node);
992 mutex_unlock(&process_info->lock);
994 /* Release per-process resources when last compute VM is destroyed */
995 if (!process_info->n_vms) {
996 WARN_ON(!list_empty(&process_info->kfd_bo_list));
997 WARN_ON(!list_empty(&process_info->userptr_valid_list));
998 WARN_ON(!list_empty(&process_info->userptr_inval_list));
1000 dma_fence_put(&process_info->eviction_fence->base);
1001 cancel_delayed_work_sync(&process_info->restore_userptr_work);
1002 put_pid(process_info->pid);
1003 mutex_destroy(&process_info->lock);
1004 kfree(process_info);
1008 void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
1010 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1011 struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1013 if (WARN_ON(!kgd || !vm))
1016 pr_debug("Destroying process vm %p\n", vm);
1018 /* Release the VM context */
1019 amdgpu_vm_fini(adev, avm);
1023 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
1025 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1026 struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1028 if (WARN_ON(!kgd || !vm))
1031 pr_debug("Releasing process vm %p\n", vm);
1033 /* The original pasid of the amdgpu vm has already been
1034 * released when the vm was converted to a compute vm.
1035 * The current pasid is managed by KFD and will be
1036 * released on KFD process destroy. Set the amdgpu pasid
1037 * to 0 to avoid a duplicate release.
1039 amdgpu_vm_release_compute(adev, avm);
1042 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
1044 struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1045 struct amdgpu_bo *pd = avm->root.base.bo;
1046 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
1048 if (adev->asic_type < CHIP_VEGA10)
1049 return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1050 return avm->pd_phys_addr;
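/* Pre-Vega10 ASICs program the page directory base as a GPU page frame
 * number, while Vega10 and later take the full physical address, hence
 * the conditional shift above.
 */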
1053 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1054 struct kgd_dev *kgd, uint64_t va, uint64_t size,
1055 void *vm, struct kgd_mem **mem,
1056 uint64_t *offset, uint32_t flags)
1058 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1059 struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1060 enum ttm_bo_type bo_type = ttm_bo_type_device;
1061 struct sg_table *sg = NULL;
1062 uint64_t user_addr = 0;
1063 struct amdgpu_bo *bo;
1064 struct amdgpu_bo_param bp;
1066 u32 domain, alloc_domain;
1068 uint32_t mapping_flags;
1072 * Check on which domain to allocate BO
1074 if (flags & ALLOC_MEM_FLAGS_VRAM) {
1075 domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1076 alloc_flags = AMDGPU_GEM_CREATE_VRAM_CLEARED;
1077 alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
1078 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
1079 AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
1080 } else if (flags & ALLOC_MEM_FLAGS_GTT) {
1081 domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1083 } else if (flags & ALLOC_MEM_FLAGS_USERPTR) {
1084 domain = AMDGPU_GEM_DOMAIN_GTT;
1085 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1087 if (!offset || !*offset)
1089 user_addr = *offset;
1090 } else if (flags & ALLOC_MEM_FLAGS_DOORBELL) {
1091 domain = AMDGPU_GEM_DOMAIN_GTT;
1092 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1093 bo_type = ttm_bo_type_sg;
1095 if (size > UINT_MAX)
1097 sg = create_doorbell_sg(*offset, size);
1104 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1109 INIT_LIST_HEAD(&(*mem)->bo_va_list);
1110 mutex_init(&(*mem)->lock);
1111 (*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1113 /* Workaround for AQL queue wraparound bug. Map the same
1114 * memory twice. That means we only actually allocate half
1117 if ((*mem)->aql_queue)
1120 /* Workaround for TLB bug on older VI chips */
1121 byte_align = (adev->family == AMDGPU_FAMILY_VI &&
1122 adev->asic_type != CHIP_FIJI &&
1123 adev->asic_type != CHIP_POLARIS10 &&
1124 adev->asic_type != CHIP_POLARIS11 &&
1125 adev->asic_type != CHIP_POLARIS12) ?
1126 VI_BO_SIZE_ALIGN : 1;
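/* VI_BO_SIZE_ALIGN (0x8000) enforces 32 KiB size/address alignment on the
 * older VI ASICs matched above. For AQL queues, only half of the requested
 * size is actually allocated (see the workaround above); the BO is later
 * mapped twice back-to-back in the GPU VM.
 */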
1128 mapping_flags = AMDGPU_VM_PAGE_READABLE;
1129 if (flags & ALLOC_MEM_FLAGS_WRITABLE)
1130 mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
1131 if (flags & ALLOC_MEM_FLAGS_EXECUTABLE)
1132 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
1133 if (flags & ALLOC_MEM_FLAGS_COHERENT)
1134 mapping_flags |= AMDGPU_VM_MTYPE_UC;
1136 mapping_flags |= AMDGPU_VM_MTYPE_NC;
1137 (*mem)->mapping_flags = mapping_flags;
1139 amdgpu_sync_create(&(*mem)->sync);
1141 ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
1143 pr_debug("Insufficient system memory\n");
1144 goto err_reserve_limit;
1147 pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1148 va, size, domain_string(alloc_domain));
1150 memset(&bp, 0, sizeof(bp));
1152 bp.byte_align = byte_align;
1153 bp.domain = alloc_domain;
1154 bp.flags = alloc_flags;
1157 ret = amdgpu_bo_create(adev, &bp, &bo);
1159 pr_debug("Failed to create BO on domain %s. ret %d\n",
1160 domain_string(alloc_domain), ret);
1163 if (bo_type == ttm_bo_type_sg) {
1165 bo->tbo.ttm->sg = sg;
1170 bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;
1173 (*mem)->domain = domain;
1174 (*mem)->mapped_to_gpu_memory = 0;
1175 (*mem)->process_info = avm->process_info;
1176 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1179 ret = init_user_pages(*mem, current->mm, user_addr);
1181 mutex_lock(&avm->process_info->lock);
1182 list_del(&(*mem)->validate_list.head);
1183 mutex_unlock(&avm->process_info->lock);
1184 goto allocate_init_user_pages_failed;
1189 *offset = amdgpu_bo_mmap_offset(bo);
1193 allocate_init_user_pages_failed:
1194 amdgpu_bo_unref(&bo);
1195 /* Don't unreserve system mem limit twice */
1196 goto err_reserve_limit;
1198 unreserve_mem_limit(adev, size, alloc_domain, !!sg);
1200 mutex_destroy(&(*mem)->lock);
1210 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1211 struct kgd_dev *kgd, struct kgd_mem *mem)
1213 struct amdkfd_process_info *process_info = mem->process_info;
1214 unsigned long bo_size = mem->bo->tbo.mem.size;
1215 struct kfd_bo_va_list *entry, *tmp;
1216 struct bo_vm_reservation_context ctx;
1217 struct ttm_validate_buffer *bo_list_entry;
1220 mutex_lock(&mem->lock);
1222 if (mem->mapped_to_gpu_memory > 0) {
1223 pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1225 mutex_unlock(&mem->lock);
1229 mutex_unlock(&mem->lock);
1230 /* lock is not needed after this, since mem is unused and will
1234 /* No more MMU notifiers */
1235 amdgpu_mn_unregister(mem->bo);
1237 /* Make sure restore workers don't access the BO any more */
1238 bo_list_entry = &mem->validate_list;
1239 mutex_lock(&process_info->lock);
1240 list_del(&bo_list_entry->head);
1241 mutex_unlock(&process_info->lock);
1243 ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1247 /* The eviction fence should be removed by the last unmap.
1248 * TODO: Log an error condition if the bo still has the eviction fence
1251 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1252 process_info->eviction_fence);
1253 pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1254 mem->va + bo_size * (1 + mem->aql_queue));
1256 /* Remove from VM internal data structures */
1257 list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
1258 remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
1261 ret = unreserve_bo_and_vms(&ctx, false, false);
1263 /* Free the sync object */
1264 amdgpu_sync_free(&mem->sync);
1266 /* If the SG is not NULL, it's one we created for a doorbell
1267 * BO. We need to free it.
1269 if (mem->bo->tbo.sg) {
1270 sg_free_table(mem->bo->tbo.sg);
1271 kfree(mem->bo->tbo.sg);
1275 amdgpu_bo_unref(&mem->bo);
1276 mutex_destroy(&mem->lock);
1282 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1283 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1285 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1286 struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1288 struct amdgpu_bo *bo;
1290 struct kfd_bo_va_list *entry;
1291 struct bo_vm_reservation_context ctx;
1292 struct kfd_bo_va_list *bo_va_entry = NULL;
1293 struct kfd_bo_va_list *bo_va_entry_aql = NULL;
1294 unsigned long bo_size;
1295 bool is_invalid_userptr = false;
1299 pr_err("Invalid BO when mapping memory to GPU\n");
1303 /* Make sure restore is not running concurrently. Since we
1304 * don't map invalid userptr BOs, we rely on the next restore
1305 * worker to do the mapping
1307 mutex_lock(&mem->process_info->lock);
1309 /* Lock mmap-sem. If we find an invalid userptr BO, we can be
1310 * sure that the MMU notifier is no longer running
1311 * concurrently and the queues are actually stopped
1313 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1314 down_write(&current->mm->mmap_sem);
1315 is_invalid_userptr = atomic_read(&mem->invalid);
1316 up_write(&current->mm->mmap_sem);
1319 mutex_lock(&mem->lock);
1321 domain = mem->domain;
1322 bo_size = bo->tbo.mem.size;
1324 pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1326 mem->va + bo_size * (1 + mem->aql_queue),
1327 vm, domain_string(domain));
1329 ret = reserve_bo_and_vm(mem, vm, &ctx);
1333 /* Userptr can be marked as "not invalid", but not actually be
1334 * validated yet (still in the system domain). In that case
1335 * the queues are still stopped and we can leave mapping for
1336 * the next restore worker
1338 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
1339 bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
1340 is_invalid_userptr = true;
1342 if (check_if_add_bo_to_vm(avm, mem)) {
1343 ret = add_bo_to_vm(adev, mem, avm, false,
1346 goto add_bo_to_vm_failed;
1347 if (mem->aql_queue) {
1348 ret = add_bo_to_vm(adev, mem, avm,
1349 true, &bo_va_entry_aql);
1351 goto add_bo_to_vm_failed_aql;
1354 ret = vm_validate_pt_pd_bos(avm);
1356 goto add_bo_to_vm_failed;
1359 if (mem->mapped_to_gpu_memory == 0 &&
1360 !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1361 /* Validate BO only once. The eviction fence gets added to BO
1362 * the first time it is mapped. Validate will wait for all
1363 * background evictions to complete.
1365 ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1367 pr_debug("Validate failed\n");
1368 goto map_bo_to_gpuvm_failed;
1372 list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1373 if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
1374 pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1375 entry->va, entry->va + bo_size,
1378 ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
1379 is_invalid_userptr);
1381 pr_err("Failed to map radeon bo to gpuvm\n");
1382 goto map_bo_to_gpuvm_failed;
1385 ret = vm_update_pds(vm, ctx.sync);
1387 pr_err("Failed to update page directories\n");
1388 goto map_bo_to_gpuvm_failed;
1391 entry->is_mapped = true;
1392 mem->mapped_to_gpu_memory++;
1393 pr_debug("\t INC mapping count %d\n",
1394 mem->mapped_to_gpu_memory);
1398 if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
1400 &avm->process_info->eviction_fence->base,
1402 ret = unreserve_bo_and_vms(&ctx, false, false);
1406 map_bo_to_gpuvm_failed:
1407 if (bo_va_entry_aql)
1408 remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
1409 add_bo_to_vm_failed_aql:
1411 remove_bo_from_vm(adev, bo_va_entry, bo_size);
1412 add_bo_to_vm_failed:
1413 unreserve_bo_and_vms(&ctx, false, false);
1415 mutex_unlock(&mem->process_info->lock);
1416 mutex_unlock(&mem->lock);
1420 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1421 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1423 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1424 struct amdkfd_process_info *process_info =
1425 ((struct amdgpu_vm *)vm)->process_info;
1426 unsigned long bo_size = mem->bo->tbo.mem.size;
1427 struct kfd_bo_va_list *entry;
1428 struct bo_vm_reservation_context ctx;
1431 mutex_lock(&mem->lock);
1433 ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
1436 /* If no VMs were reserved, it means the BO wasn't actually mapped */
1437 if (ctx.n_vms == 0) {
1442 ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
1446 pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1448 mem->va + bo_size * (1 + mem->aql_queue),
1451 list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1452 if (entry->bo_va->base.vm == vm && entry->is_mapped) {
1453 pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1455 entry->va + bo_size,
1458 ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
1460 entry->is_mapped = false;
1462 pr_err("failed to unmap VA 0x%llx\n",
1467 mem->mapped_to_gpu_memory--;
1468 pr_debug("\t DEC mapping count %d\n",
1469 mem->mapped_to_gpu_memory);
1473 /* If BO is unmapped from all VMs, unfence it. It can be evicted if
1476 if (mem->mapped_to_gpu_memory == 0 &&
1477 !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
1478 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1479 process_info->eviction_fence);
1482 unreserve_bo_and_vms(&ctx, false, false);
1484 mutex_unlock(&mem->lock);
1488 int amdgpu_amdkfd_gpuvm_sync_memory(
1489 struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1491 struct amdgpu_sync sync;
1494 amdgpu_sync_create(&sync);
1496 mutex_lock(&mem->lock);
1497 amdgpu_sync_clone(&mem->sync, &sync);
1498 mutex_unlock(&mem->lock);
1500 ret = amdgpu_sync_wait(&sync, intr);
1501 amdgpu_sync_free(&sync);
1505 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1506 struct kgd_mem *mem, void **kptr, uint64_t *size)
1509 struct amdgpu_bo *bo = mem->bo;
1511 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1512 pr_err("userptr can't be mapped to kernel\n");
1516 /* delete kgd_mem from kfd_bo_list to avoid re-validating
1517 * this BO when it is restored after eviction.
1519 mutex_lock(&mem->process_info->lock);
1521 ret = amdgpu_bo_reserve(bo, true);
1523 pr_err("Failed to reserve bo. ret %d\n", ret);
1524 goto bo_reserve_failed;
1527 ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
1529 pr_err("Failed to pin bo. ret %d\n", ret);
1533 ret = amdgpu_bo_kmap(bo, kptr);
1535 pr_err("Failed to map bo to kernel. ret %d\n", ret);
1539 amdgpu_amdkfd_remove_eviction_fence(
1540 bo, mem->process_info->eviction_fence);
1541 list_del_init(&mem->validate_list.head);
1544 *size = amdgpu_bo_size(bo);
1546 amdgpu_bo_unreserve(bo);
1548 mutex_unlock(&mem->process_info->lock);
1552 amdgpu_bo_unpin(bo);
1554 amdgpu_bo_unreserve(bo);
1556 mutex_unlock(&mem->process_info->lock);
1561 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
1562 struct kfd_vm_fault_info *mem)
1564 struct amdgpu_device *adev;
1566 adev = (struct amdgpu_device *)kgd;
1567 if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
1568 *mem = *adev->gmc.vm_fault_info;
1570 atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1575 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
1576 struct dma_buf *dma_buf,
1577 uint64_t va, void *vm,
1578 struct kgd_mem **mem, uint64_t *size,
1579 uint64_t *mmap_offset)
1581 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
1582 struct drm_gem_object *obj;
1583 struct amdgpu_bo *bo;
1584 struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1586 if (dma_buf->ops != &amdgpu_dmabuf_ops)
1587 /* Can't handle non-graphics buffers */
1590 obj = dma_buf->priv;
1591 if (obj->dev->dev_private != adev)
1592 /* Can't handle buffers from other devices */
1595 bo = gem_to_amdgpu_bo(obj);
1596 if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
1597 AMDGPU_GEM_DOMAIN_GTT)))
1598 /* Only VRAM and GTT BOs are supported */
1601 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1606 *size = amdgpu_bo_size(bo);
1609 *mmap_offset = amdgpu_bo_mmap_offset(bo);
1611 INIT_LIST_HEAD(&(*mem)->bo_va_list);
1612 mutex_init(&(*mem)->lock);
1613 (*mem)->mapping_flags =
1614 AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
1615 AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_NC;
1617 (*mem)->bo = amdgpu_bo_ref(bo);
1619 (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1620 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
1621 (*mem)->mapped_to_gpu_memory = 0;
1622 (*mem)->process_info = avm->process_info;
1623 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
1624 amdgpu_sync_create(&(*mem)->sync);
1629 /* Evict a userptr BO by stopping the queues if necessary
1631 * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
1632 * cannot do any memory allocations, and cannot take any locks that
1633 * are held elsewhere while allocating memory. Therefore this is as
1634 * simple as possible, using atomic counters.
1636 * It doesn't do anything to the BO itself. The real work happens in
1637 * restore, where we get updated page addresses. This function only
1638 * ensures that GPU access to the BO is stopped.
1640 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
1641 struct mm_struct *mm)
1643 struct amdkfd_process_info *process_info = mem->process_info;
1644 int invalid, evicted_bos;
1647 invalid = atomic_inc_return(&mem->invalid);
1648 evicted_bos = atomic_inc_return(&process_info->evicted_bos);
1649 if (evicted_bos == 1) {
1650 /* First eviction, stop the queues */
1651 r = kgd2kfd_quiesce_mm(mm);
1653 pr_err("Failed to quiesce KFD\n");
1654 schedule_delayed_work(&process_info->restore_userptr_work,
1655 msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
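/* The evicted_bos counter and the per-BO mem->invalid counter form a simple
 * handshake with the restore worker below: the first eviction quiesces the
 * process queues, and the delayed worker later revalidates the BOs and
 * resumes the queues once no further evictions have raced in.
 */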
1661 /* Update invalid userptr BOs
1663 * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
1664 * userptr_inval_list and updates user pages for all BOs that have
1665 * been invalidated since their last update.
1667 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
1668 struct mm_struct *mm)
1670 struct kgd_mem *mem, *tmp_mem;
1671 struct amdgpu_bo *bo;
1672 struct ttm_operation_ctx ctx = { false, false };
1675 /* Move all invalidated BOs to the userptr_inval_list and
1676 * release their user pages by migration to the CPU domain
1678 list_for_each_entry_safe(mem, tmp_mem,
1679 &process_info->userptr_valid_list,
1680 validate_list.head) {
1681 if (!atomic_read(&mem->invalid))
1682 continue; /* BO is still valid */
1686 if (amdgpu_bo_reserve(bo, true))
1688 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1689 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1690 amdgpu_bo_unreserve(bo);
1692 pr_err("%s: Failed to invalidate userptr BO\n",
1697 list_move_tail(&mem->validate_list.head,
1698 &process_info->userptr_inval_list);
1701 if (list_empty(&process_info->userptr_inval_list))
1702 return 0; /* All evicted userptr BOs were freed */
1704 /* Go through userptr_inval_list and update any invalid user_pages */
1705 list_for_each_entry(mem, &process_info->userptr_inval_list,
1706 validate_list.head) {
1707 invalid = atomic_read(&mem->invalid);
1709 /* BO hasn't been invalidated since the last
1710 * revalidation attempt. Keep its BO list.
1716 /* Get updated user pages */
1717 ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
1718 bo->tbo.ttm->pages);
1720 bo->tbo.ttm->pages[0] = NULL;
1721 pr_info("%s: Failed to get user pages: %d\n",
1723 /* Pretend it succeeded. It will fail later
1724 * with a VM fault if the GPU tries to access
1725 * it. Better than hanging indefinitely with
1726 * stalled user mode queues.
1734 /* Validate invalid userptr BOs
1736 * Validates BOs on the userptr_inval_list, and moves them back to the
1737 * userptr_valid_list. Also updates GPUVM page tables with new page
1738 * addresses and waits for the page table updates to complete.
1740 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
1742 struct amdgpu_bo_list_entry *pd_bo_list_entries;
1743 struct list_head resv_list, duplicates;
1744 struct ww_acquire_ctx ticket;
1745 struct amdgpu_sync sync;
1747 struct amdgpu_vm *peer_vm;
1748 struct kgd_mem *mem, *tmp_mem;
1749 struct amdgpu_bo *bo;
1750 struct ttm_operation_ctx ctx = { false, false };
1753 pd_bo_list_entries = kcalloc(process_info->n_vms,
1754 sizeof(struct amdgpu_bo_list_entry),
1756 if (!pd_bo_list_entries) {
1757 pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
1762 INIT_LIST_HEAD(&resv_list);
1763 INIT_LIST_HEAD(&duplicates);
1765 /* Get all the page directory BOs that need to be reserved */
1767 list_for_each_entry(peer_vm, &process_info->vm_list_head,
1769 amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
1770 &pd_bo_list_entries[i++]);
1771 /* Add the userptr_inval_list entries to resv_list */
1772 list_for_each_entry(mem, &process_info->userptr_inval_list,
1773 validate_list.head) {
1774 list_add_tail(&mem->resv_list.head, &resv_list);
1775 mem->resv_list.bo = mem->validate_list.bo;
1776 mem->resv_list.num_shared = mem->validate_list.num_shared;
1779 /* Reserve all BOs and page tables for validation */
1780 ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
1781 WARN(!list_empty(&duplicates), "Duplicates should be empty");
1785 amdgpu_sync_create(&sync);
1787 ret = process_validate_vms(process_info);
1791 /* Validate BOs and update GPUVM page tables */
1792 list_for_each_entry_safe(mem, tmp_mem,
1793 &process_info->userptr_inval_list,
1794 validate_list.head) {
1795 struct kfd_bo_va_list *bo_va_entry;
1799 /* Validate the BO if we got user pages */
1800 if (bo->tbo.ttm->pages[0]) {
1801 amdgpu_bo_placement_from_domain(bo, mem->domain);
1802 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1804 pr_err("%s: failed to validate BO\n", __func__);
1809 list_move_tail(&mem->validate_list.head,
1810 &process_info->userptr_valid_list);
1812 /* Stop HMM from tracking the userptr update. We don't check the return
1813 * value for concurrent CPU page table updates because we will
1814 * reschedule the restore worker if process_info->evicted_bos
1817 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
1819 /* Update mapping. If the BO was not validated
1820 * (because we couldn't get user pages), this will
1821 * clear the page table entries, which will result in
1822 * VM faults if the GPU tries to access the invalid
1825 list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
1826 if (!bo_va_entry->is_mapped)
1829 ret = update_gpuvm_pte((struct amdgpu_device *)
1830 bo_va_entry->kgd_dev,
1831 bo_va_entry, &sync);
1833 pr_err("%s: update PTE failed\n", __func__);
1834 /* make sure this gets validated again */
1835 atomic_inc(&mem->invalid);
1841 /* Update page directories */
1842 ret = process_update_pds(process_info, &sync);
1845 ttm_eu_backoff_reservation(&ticket, &resv_list);
1846 amdgpu_sync_wait(&sync, false);
1847 amdgpu_sync_free(&sync);
1849 kfree(pd_bo_list_entries);
1851 list_for_each_entry_safe(mem, tmp_mem,
1852 &process_info->userptr_inval_list,
1853 validate_list.head) {
1855 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
1861 /* Worker callback to restore evicted userptr BOs
1863 * Tries to update and validate all userptr BOs. If successful and no
1864 * concurrent evictions happened, the queues are restarted. Otherwise,
1865 * reschedule for another attempt later.
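/* Rough flow of the worker (summarized from the code below): snapshot
 * evicted_bos, take references on the task and mm, update and validate the
 * invalid userptr BOs, then atomically reset evicted_bos and resume the
 * queues; on any failure or concurrent eviction, reschedule another attempt.
 */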
1867 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
1869 struct delayed_work *dwork = to_delayed_work(work);
1870 struct amdkfd_process_info *process_info =
1871 container_of(dwork, struct amdkfd_process_info,
1872 restore_userptr_work);
1873 struct task_struct *usertask;
1874 struct mm_struct *mm;
1877 evicted_bos = atomic_read(&process_info->evicted_bos);
1881 /* Reference task and mm in case of concurrent process termination */
1882 usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
1885 mm = get_task_mm(usertask);
1887 put_task_struct(usertask);
1891 mutex_lock(&process_info->lock);
1893 if (update_invalid_user_pages(process_info, mm))
1895 /* userptr_inval_list can be empty if all evicted userptr BOs
1896 * have been freed. In that case there is nothing to validate
1897 * and we can just restart the queues.
1899 if (!list_empty(&process_info->userptr_inval_list)) {
1900 if (atomic_read(&process_info->evicted_bos) != evicted_bos)
1901 goto unlock_out; /* Concurrent eviction, try again */
1903 if (validate_invalid_user_pages(process_info))
1906 /* Final check for concurrent eviction and atomic update. If
1907 * another eviction happens after successful update, it will
1908 * be a first eviction that calls quiesce_mm. The eviction
1909 * reference counting inside KFD will handle this case.
1911 if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
1915 if (kgd2kfd_resume_mm(mm)) {
1916 pr_err("%s: Failed to resume KFD\n", __func__);
1917 /* No recovery from this failure. Probably the CP is
1918 * hanging. No point trying again.
1922 mutex_unlock(&process_info->lock);
1924 put_task_struct(usertask);
1926 /* If validation failed, reschedule another attempt */
1928 schedule_delayed_work(&process_info->restore_userptr_work,
1929 msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1932 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
1933 * KFD process identified by process_info
1935 * @process_info: amdkfd_process_info of the KFD process
1937 * After memory eviction, restore thread calls this function. The function
1938 * should be called while the process is still valid. BO restore involves:
1940 * 1. Release the old eviction fence and create a new one
1941 * 2. Get two copies of the PD BO list from all the VMs. Keep one copy as pd_list.
1942 * 3. Use the second PD list and kfd_bo_list to create a list (ctx.list) of
1943 * BOs that need to be reserved.
1944 * 4. Reserve all the BOs
1945 * 5. Validate PD and PT BOs.
1946 * 6. Validate all KFD BOs using kfd_bo_list, map them and add the new fence
1947 * 7. Add the fence to all PD and PT BOs.
1948 * 8. Unreserve all BOs
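/* This is typically invoked from KFD's eviction/restore handling after the
 * eviction fence has signaled (an assumption based on how *ef is consumed;
 * the caller lives outside this file).
 */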
1950 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
1952 struct amdgpu_bo_list_entry *pd_bo_list;
1953 struct amdkfd_process_info *process_info = info;
1954 struct amdgpu_vm *peer_vm;
1955 struct kgd_mem *mem;
1956 struct bo_vm_reservation_context ctx;
1957 struct amdgpu_amdkfd_fence *new_fence;
1959 struct list_head duplicate_save;
1960 struct amdgpu_sync sync_obj;
1962 INIT_LIST_HEAD(&duplicate_save);
1963 INIT_LIST_HEAD(&ctx.list);
1964 INIT_LIST_HEAD(&ctx.duplicates);
1966 pd_bo_list = kcalloc(process_info->n_vms,
1967 sizeof(struct amdgpu_bo_list_entry),
1973 mutex_lock(&process_info->lock);
1974 list_for_each_entry(peer_vm, &process_info->vm_list_head,
1976 amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
1978 /* Reserve all BOs and page tables/directory. Add all BOs from
1979 * kfd_bo_list to ctx.list
1981 list_for_each_entry(mem, &process_info->kfd_bo_list,
1982 validate_list.head) {
1984 list_add_tail(&mem->resv_list.head, &ctx.list);
1985 mem->resv_list.bo = mem->validate_list.bo;
1986 mem->resv_list.num_shared = mem->validate_list.num_shared;
1989 ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
1990 false, &duplicate_save);
1992 pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
1993 goto ttm_reserve_fail;
1996 amdgpu_sync_create(&sync_obj);
1998 /* Validate PDs and PTs */
1999 ret = process_validate_vms(process_info);
2001 goto validate_map_fail;
2003 ret = process_sync_pds_resv(process_info, &sync_obj);
2005 pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
2006 goto validate_map_fail;
2009 /* Validate BOs and map them to GPUVM (update VM page tables). */
2010 list_for_each_entry(mem, &process_info->kfd_bo_list,
2011 validate_list.head) {
2013 struct amdgpu_bo *bo = mem->bo;
2014 uint32_t domain = mem->domain;
2015 struct kfd_bo_va_list *bo_va_entry;
2017 ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2019 pr_debug("Memory eviction: Validate BOs failed. Try again\n");
2020 goto validate_map_fail;
2022 ret = amdgpu_sync_fence(NULL, &sync_obj, bo->tbo.moving, false);
2024 pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2025 goto validate_map_fail;
2027 list_for_each_entry(bo_va_entry, &mem->bo_va_list,
2029 ret = update_gpuvm_pte((struct amdgpu_device *)
2030 bo_va_entry->kgd_dev,
2034 pr_debug("Memory eviction: update PTE failed. Try again\n");
2035 goto validate_map_fail;
2040 /* Update page directories */
2041 ret = process_update_pds(process_info, &sync_obj);
2043 pr_debug("Memory eviction: update PDs failed. Try again\n");
2044 goto validate_map_fail;
2047 /* Wait for validate and PT updates to finish */
2048 amdgpu_sync_wait(&sync_obj, false);
2050 /* Release the old eviction fence and create a new one, because a fence
2051 * only goes from unsignaled to signaled and cannot be reused.
2052 * Use the context and mm from the old fence.
2054 new_fence = amdgpu_amdkfd_fence_create(
2055 process_info->eviction_fence->base.context,
2056 process_info->eviction_fence->mm);
2058 pr_err("Failed to create eviction fence\n");
2060 goto validate_map_fail;
2062 dma_fence_put(&process_info->eviction_fence->base);
2063 process_info->eviction_fence = new_fence;
2064 *ef = dma_fence_get(&new_fence->base);
2066 /* Attach new eviction fence to all BOs */
2067 list_for_each_entry(mem, &process_info->kfd_bo_list,
2069 amdgpu_bo_fence(mem->bo,
2070 &process_info->eviction_fence->base, true);
2072 /* Attach eviction fence to PD / PT BOs */
2073 list_for_each_entry(peer_vm, &process_info->vm_list_head,
2075 struct amdgpu_bo *bo = peer_vm->root.base.bo;
2077 amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2081 ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2082 amdgpu_sync_free(&sync_obj);
2084 mutex_unlock(&process_info->lock);