2 * Copyright 2014-2018 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
22 #include <linux/dma-buf.h>
23 #include <linux/list.h>
24 #include <linux/pagemap.h>
25 #include <linux/sched/mm.h>
26 #include <linux/sched/task.h>
28 #include "amdgpu_object.h"
29 #include "amdgpu_gem.h"
30 #include "amdgpu_vm.h"
31 #include "amdgpu_amdkfd.h"
32 #include "amdgpu_dma_buf.h"
33 #include <uapi/linux/kfd_ioctl.h>
34 #include "amdgpu_xgmi.h"
36 /* Userptr restore delay, just long enough to allow consecutive VM
37 * changes to accumulate
39 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
41 /* Impose limit on how much memory KFD can use */
43 uint64_t max_system_mem_limit;
44 uint64_t max_ttm_mem_limit;
45 int64_t system_mem_used;
47 spinlock_t mem_limit_lock;
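/* Note: system_mem_used and ttm_mem_used above are global counters shared by
 * all KFD processes; they are only read and updated under mem_limit_lock. */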
50 static const char * const domain_bit_to_string[] = {
59 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
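/* domain_string() expects a single AMDGPU_GEM_DOMAIN_* bit to be set; ffs()
 * returns the 1-based position of that bit, so ffs(domain) - 1 indexes the
 * table above. */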
61 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
64 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
66 return (struct amdgpu_device *)kgd;
69 static bool kfd_mem_is_attached(struct amdgpu_vm *avm,
72 struct kfd_mem_attachment *entry;
74 list_for_each_entry(entry, &mem->attachments, list)
75 if (entry->bo_va->base.vm == avm)
81 /* Set memory usage limits. Currently, limits are
82 * System (TTM + userptr) memory - 15/16th System RAM
83 * TTM memory - 3/8th System RAM
85 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
91 mem = si.freeram - si.freehigh;
94 spin_lock_init(&kfd_mem_limit.mem_limit_lock);
95 kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
96 kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
97 pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
98 (kfd_mem_limit.max_system_mem_limit >> 20),
99 (kfd_mem_limit.max_ttm_mem_limit >> 20));
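/* Worked example (illustrative numbers only): with 64 GiB of usable RAM the
 * system limit becomes 64 - 64/16 = 60 GiB and the TTM limit
 * 32 - 8 = 24 GiB. */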
102 void amdgpu_amdkfd_reserve_system_mem(uint64_t size)
104 kfd_mem_limit.system_mem_used += size;
107 /* Estimate page table size needed to represent a given memory size
109 * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
110 * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
111 * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
112 * for 2MB pages for TLB efficiency. However, small allocations and
113 * fragmented system memory still need some 4KB pages. We choose a
114 * compromise that should work in most cases without reserving too
115 * much memory for page tables unnecessarily (factor 16K, >> 14).
117 #define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
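/* For example, ESTIMATE_PT_SIZE(64ULL << 30) reserves 4 MiB of page-table
 * space for 64 GiB of managed memory (1/16384 of the size). */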
119 static size_t amdgpu_amdkfd_acc_size(uint64_t size)
122 size *= sizeof(dma_addr_t) + sizeof(void *);
124 return __roundup_pow_of_two(sizeof(struct amdgpu_bo)) +
125 __roundup_pow_of_two(sizeof(struct ttm_tt)) +
129 static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
130 uint64_t size, u32 domain, bool sg)
132 uint64_t reserved_for_pt =
133 ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
134 size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
137 acc_size = amdgpu_amdkfd_acc_size(size);
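/* Accounting rules below: GTT BOs charge their full size against both the
 * system and TTM limits; userptr BOs (CPU domain without an sg table) charge
 * their size against the system limit only; all other BOs charge just the
 * bookkeeping overhead (acc_size), with VRAM BOs additionally counted
 * against the per-device VRAM budget. */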
140 if (domain == AMDGPU_GEM_DOMAIN_GTT) {
142 system_mem_needed = acc_size + size;
143 ttm_mem_needed = acc_size + size;
144 } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
146 system_mem_needed = acc_size + size;
147 ttm_mem_needed = acc_size;
150 system_mem_needed = acc_size;
151 ttm_mem_needed = acc_size;
152 if (domain == AMDGPU_GEM_DOMAIN_VRAM)
156 spin_lock(&kfd_mem_limit.mem_limit_lock);
158 if (kfd_mem_limit.system_mem_used + system_mem_needed >
159 kfd_mem_limit.max_system_mem_limit)
160 pr_debug("Set no_system_mem_limit=1 if using shared memory\n");
162 if ((kfd_mem_limit.system_mem_used + system_mem_needed >
163 kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
164 (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
165 kfd_mem_limit.max_ttm_mem_limit) ||
166 (adev->kfd.vram_used + vram_needed >
167 adev->gmc.real_vram_size - reserved_for_pt)) {
170 kfd_mem_limit.system_mem_used += system_mem_needed;
171 kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
172 adev->kfd.vram_used += vram_needed;
175 spin_unlock(&kfd_mem_limit.mem_limit_lock);
179 static void unreserve_mem_limit(struct amdgpu_device *adev,
180 uint64_t size, u32 domain, bool sg)
184 acc_size = amdgpu_amdkfd_acc_size(size);
186 spin_lock(&kfd_mem_limit.mem_limit_lock);
187 if (domain == AMDGPU_GEM_DOMAIN_GTT) {
188 kfd_mem_limit.system_mem_used -= (acc_size + size);
189 kfd_mem_limit.ttm_mem_used -= (acc_size + size);
190 } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
191 kfd_mem_limit.system_mem_used -= (acc_size + size);
192 kfd_mem_limit.ttm_mem_used -= acc_size;
194 kfd_mem_limit.system_mem_used -= acc_size;
195 kfd_mem_limit.ttm_mem_used -= acc_size;
196 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
197 adev->kfd.vram_used -= size;
198 WARN_ONCE(adev->kfd.vram_used < 0,
199 "kfd VRAM memory accounting unbalanced");
202 WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
203 "kfd system memory accounting unbalanced");
204 WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
205 "kfd TTM memory accounting unbalanced");
207 spin_unlock(&kfd_mem_limit.mem_limit_lock);
210 void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
212 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
213 u32 domain = bo->preferred_domains;
214 bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
216 if (bo->flags & AMDGPU_AMDKFD_CREATE_USERPTR_BO) {
217 domain = AMDGPU_GEM_DOMAIN_CPU;
221 unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
225 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
226 * reservation object.
228 * @bo: [IN] Remove eviction fence(s) from this BO
229 * @ef: [IN] This eviction fence is removed if it
230 * is present in the shared list.
232 * NOTE: Must be called with the BO reserved, i.e. its reservation object (bo->tbo.base.resv) locked.
234 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
235 struct amdgpu_amdkfd_fence *ef)
237 struct dma_resv *resv = bo->tbo.base.resv;
238 struct dma_resv_list *old, *new;
239 unsigned int i, j, k;
244 old = dma_resv_shared_list(resv);
248 new = kmalloc(struct_size(new, shared, old->shared_max), GFP_KERNEL);
252 /* Go through all the shared fences in the reservation object and sort
253 * the interesting ones to the end of the list.
255 for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
258 f = rcu_dereference_protected(old->shared[i],
259 dma_resv_held(resv));
261 if (f->context == ef->base.context)
262 RCU_INIT_POINTER(new->shared[--j], f);
264 RCU_INIT_POINTER(new->shared[k++], f);
266 new->shared_max = old->shared_max;
267 new->shared_count = k;
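/* Fences from the eviction fence's context now sit in slots
 * [j, old->shared_count) of the new array but are excluded by shared_count,
 * so the loop below can drop their references safely. */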
269 /* Install the new fence list, seqcount provides the barriers */
270 write_seqcount_begin(&resv->seq);
271 RCU_INIT_POINTER(resv->fence, new);
272 write_seqcount_end(&resv->seq);
274 /* Drop the references to the fences that were removed from the list */
275 for (i = j; i < old->shared_count; ++i) {
278 f = rcu_dereference_protected(new->shared[i],
279 dma_resv_held(resv));
287 int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
289 struct amdgpu_bo *root = bo;
290 struct amdgpu_vm_bo_base *vm_bo;
291 struct amdgpu_vm *vm;
292 struct amdkfd_process_info *info;
293 struct amdgpu_amdkfd_fence *ef;
296 /* We can always get vm_bo from the root PD BO. */
308 info = vm->process_info;
309 if (!info || !info->eviction_fence)
312 ef = container_of(dma_fence_get(&info->eviction_fence->base),
313 struct amdgpu_amdkfd_fence, base);
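/* Hold an extra reference on the eviction fence while it is being removed
 * from the reservation object; the reference is dropped again below. */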
315 BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
316 ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
317 dma_resv_unlock(bo->tbo.base.resv);
319 dma_fence_put(&ef->base);
323 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
326 struct ttm_operation_ctx ctx = { false, false };
329 if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
330 "Called with userptr BO"))
333 amdgpu_bo_placement_from_domain(bo, domain);
335 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
339 amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
345 static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
347 return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
350 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
352 * Page directories are not updated here because huge page handling
353 * during page table updates can invalidate page directory entries
354 * again. Page directories are only updated after updating page
357 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
359 struct amdgpu_bo *pd = vm->root.bo;
360 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
363 ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL);
365 pr_err("failed to validate PT BOs\n");
369 ret = amdgpu_amdkfd_validate_vm_bo(NULL, pd);
371 pr_err("failed to validate PD\n");
375 vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo);
377 if (vm->use_cpu_for_update) {
378 ret = amdgpu_bo_kmap(pd, NULL);
380 pr_err("failed to kmap PD, ret=%d\n", ret);
388 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
390 struct amdgpu_bo *pd = vm->root.bo;
391 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
394 ret = amdgpu_vm_update_pdes(adev, vm, false);
398 return amdgpu_sync_fence(sync, vm->last_update);
401 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
403 struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
404 bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
405 bool uncached = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED;
406 uint32_t mapping_flags;
410 mapping_flags = AMDGPU_VM_PAGE_READABLE;
411 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
412 mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
413 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
414 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
416 switch (adev->asic_type) {
418 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
420 mapping_flags |= coherent ?
421 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
423 mapping_flags |= coherent ?
424 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
426 mapping_flags |= coherent ?
427 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
431 if (coherent && uncached) {
432 if (adev->gmc.xgmi.connected_to_cpu ||
433 !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM))
435 mapping_flags |= AMDGPU_VM_MTYPE_UC;
436 } else if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
437 if (bo_adev == adev) {
438 mapping_flags |= coherent ?
439 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
440 if (adev->gmc.xgmi.connected_to_cpu)
443 mapping_flags |= coherent ?
444 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
445 if (amdgpu_xgmi_same_hive(adev, bo_adev))
450 mapping_flags |= coherent ?
451 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
455 mapping_flags |= coherent ?
456 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
459 pte_flags = amdgpu_gem_va_map_flags(adev, mapping_flags);
460 pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
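/* amdgpu_gem_va_map_flags() translates the R/W/X and MTYPE mapping flags
 * chosen above into hardware PTE bits; SNOOPED is added where the
 * ASIC-specific logic decided the mapping must snoop CPU caches. */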
466 kfd_mem_dmamap_userptr(struct kgd_mem *mem,
467 struct kfd_mem_attachment *attachment)
469 enum dma_data_direction direction =
470 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
471 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
472 struct ttm_operation_ctx ctx = {.interruptible = true};
473 struct amdgpu_bo *bo = attachment->bo_va->base.bo;
474 struct amdgpu_device *adev = attachment->adev;
475 struct ttm_tt *src_ttm = mem->bo->tbo.ttm;
476 struct ttm_tt *ttm = bo->tbo.ttm;
479 ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL);
480 if (unlikely(!ttm->sg))
483 if (WARN_ON(ttm->num_pages != src_ttm->num_pages))
486 /* Same sequence as in amdgpu_ttm_tt_pin_userptr */
487 ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages,
489 (u64)ttm->num_pages << PAGE_SHIFT,
494 ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
498 drm_prime_sg_to_dma_addr_array(ttm->sg, ttm->dma_address,
501 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
502 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
509 dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
511 pr_err("DMA map userptr failed: %d\n", ret);
512 sg_free_table(ttm->sg);
520 kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment)
522 struct ttm_operation_ctx ctx = {.interruptible = true};
523 struct amdgpu_bo *bo = attachment->bo_va->base.bo;
525 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
526 return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
530 kfd_mem_dmamap_attachment(struct kgd_mem *mem,
531 struct kfd_mem_attachment *attachment)
533 switch (attachment->type) {
534 case KFD_MEM_ATT_SHARED:
536 case KFD_MEM_ATT_USERPTR:
537 return kfd_mem_dmamap_userptr(mem, attachment);
538 case KFD_MEM_ATT_DMABUF:
539 return kfd_mem_dmamap_dmabuf(attachment);
547 kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
548 struct kfd_mem_attachment *attachment)
550 enum dma_data_direction direction =
551 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
552 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
553 struct ttm_operation_ctx ctx = {.interruptible = false};
554 struct amdgpu_bo *bo = attachment->bo_va->base.bo;
555 struct amdgpu_device *adev = attachment->adev;
556 struct ttm_tt *ttm = bo->tbo.ttm;
558 if (unlikely(!ttm->sg))
561 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
562 ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
564 dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
565 sg_free_table(ttm->sg);
570 kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment)
572 struct ttm_operation_ctx ctx = {.interruptible = true};
573 struct amdgpu_bo *bo = attachment->bo_va->base.bo;
575 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
576 ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
580 kfd_mem_dmaunmap_attachment(struct kgd_mem *mem,
581 struct kfd_mem_attachment *attachment)
583 switch (attachment->type) {
584 case KFD_MEM_ATT_SHARED:
586 case KFD_MEM_ATT_USERPTR:
587 kfd_mem_dmaunmap_userptr(mem, attachment);
589 case KFD_MEM_ATT_DMABUF:
590 kfd_mem_dmaunmap_dmabuf(attachment);
598 kfd_mem_attach_userptr(struct amdgpu_device *adev, struct kgd_mem *mem,
599 struct amdgpu_bo **bo)
601 unsigned long bo_size = mem->bo->tbo.base.size;
602 struct drm_gem_object *gobj;
605 ret = amdgpu_bo_reserve(mem->bo, false);
609 ret = amdgpu_gem_object_create(adev, bo_size, 1,
610 AMDGPU_GEM_DOMAIN_CPU,
611 AMDGPU_GEM_CREATE_PREEMPTIBLE,
612 ttm_bo_type_sg, mem->bo->tbo.base.resv,
614 amdgpu_bo_unreserve(mem->bo);
618 *bo = gem_to_amdgpu_bo(gobj);
619 (*bo)->parent = amdgpu_bo_ref(mem->bo);
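/* The SG BO shares the original BO's reservation object (passed to
 * amdgpu_gem_object_create() above) and keeps a reference to it as its
 * parent, so it cannot outlive the userptr BO it DMA-maps. */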
625 kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
626 struct amdgpu_bo **bo)
628 struct drm_gem_object *gobj;
632 mem->dmabuf = amdgpu_gem_prime_export(&mem->bo->tbo.base,
633 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
635 if (IS_ERR(mem->dmabuf)) {
636 ret = PTR_ERR(mem->dmabuf);
642 gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf);
644 return PTR_ERR(gobj);
646 /* Import takes an extra reference on the dmabuf. Drop it now to
647 * avoid leaking it. We only need the one reference in
650 dma_buf_put(mem->dmabuf);
652 *bo = gem_to_amdgpu_bo(gobj);
653 (*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;
654 (*bo)->parent = amdgpu_bo_ref(mem->bo);
659 /* kfd_mem_attach - Add a BO to a VM
661 * Everything that needs to be done only once when a BO is first added
662 * to a VM. It can later be mapped and unmapped many times without
663 * repeating these steps.
665 * 0. Create BO for DMA mapping, if needed
666 * 1. Allocate and initialize BO VA entry data structure
667 * 2. Add BO to the VM
668 * 3. Determine ASIC-specific PTE flags
669 * 4. Alloc page tables and directories if needed
670 * 4a. Validate new page tables and directories
672 static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
673 struct amdgpu_vm *vm, bool is_aql)
675 struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
676 unsigned long bo_size = mem->bo->tbo.base.size;
677 uint64_t va = mem->va;
678 struct kfd_mem_attachment *attachment[2] = {NULL, NULL};
679 struct amdgpu_bo *bo[2] = {NULL, NULL};
683 pr_err("Invalid VA when adding BO to VM\n");
687 for (i = 0; i <= is_aql; i++) {
688 attachment[i] = kzalloc(sizeof(*attachment[i]), GFP_KERNEL);
689 if (unlikely(!attachment[i])) {
694 pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
697 if (adev == bo_adev || (mem->domain == AMDGPU_GEM_DOMAIN_VRAM &&
698 amdgpu_xgmi_same_hive(adev, bo_adev))) {
699 /* Mappings on the local GPU and VRAM mappings in the
700 * local hive share the original BO
702 attachment[i]->type = KFD_MEM_ATT_SHARED;
704 drm_gem_object_get(&bo[i]->tbo.base);
706 /* Multiple mappings on the same GPU share the BO */
707 attachment[i]->type = KFD_MEM_ATT_SHARED;
709 drm_gem_object_get(&bo[i]->tbo.base);
710 } else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
711 /* Create an SG BO to DMA-map userptrs on other GPUs */
712 attachment[i]->type = KFD_MEM_ATT_USERPTR;
713 ret = kfd_mem_attach_userptr(adev, mem, &bo[i]);
716 } else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT &&
717 mem->bo->tbo.type != ttm_bo_type_sg) {
718 /* GTT BOs use DMA-mapping ability of dynamic-attach
719 * DMA bufs. TODO: The same should work for VRAM on
722 attachment[i]->type = KFD_MEM_ATT_DMABUF;
723 ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]);
727 /* FIXME: Need to DMA-map other BO types:
728 * large-BAR VRAM, doorbells, MMIO remap
730 attachment[i]->type = KFD_MEM_ATT_SHARED;
732 drm_gem_object_get(&bo[i]->tbo.base);
735 /* Add BO to VM internal data structures */
736 attachment[i]->bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
737 if (unlikely(!attachment[i]->bo_va)) {
739 pr_err("Failed to add BO object to VM. ret == %d\n",
744 attachment[i]->va = va;
745 attachment[i]->pte_flags = get_pte_flags(adev, mem);
746 attachment[i]->adev = adev;
747 list_add(&attachment[i]->list, &mem->attachments);
755 for (; i >= 0; i--) {
758 if (attachment[i]->bo_va) {
759 amdgpu_vm_bo_rmv(adev, attachment[i]->bo_va);
760 list_del(&attachment[i]->list);
763 drm_gem_object_put(&bo[i]->tbo.base);
764 kfree(attachment[i]);
769 static void kfd_mem_detach(struct kfd_mem_attachment *attachment)
771 struct amdgpu_bo *bo = attachment->bo_va->base.bo;
773 pr_debug("\t remove VA 0x%llx in entry %p\n",
774 attachment->va, attachment);
775 amdgpu_vm_bo_rmv(attachment->adev, attachment->bo_va);
776 drm_gem_object_put(&bo->tbo.base);
777 list_del(&attachment->list);
781 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
782 struct amdkfd_process_info *process_info,
785 struct ttm_validate_buffer *entry = &mem->validate_list;
786 struct amdgpu_bo *bo = mem->bo;
788 INIT_LIST_HEAD(&entry->head);
789 entry->num_shared = 1;
790 entry->bo = &bo->tbo;
791 mutex_lock(&process_info->lock);
793 list_add_tail(&entry->head, &process_info->userptr_valid_list);
795 list_add_tail(&entry->head, &process_info->kfd_bo_list);
796 mutex_unlock(&process_info->lock);
799 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
800 struct amdkfd_process_info *process_info)
802 struct ttm_validate_buffer *bo_list_entry;
804 bo_list_entry = &mem->validate_list;
805 mutex_lock(&process_info->lock);
806 list_del(&bo_list_entry->head);
807 mutex_unlock(&process_info->lock);
810 /* Initializes user pages. It registers the MMU notifier and validates
811 * the userptr BO in the GTT domain.
813 * The BO must already be on the userptr_valid_list. Otherwise an
814 * eviction and restore may happen that leaves the new BO unmapped
815 * with the user mode queues running.
817 * Takes the process_info->lock to protect against concurrent restore workers.
820 * Returns 0 for success, negative errno for errors.
822 static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
824 struct amdkfd_process_info *process_info = mem->process_info;
825 struct amdgpu_bo *bo = mem->bo;
826 struct ttm_operation_ctx ctx = { true, false };
829 mutex_lock(&process_info->lock);
831 ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
833 pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
837 ret = amdgpu_mn_register(bo, user_addr);
839 pr_err("%s: Failed to register MMU notifier: %d\n",
844 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
846 pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
850 ret = amdgpu_bo_reserve(bo, true);
852 pr_err("%s: Failed to reserve BO\n", __func__);
855 amdgpu_bo_placement_from_domain(bo, mem->domain);
856 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
858 pr_err("%s: failed to validate BO\n", __func__);
859 amdgpu_bo_unreserve(bo);
862 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
865 amdgpu_mn_unregister(bo);
867 mutex_unlock(&process_info->lock);
871 /* Reserving a BO and its page table BOs must happen atomically to
872 * avoid deadlocks. Some operations update multiple VMs at once. Track
873 * all the reservation info in a context structure. Optionally a sync
874 * object can track VM updates.
876 struct bo_vm_reservation_context {
877 struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
878 unsigned int n_vms; /* Number of VMs reserved */
879 struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries */
880 struct ww_acquire_ctx ticket; /* Reservation ticket */
881 struct list_head list, duplicates; /* BO lists */
882 struct amdgpu_sync *sync; /* Pointer to sync object */
883 bool reserved; /* Whether BOs are reserved */
887 BO_VM_NOT_MAPPED = 0, /* Match VMs where a BO is not mapped */
888 BO_VM_MAPPED, /* Match VMs where a BO is mapped */
889 BO_VM_ALL, /* Match all VMs a BO was added to */
893 * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
894 * @mem: KFD BO structure.
895 * @vm: the VM to reserve.
896 * @ctx: the struct that will be used in unreserve_bo_and_vms().
898 static int reserve_bo_and_vm(struct kgd_mem *mem,
899 struct amdgpu_vm *vm,
900 struct bo_vm_reservation_context *ctx)
902 struct amdgpu_bo *bo = mem->bo;
907 ctx->reserved = false;
909 ctx->sync = &mem->sync;
911 INIT_LIST_HEAD(&ctx->list);
912 INIT_LIST_HEAD(&ctx->duplicates);
914 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
918 ctx->kfd_bo.priority = 0;
919 ctx->kfd_bo.tv.bo = &bo->tbo;
920 ctx->kfd_bo.tv.num_shared = 1;
921 list_add(&ctx->kfd_bo.tv.head, &ctx->list);
923 amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
925 ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
926 false, &ctx->duplicates);
928 pr_err("Failed to reserve buffers in ttm.\n");
934 ctx->reserved = true;
939 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
940 * @mem: KFD BO structure.
941 * @vm: the VM to reserve. If NULL, then all VMs associated with the BO
942 * are used. Otherwise, a single VM associated with the BO.
943 * @map_type: the mapping status that will be used to filter the VMs.
944 * @ctx: the struct that will be used in unreserve_bo_and_vms().
946 * Returns 0 for success, negative for failure.
948 static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
949 struct amdgpu_vm *vm, enum bo_vm_match map_type,
950 struct bo_vm_reservation_context *ctx)
952 struct amdgpu_bo *bo = mem->bo;
953 struct kfd_mem_attachment *entry;
957 ctx->reserved = false;
960 ctx->sync = &mem->sync;
962 INIT_LIST_HEAD(&ctx->list);
963 INIT_LIST_HEAD(&ctx->duplicates);
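/* Two passes over the attachment list: the first counts the VMs that match
 * @map_type so vm_pd can be sized, the second adds their page directories
 * to the reservation list. */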
965 list_for_each_entry(entry, &mem->attachments, list) {
966 if ((vm && vm != entry->bo_va->base.vm) ||
967 (entry->is_mapped != map_type
968 && map_type != BO_VM_ALL))
974 if (ctx->n_vms != 0) {
975 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
981 ctx->kfd_bo.priority = 0;
982 ctx->kfd_bo.tv.bo = &bo->tbo;
983 ctx->kfd_bo.tv.num_shared = 1;
984 list_add(&ctx->kfd_bo.tv.head, &ctx->list);
987 list_for_each_entry(entry, &mem->attachments, list) {
988 if ((vm && vm != entry->bo_va->base.vm) ||
989 (entry->is_mapped != map_type
990 && map_type != BO_VM_ALL))
993 amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
998 ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
999 false, &ctx->duplicates);
1001 pr_err("Failed to reserve buffers in ttm.\n");
1007 ctx->reserved = true;
1012 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
1013 * @ctx: Reservation context to unreserve
1014 * @wait: Optionally wait for a sync object representing pending VM updates
1015 * @intr: Whether the wait is interruptible
1017 * Also frees any resources allocated in
1018 * reserve_bo_and_(cond_)vm(s). Returns the status from
1021 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
1022 bool wait, bool intr)
1027 ret = amdgpu_sync_wait(ctx->sync, intr);
1030 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
1035 ctx->reserved = false;
1041 static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
1042 struct kfd_mem_attachment *entry,
1043 struct amdgpu_sync *sync)
1045 struct amdgpu_bo_va *bo_va = entry->bo_va;
1046 struct amdgpu_device *adev = entry->adev;
1047 struct amdgpu_vm *vm = bo_va->base.vm;
1049 amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
1051 amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
1053 amdgpu_sync_fence(sync, bo_va->last_pt_update);
1055 kfd_mem_dmaunmap_attachment(mem, entry);
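/* update_gpuvm_pte() DMA-maps the attachment (a no-op for shared
 * attachments) and updates the GPU page tables for it; the page-table
 * update fence is added to @sync so callers can wait for completion. */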
1058 static int update_gpuvm_pte(struct kgd_mem *mem,
1059 struct kfd_mem_attachment *entry,
1060 struct amdgpu_sync *sync,
1063 struct amdgpu_bo_va *bo_va = entry->bo_va;
1064 struct amdgpu_device *adev = entry->adev;
1067 ret = kfd_mem_dmamap_attachment(mem, entry);
1071 /* Update the page tables */
1072 ret = amdgpu_vm_bo_update(adev, bo_va, false, table_freed);
1074 pr_err("amdgpu_vm_bo_update failed\n");
1078 return amdgpu_sync_fence(sync, bo_va->last_pt_update);
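/* map_bo_to_gpuvm() creates the VA mapping for one attachment with
 * amdgpu_vm_bo_map() and then fills the page tables via update_gpuvm_pte();
 * if the PTE update fails the mapping is removed again. */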
1081 static int map_bo_to_gpuvm(struct kgd_mem *mem,
1082 struct kfd_mem_attachment *entry,
1083 struct amdgpu_sync *sync,
1089 /* Set virtual address for the allocation */
1090 ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0,
1091 amdgpu_bo_size(entry->bo_va->base.bo),
1094 pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
1102 ret = update_gpuvm_pte(mem, entry, sync, table_freed);
1104 pr_err("update_gpuvm_pte() failed\n");
1105 goto update_gpuvm_pte_failed;
1110 update_gpuvm_pte_failed:
1111 unmap_bo_from_gpuvm(mem, entry, sync);
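/* create_doorbell_sg() wraps a doorbell or MMIO-remap address in a
 * single-entry sg_table so it can back an SG-type TTM BO. */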
1115 static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
1117 struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
1121 if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
1125 sg->sgl->dma_address = addr;
1126 sg->sgl->length = size;
1127 #ifdef CONFIG_NEED_SG_DMA_LENGTH
1128 sg->sgl->dma_length = size;
1133 static int process_validate_vms(struct amdkfd_process_info *process_info)
1135 struct amdgpu_vm *peer_vm;
1138 list_for_each_entry(peer_vm, &process_info->vm_list_head,
1140 ret = vm_validate_pt_pd_bos(peer_vm);
1148 static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
1149 struct amdgpu_sync *sync)
1151 struct amdgpu_vm *peer_vm;
1154 list_for_each_entry(peer_vm, &process_info->vm_list_head,
1156 struct amdgpu_bo *pd = peer_vm->root.bo;
1158 ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
1159 AMDGPU_SYNC_NE_OWNER,
1160 AMDGPU_FENCE_OWNER_KFD);
1168 static int process_update_pds(struct amdkfd_process_info *process_info,
1169 struct amdgpu_sync *sync)
1171 struct amdgpu_vm *peer_vm;
1174 list_for_each_entry(peer_vm, &process_info->vm_list_head,
1176 ret = vm_update_pds(peer_vm, sync);
1184 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
1185 struct dma_fence **ef)
1187 struct amdkfd_process_info *info = NULL;
1190 if (!*process_info) {
1191 info = kzalloc(sizeof(*info), GFP_KERNEL);
1195 mutex_init(&info->lock);
1196 INIT_LIST_HEAD(&info->vm_list_head);
1197 INIT_LIST_HEAD(&info->kfd_bo_list);
1198 INIT_LIST_HEAD(&info->userptr_valid_list);
1199 INIT_LIST_HEAD(&info->userptr_inval_list);
1201 info->eviction_fence =
1202 amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
1205 if (!info->eviction_fence) {
1206 pr_err("Failed to create eviction fence\n");
1208 goto create_evict_fence_fail;
1211 info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
1212 atomic_set(&info->evicted_bos, 0);
1213 INIT_DELAYED_WORK(&info->restore_userptr_work,
1214 amdgpu_amdkfd_restore_userptr_worker);
1216 *process_info = info;
1217 *ef = dma_fence_get(&info->eviction_fence->base);
1220 vm->process_info = *process_info;
1222 /* Validate page directory and attach eviction fence */
1223 ret = amdgpu_bo_reserve(vm->root.bo, true);
1225 goto reserve_pd_fail;
1226 ret = vm_validate_pt_pd_bos(vm);
1228 pr_err("validate_pt_pd_bos() failed\n");
1229 goto validate_pd_fail;
1231 ret = amdgpu_bo_sync_wait(vm->root.bo,
1232 AMDGPU_FENCE_OWNER_KFD, false);
1235 ret = dma_resv_reserve_shared(vm->root.bo->tbo.base.resv, 1);
1237 goto reserve_shared_fail;
1238 amdgpu_bo_fence(vm->root.bo,
1239 &vm->process_info->eviction_fence->base, true);
1240 amdgpu_bo_unreserve(vm->root.bo);
1242 /* Update process info */
1243 mutex_lock(&vm->process_info->lock);
1244 list_add_tail(&vm->vm_list_node,
1245 &(vm->process_info->vm_list_head));
1246 vm->process_info->n_vms++;
1247 mutex_unlock(&vm->process_info->lock);
1251 reserve_shared_fail:
1254 amdgpu_bo_unreserve(vm->root.bo);
1256 vm->process_info = NULL;
1258 /* Two fence references: one in info and one in *ef */
1259 dma_fence_put(&info->eviction_fence->base);
1262 *process_info = NULL;
1264 create_evict_fence_fail:
1265 mutex_destroy(&info->lock);
1271 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
1272 struct file *filp, u32 pasid,
1273 void **process_info,
1274 struct dma_fence **ef)
1276 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1277 struct amdgpu_fpriv *drv_priv;
1278 struct amdgpu_vm *avm;
1281 ret = amdgpu_file_to_fpriv(filp, &drv_priv);
1284 avm = &drv_priv->vm;
1286 /* Already a compute VM? */
1287 if (avm->process_info)
1290 /* Convert VM into a compute VM */
1291 ret = amdgpu_vm_make_compute(adev, avm, pasid);
1295 /* Initialize KFD part of the VM and process info */
1296 ret = init_kfd_vm(avm, process_info, ef);
1300 amdgpu_vm_set_task_info(avm);
1305 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1306 struct amdgpu_vm *vm)
1308 struct amdkfd_process_info *process_info = vm->process_info;
1309 struct amdgpu_bo *pd = vm->root.bo;
1314 /* Release eviction fence from PD */
1315 amdgpu_bo_reserve(pd, false);
1316 amdgpu_bo_fence(pd, NULL, false);
1317 amdgpu_bo_unreserve(pd);
1319 /* Update process info */
1320 mutex_lock(&process_info->lock);
1321 process_info->n_vms--;
1322 list_del(&vm->vm_list_node);
1323 mutex_unlock(&process_info->lock);
1325 vm->process_info = NULL;
1327 /* Release per-process resources when last compute VM is destroyed */
1328 if (!process_info->n_vms) {
1329 WARN_ON(!list_empty(&process_info->kfd_bo_list));
1330 WARN_ON(!list_empty(&process_info->userptr_valid_list));
1331 WARN_ON(!list_empty(&process_info->userptr_inval_list));
1333 dma_fence_put(&process_info->eviction_fence->base);
1334 cancel_delayed_work_sync(&process_info->restore_userptr_work);
1335 put_pid(process_info->pid);
1336 mutex_destroy(&process_info->lock);
1337 kfree(process_info);
1341 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv)
1343 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1344 struct amdgpu_vm *avm;
1346 if (WARN_ON(!kgd || !drm_priv))
1349 avm = drm_priv_to_vm(drm_priv);
1351 pr_debug("Releasing process vm %p\n", avm);
1353 /* The original pasid of the amdgpu vm has already been
1354 * released while converting the amdgpu vm into a compute vm.
1355 * The current pasid is managed by kfd and will be
1356 * released on kfd process destroy. Set the amdgpu pasid
1357 * to 0 to avoid a duplicate release.
1359 amdgpu_vm_release_compute(adev, avm);
1362 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
1364 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1365 struct amdgpu_bo *pd = avm->root.bo;
1366 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
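/* Pre-Vega10 ASICs are assumed to program the page directory base as a GPU
 * page frame number, while GFX9 and newer take the full address value
 * produced by amdgpu_gmc_pd_addr(). */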
1368 if (adev->asic_type < CHIP_VEGA10)
1369 return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1370 return avm->pd_phys_addr;
1373 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1374 struct kgd_dev *kgd, uint64_t va, uint64_t size,
1375 void *drm_priv, struct kgd_mem **mem,
1376 uint64_t *offset, uint32_t flags)
1378 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1379 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1380 enum ttm_bo_type bo_type = ttm_bo_type_device;
1381 struct sg_table *sg = NULL;
1382 uint64_t user_addr = 0;
1383 struct amdgpu_bo *bo;
1384 struct drm_gem_object *gobj;
1385 u32 domain, alloc_domain;
1390 * Check on which domain to allocate BO
1392 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
1393 domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1394 alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
1395 alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
1396 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
1397 AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
1398 } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
1399 domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1401 } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
1402 domain = AMDGPU_GEM_DOMAIN_GTT;
1403 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1404 alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE;
1405 if (!offset || !*offset)
1407 user_addr = untagged_addr(*offset);
1408 } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1409 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1410 domain = AMDGPU_GEM_DOMAIN_GTT;
1411 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1412 bo_type = ttm_bo_type_sg;
1414 if (size > UINT_MAX)
1416 sg = create_doorbell_sg(*offset, size);
1423 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1428 INIT_LIST_HEAD(&(*mem)->attachments);
1429 mutex_init(&(*mem)->lock);
1430 (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1432 /* Workaround for AQL queue wraparound bug. Map the same
1433 * memory twice. That means we only actually allocate half
1436 if ((*mem)->aql_queue)
1439 (*mem)->alloc_flags = flags;
1441 amdgpu_sync_create(&(*mem)->sync);
1443 ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
1445 pr_debug("Insufficient memory\n");
1446 goto err_reserve_limit;
1449 pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1450 va, size, domain_string(alloc_domain));
1452 ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags,
1453 bo_type, NULL, &gobj);
1455 pr_debug("Failed to create BO on domain %s. ret %d\n",
1456 domain_string(alloc_domain), ret);
1459 ret = drm_vma_node_allow(&gobj->vma_node, drm_priv);
1461 pr_debug("Failed to allow vma node access. ret %d\n", ret);
1462 goto err_node_allow;
1464 bo = gem_to_amdgpu_bo(gobj);
1465 if (bo_type == ttm_bo_type_sg) {
1467 bo->tbo.ttm->sg = sg;
1472 bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO;
1475 (*mem)->domain = domain;
1476 (*mem)->mapped_to_gpu_memory = 0;
1477 (*mem)->process_info = avm->process_info;
1478 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1481 ret = init_user_pages(*mem, user_addr);
1483 goto allocate_init_user_pages_failed;
1487 *offset = amdgpu_bo_mmap_offset(bo);
1491 allocate_init_user_pages_failed:
1492 remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
1493 drm_vma_node_revoke(&gobj->vma_node, drm_priv);
1495 amdgpu_bo_unref(&bo);
1496 /* Don't unreserve system mem limit twice */
1497 goto err_reserve_limit;
1499 unreserve_mem_limit(adev, size, alloc_domain, !!sg);
1501 mutex_destroy(&(*mem)->lock);
1511 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1512 struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv,
1515 struct amdkfd_process_info *process_info = mem->process_info;
1516 unsigned long bo_size = mem->bo->tbo.base.size;
1517 struct kfd_mem_attachment *entry, *tmp;
1518 struct bo_vm_reservation_context ctx;
1519 struct ttm_validate_buffer *bo_list_entry;
1520 unsigned int mapped_to_gpu_memory;
1522 bool is_imported = false;
1524 mutex_lock(&mem->lock);
1525 mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
1526 is_imported = mem->is_imported;
1527 mutex_unlock(&mem->lock);
1528 /* lock is not needed after this, since mem is unused and will
1532 if (mapped_to_gpu_memory > 0) {
1533 pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1538 /* Make sure restore workers don't access the BO any more */
1539 bo_list_entry = &mem->validate_list;
1540 mutex_lock(&process_info->lock);
1541 list_del(&bo_list_entry->head);
1542 mutex_unlock(&process_info->lock);
1544 /* No more MMU notifiers */
1545 amdgpu_mn_unregister(mem->bo);
1547 ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1551 /* The eviction fence should be removed by the last unmap.
1552 * TODO: Log an error condition if the bo still has the eviction fence
1555 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1556 process_info->eviction_fence);
1557 pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1558 mem->va + bo_size * (1 + mem->aql_queue));
1560 ret = unreserve_bo_and_vms(&ctx, false, false);
1562 /* Remove from VM internal data structures */
1563 list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
1564 kfd_mem_detach(entry);
1566 /* Free the sync object */
1567 amdgpu_sync_free(&mem->sync);
1569 /* If the SG is not NULL, it's one we created for a doorbell or mmio
1570 * remap BO. We need to free it.
1572 if (mem->bo->tbo.sg) {
1573 sg_free_table(mem->bo->tbo.sg);
1574 kfree(mem->bo->tbo.sg);
1577 /* Update the size of the BO being freed if it was allocated from
1578 * VRAM and is not imported.
1581 if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
1589 drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
1591 dma_buf_put(mem->dmabuf);
1592 drm_gem_object_put(&mem->bo->tbo.base);
1593 mutex_destroy(&mem->lock);
1599 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1600 struct kgd_dev *kgd, struct kgd_mem *mem,
1601 void *drm_priv, bool *table_freed)
1603 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1604 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1606 struct amdgpu_bo *bo;
1608 struct kfd_mem_attachment *entry;
1609 struct bo_vm_reservation_context ctx;
1610 unsigned long bo_size;
1611 bool is_invalid_userptr = false;
1615 pr_err("Invalid BO when mapping memory to GPU\n");
1619 /* Make sure restore is not running concurrently. Since we
1620 * don't map invalid userptr BOs, we rely on the next restore
1621 * worker to do the mapping
1623 mutex_lock(&mem->process_info->lock);
1625 /* Lock mmap-sem. If we find an invalid userptr BO, we can be
1626 * sure that the MMU notifier is no longer running
1627 * concurrently and the queues are actually stopped
1629 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1630 mmap_write_lock(current->mm);
1631 is_invalid_userptr = atomic_read(&mem->invalid);
1632 mmap_write_unlock(current->mm);
1635 mutex_lock(&mem->lock);
1637 domain = mem->domain;
1638 bo_size = bo->tbo.base.size;
1640 pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1642 mem->va + bo_size * (1 + mem->aql_queue),
1643 avm, domain_string(domain));
1645 if (!kfd_mem_is_attached(avm, mem)) {
1646 ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue);
1651 ret = reserve_bo_and_vm(mem, avm, &ctx);
1655 /* Userptr can be marked as "not invalid", but not actually be
1656 * validated yet (still in the system domain). In that case
1657 * the queues are still stopped and we can leave mapping for
1658 * the next restore worker
1660 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
1661 bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
1662 is_invalid_userptr = true;
1664 ret = vm_validate_pt_pd_bos(avm);
1668 if (mem->mapped_to_gpu_memory == 0 &&
1669 !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1670 /* Validate BO only once. The eviction fence gets added to BO
1671 * the first time it is mapped. Validate will wait for all
1672 * background evictions to complete.
1674 ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1676 pr_debug("Validate failed\n");
1681 list_for_each_entry(entry, &mem->attachments, list) {
1682 if (entry->bo_va->base.vm != avm || entry->is_mapped)
1685 pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1686 entry->va, entry->va + bo_size, entry);
1688 ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
1689 is_invalid_userptr, table_freed);
1691 pr_err("Failed to map bo to gpuvm\n");
1695 ret = vm_update_pds(avm, ctx.sync);
1697 pr_err("Failed to update page directories\n");
1701 entry->is_mapped = true;
1702 mem->mapped_to_gpu_memory++;
1703 pr_debug("\t INC mapping count %d\n",
1704 mem->mapped_to_gpu_memory);
1707 if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
1709 &avm->process_info->eviction_fence->base,
1711 ret = unreserve_bo_and_vms(&ctx, false, false);
1716 unreserve_bo_and_vms(&ctx, false, false);
1718 mutex_unlock(&mem->process_info->lock);
1719 mutex_unlock(&mem->lock);
1723 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1724 struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
1726 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1727 struct amdkfd_process_info *process_info = avm->process_info;
1728 unsigned long bo_size = mem->bo->tbo.base.size;
1729 struct kfd_mem_attachment *entry;
1730 struct bo_vm_reservation_context ctx;
1733 mutex_lock(&mem->lock);
1735 ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx);
1738 /* If no VMs were reserved, it means the BO wasn't actually mapped */
1739 if (ctx.n_vms == 0) {
1744 ret = vm_validate_pt_pd_bos(avm);
1748 pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1750 mem->va + bo_size * (1 + mem->aql_queue),
1753 list_for_each_entry(entry, &mem->attachments, list) {
1754 if (entry->bo_va->base.vm != avm || !entry->is_mapped)
1757 pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1758 entry->va, entry->va + bo_size, entry);
1760 unmap_bo_from_gpuvm(mem, entry, ctx.sync);
1761 entry->is_mapped = false;
1763 mem->mapped_to_gpu_memory--;
1764 pr_debug("\t DEC mapping count %d\n",
1765 mem->mapped_to_gpu_memory);
1768 /* If BO is unmapped from all VMs, unfence it. It can be evicted if
1771 if (mem->mapped_to_gpu_memory == 0 &&
1772 !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) &&
1773 !mem->bo->tbo.pin_count)
1774 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1775 process_info->eviction_fence);
1778 unreserve_bo_and_vms(&ctx, false, false);
1780 mutex_unlock(&mem->lock);
1784 int amdgpu_amdkfd_gpuvm_sync_memory(
1785 struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1787 struct amdgpu_sync sync;
1790 amdgpu_sync_create(&sync);
1792 mutex_lock(&mem->lock);
1793 amdgpu_sync_clone(&mem->sync, &sync);
1794 mutex_unlock(&mem->lock);
1796 ret = amdgpu_sync_wait(&sync, intr);
1797 amdgpu_sync_free(&sync);
1801 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1802 struct kgd_mem *mem, void **kptr, uint64_t *size)
1805 struct amdgpu_bo *bo = mem->bo;
1807 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1808 pr_err("userptr can't be mapped to kernel\n");
1812 /* delete kgd_mem from kfd_bo_list to avoid re-validating
1813 * this BO during its restore after eviction.
1815 mutex_lock(&mem->process_info->lock);
1817 ret = amdgpu_bo_reserve(bo, true);
1819 pr_err("Failed to reserve bo. ret %d\n", ret);
1820 goto bo_reserve_failed;
1823 ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
1825 pr_err("Failed to pin bo. ret %d\n", ret);
1829 ret = amdgpu_bo_kmap(bo, kptr);
1831 pr_err("Failed to map bo to kernel. ret %d\n", ret);
1835 amdgpu_amdkfd_remove_eviction_fence(
1836 bo, mem->process_info->eviction_fence);
1837 list_del_init(&mem->validate_list.head);
1840 *size = amdgpu_bo_size(bo);
1842 amdgpu_bo_unreserve(bo);
1844 mutex_unlock(&mem->process_info->lock);
1848 amdgpu_bo_unpin(bo);
1850 amdgpu_bo_unreserve(bo);
1852 mutex_unlock(&mem->process_info->lock);
1857 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
1858 struct kfd_vm_fault_info *mem)
1860 struct amdgpu_device *adev;
1862 adev = (struct amdgpu_device *)kgd;
1863 if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
1864 *mem = *adev->gmc.vm_fault_info;
1866 atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1871 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
1872 struct dma_buf *dma_buf,
1873 uint64_t va, void *drm_priv,
1874 struct kgd_mem **mem, uint64_t *size,
1875 uint64_t *mmap_offset)
1877 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
1878 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1879 struct drm_gem_object *obj;
1880 struct amdgpu_bo *bo;
1883 if (dma_buf->ops != &amdgpu_dmabuf_ops)
1884 /* Can't handle non-graphics buffers */
1887 obj = dma_buf->priv;
1888 if (drm_to_adev(obj->dev) != adev)
1889 /* Can't handle buffers from other devices */
1892 bo = gem_to_amdgpu_bo(obj);
1893 if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
1894 AMDGPU_GEM_DOMAIN_GTT)))
1895 /* Only VRAM and GTT BOs are supported */
1898 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1902 ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
1909 *size = amdgpu_bo_size(bo);
1912 *mmap_offset = amdgpu_bo_mmap_offset(bo);
1914 INIT_LIST_HEAD(&(*mem)->attachments);
1915 mutex_init(&(*mem)->lock);
1917 (*mem)->alloc_flags =
1918 ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1919 KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
1920 | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
1921 | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
1923 drm_gem_object_get(&bo->tbo.base);
1926 (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1927 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
1928 (*mem)->mapped_to_gpu_memory = 0;
1929 (*mem)->process_info = avm->process_info;
1930 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
1931 amdgpu_sync_create(&(*mem)->sync);
1932 (*mem)->is_imported = true;
1937 /* Evict a userptr BO by stopping the queues if necessary
1939 * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
1940 * cannot do any memory allocations, and cannot take any locks that
1941 * are held elsewhere while allocating memory. Therefore this is as
1942 * simple as possible, using atomic counters.
1944 * It doesn't do anything to the BO itself. The real work happens in
1945 * restore, where we get updated page addresses. This function only
1946 * ensures that GPU access to the BO is stopped.
1948 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
1949 struct mm_struct *mm)
1951 struct amdkfd_process_info *process_info = mem->process_info;
1955 atomic_inc(&mem->invalid);
1956 evicted_bos = atomic_inc_return(&process_info->evicted_bos);
1957 if (evicted_bos == 1) {
1958 /* First eviction, stop the queues */
1959 r = kgd2kfd_quiesce_mm(mm);
1961 pr_err("Failed to quiesce KFD\n");
1962 schedule_delayed_work(&process_info->restore_userptr_work,
1963 msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1969 /* Update invalid userptr BOs
1971 * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
1972 * userptr_inval_list and updates user pages for all BOs that have
1973 * been invalidated since their last update.
1975 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
1976 struct mm_struct *mm)
1978 struct kgd_mem *mem, *tmp_mem;
1979 struct amdgpu_bo *bo;
1980 struct ttm_operation_ctx ctx = { false, false };
1983 /* Move all invalidated BOs to the userptr_inval_list and
1984 * release their user pages by migration to the CPU domain
1986 list_for_each_entry_safe(mem, tmp_mem,
1987 &process_info->userptr_valid_list,
1988 validate_list.head) {
1989 if (!atomic_read(&mem->invalid))
1990 continue; /* BO is still valid */
1994 if (amdgpu_bo_reserve(bo, true))
1996 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1997 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1998 amdgpu_bo_unreserve(bo);
2000 pr_err("%s: Failed to invalidate userptr BO\n",
2005 list_move_tail(&mem->validate_list.head,
2006 &process_info->userptr_inval_list);
2009 if (list_empty(&process_info->userptr_inval_list))
2010 return 0; /* All evicted userptr BOs were freed */
2012 /* Go through userptr_inval_list and update any invalid user_pages */
2013 list_for_each_entry(mem, &process_info->userptr_inval_list,
2014 validate_list.head) {
2015 invalid = atomic_read(&mem->invalid);
2017 /* BO hasn't been invalidated since the last
2018 * revalidation attempt. Keep its BO list.
2024 /* Get updated user pages */
2025 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
2027 pr_debug("%s: Failed to get user pages: %d\n",
2030 /* Return error -EBUSY or -ENOMEM, retry restore */
2035 * FIXME: Cannot ignore the return code, must hold
2038 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
2040 /* Mark the BO as valid unless it was invalidated
2041 * again concurrently.
2043 if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
2050 /* Validate invalid userptr BOs
2052 * Validates BOs on the userptr_inval_list, and moves them back to the
2053 * userptr_valid_list. Also updates GPUVM page tables with new page
2054 * addresses and waits for the page table updates to complete.
2056 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
2058 struct amdgpu_bo_list_entry *pd_bo_list_entries;
2059 struct list_head resv_list, duplicates;
2060 struct ww_acquire_ctx ticket;
2061 struct amdgpu_sync sync;
2063 struct amdgpu_vm *peer_vm;
2064 struct kgd_mem *mem, *tmp_mem;
2065 struct amdgpu_bo *bo;
2066 struct ttm_operation_ctx ctx = { false, false };
2069 pd_bo_list_entries = kcalloc(process_info->n_vms,
2070 sizeof(struct amdgpu_bo_list_entry),
2072 if (!pd_bo_list_entries) {
2073 pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
2078 INIT_LIST_HEAD(&resv_list);
2079 INIT_LIST_HEAD(&duplicates);
2081 /* Get all the page directory BOs that need to be reserved */
2083 list_for_each_entry(peer_vm, &process_info->vm_list_head,
2085 amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
2086 &pd_bo_list_entries[i++]);
2087 /* Add the userptr_inval_list entries to resv_list */
2088 list_for_each_entry(mem, &process_info->userptr_inval_list,
2089 validate_list.head) {
2090 list_add_tail(&mem->resv_list.head, &resv_list);
2091 mem->resv_list.bo = mem->validate_list.bo;
2092 mem->resv_list.num_shared = mem->validate_list.num_shared;
2095 /* Reserve all BOs and page tables for validation */
2096 ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
2097 WARN(!list_empty(&duplicates), "Duplicates should be empty");
2101 amdgpu_sync_create(&sync);
2103 ret = process_validate_vms(process_info);
2107 /* Validate BOs and update GPUVM page tables */
2108 list_for_each_entry_safe(mem, tmp_mem,
2109 &process_info->userptr_inval_list,
2110 validate_list.head) {
2111 struct kfd_mem_attachment *attachment;
2115 /* Validate the BO if we got user pages */
2116 if (bo->tbo.ttm->pages[0]) {
2117 amdgpu_bo_placement_from_domain(bo, mem->domain);
2118 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
2120 pr_err("%s: failed to validate BO\n", __func__);
2125 list_move_tail(&mem->validate_list.head,
2126 &process_info->userptr_valid_list);
2128 /* Update mapping. If the BO was not validated
2129 * (because we couldn't get user pages), this will
2130 * clear the page table entries, which will result in
2131 * VM faults if the GPU tries to access the invalid
2134 list_for_each_entry(attachment, &mem->attachments, list) {
2135 if (!attachment->is_mapped)
2138 kfd_mem_dmaunmap_attachment(mem, attachment);
2139 ret = update_gpuvm_pte(mem, attachment, &sync, NULL);
2141 pr_err("%s: update PTE failed\n", __func__);
2142 /* make sure this gets validated again */
2143 atomic_inc(&mem->invalid);
2149 /* Update page directories */
2150 ret = process_update_pds(process_info, &sync);
2153 ttm_eu_backoff_reservation(&ticket, &resv_list);
2154 amdgpu_sync_wait(&sync, false);
2155 amdgpu_sync_free(&sync);
2157 kfree(pd_bo_list_entries);
2163 /* Worker callback to restore evicted userptr BOs
2165 * Tries to update and validate all userptr BOs. If successful and no
2166 * concurrent evictions happened, the queues are restarted. Otherwise,
2167 * reschedule for another attempt later.
2169 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
2171 struct delayed_work *dwork = to_delayed_work(work);
2172 struct amdkfd_process_info *process_info =
2173 container_of(dwork, struct amdkfd_process_info,
2174 restore_userptr_work);
2175 struct task_struct *usertask;
2176 struct mm_struct *mm;
2179 evicted_bos = atomic_read(&process_info->evicted_bos);
2183 /* Reference task and mm in case of concurrent process termination */
2184 usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
2187 mm = get_task_mm(usertask);
2189 put_task_struct(usertask);
2193 mutex_lock(&process_info->lock);
2195 if (update_invalid_user_pages(process_info, mm))
2197 /* userptr_inval_list can be empty if all evicted userptr BOs
2198 * have been freed. In that case there is nothing to validate
2199 * and we can just restart the queues.
2201 if (!list_empty(&process_info->userptr_inval_list)) {
2202 if (atomic_read(&process_info->evicted_bos) != evicted_bos)
2203 goto unlock_out; /* Concurrent eviction, try again */
2205 if (validate_invalid_user_pages(process_info))
2208 /* Final check for concurrent eviction and atomic update. If
2209 * another eviction happens after successful update, it will
2210 * be a first eviction that calls quiesce_mm. The eviction
2211 * reference counting inside KFD will handle this case.
2213 if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
2217 if (kgd2kfd_resume_mm(mm)) {
2218 pr_err("%s: Failed to resume KFD\n", __func__);
2219 /* No recovery from this failure. Probably the CP is
2220 * hanging. No point trying again.
2225 mutex_unlock(&process_info->lock);
2227 put_task_struct(usertask);
2229 /* If validation failed, reschedule another attempt */
2231 schedule_delayed_work(&process_info->restore_userptr_work,
2232 msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
2235 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
2236 * KFD process identified by process_info
2238 * @process_info: amdkfd_process_info of the KFD process
2240 * After memory eviction, the restore thread calls this function. It should
2241 * be called while the process is still valid. BO restore involves:
2243 * 1. Release old eviction fence and create new one
2244 * 2. Get two copies of PD BO list from all the VMs. Keep one copy as pd_list.
2245 * 3. Use the second PD list and kfd_bo_list to create a list (ctx.list) of
2246 * BOs that need to be reserved.
2247 * 4. Reserve all the BOs
2248 * 5. Validate PD and PT BOs.
2249 * 6. Validate all KFD BOs using kfd_bo_list, map them and add a new fence
2250 * 7. Add fence to all PD and PT BOs.
2251 * 8. Unreserve all BOs
2253 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
2255 struct amdgpu_bo_list_entry *pd_bo_list;
2256 struct amdkfd_process_info *process_info = info;
2257 struct amdgpu_vm *peer_vm;
2258 struct kgd_mem *mem;
2259 struct bo_vm_reservation_context ctx;
2260 struct amdgpu_amdkfd_fence *new_fence;
2262 struct list_head duplicate_save;
2263 struct amdgpu_sync sync_obj;
2264 unsigned long failed_size = 0;
2265 unsigned long total_size = 0;
2267 INIT_LIST_HEAD(&duplicate_save);
2268 INIT_LIST_HEAD(&ctx.list);
2269 INIT_LIST_HEAD(&ctx.duplicates);
2271 pd_bo_list = kcalloc(process_info->n_vms,
2272 sizeof(struct amdgpu_bo_list_entry),
2278 mutex_lock(&process_info->lock);
2279 list_for_each_entry(peer_vm, &process_info->vm_list_head,
2281 amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
2283 /* Reserve all BOs and page tables/directory. Add all BOs from
2284 * kfd_bo_list to ctx.list
2286 list_for_each_entry(mem, &process_info->kfd_bo_list,
2287 validate_list.head) {
2289 list_add_tail(&mem->resv_list.head, &ctx.list);
2290 mem->resv_list.bo = mem->validate_list.bo;
2291 mem->resv_list.num_shared = mem->validate_list.num_shared;
2294 ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
2295 false, &duplicate_save);
2297 pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2298 goto ttm_reserve_fail;
2301 amdgpu_sync_create(&sync_obj);
2303 /* Validate PDs and PTs */
2304 ret = process_validate_vms(process_info);
2306 goto validate_map_fail;
2308 ret = process_sync_pds_resv(process_info, &sync_obj);
2310 pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
2311 goto validate_map_fail;
2314 /* Validate BOs and map them to GPUVM (update VM page tables). */
2315 list_for_each_entry(mem, &process_info->kfd_bo_list,
2316 validate_list.head) {
2318 struct amdgpu_bo *bo = mem->bo;
2319 uint32_t domain = mem->domain;
2320 struct kfd_mem_attachment *attachment;
2322 total_size += amdgpu_bo_size(bo);
2324 ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2326 pr_debug("Memory eviction: Validate BOs failed\n");
2327 failed_size += amdgpu_bo_size(bo);
2328 ret = amdgpu_amdkfd_bo_validate(bo,
2329 AMDGPU_GEM_DOMAIN_GTT, false);
2331 pr_debug("Memory eviction: Try again\n");
2332 goto validate_map_fail;
2335 ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving);
2337 pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2338 goto validate_map_fail;
2340 list_for_each_entry(attachment, &mem->attachments, list) {
2341 if (!attachment->is_mapped)
2344 kfd_mem_dmaunmap_attachment(mem, attachment);
2345 ret = update_gpuvm_pte(mem, attachment, &sync_obj, NULL);
2347 pr_debug("Memory eviction: update PTE failed. Try again\n");
2348 goto validate_map_fail;
2354 pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
2356 /* Update page directories */
2357 ret = process_update_pds(process_info, &sync_obj);
2359 pr_debug("Memory eviction: update PDs failed. Try again\n");
2360 goto validate_map_fail;
2363 /* Wait for validate and PT updates to finish */
2364 amdgpu_sync_wait(&sync_obj, false);
2366 /* Release the old eviction fence and create a new one, because a fence
2367 * only goes from unsignaled to signaled and therefore cannot be reused.
2368 * Use the context and mm from the old fence.
2370 new_fence = amdgpu_amdkfd_fence_create(
2371 process_info->eviction_fence->base.context,
2372 process_info->eviction_fence->mm,
2375 pr_err("Failed to create eviction fence\n");
2377 goto validate_map_fail;
2379 dma_fence_put(&process_info->eviction_fence->base);
2380 process_info->eviction_fence = new_fence;
2381 *ef = dma_fence_get(&new_fence->base);
2383 /* Attach new eviction fence to all BOs */
2384 list_for_each_entry(mem, &process_info->kfd_bo_list,
2386 amdgpu_bo_fence(mem->bo,
2387 &process_info->eviction_fence->base, true);
2389 /* Attach eviction fence to PD / PT BOs */
2390 list_for_each_entry(peer_vm, &process_info->vm_list_head,
2392 struct amdgpu_bo *bo = peer_vm->root.bo;
2394 amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2398 ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2399 amdgpu_sync_free(&sync_obj);
2401 mutex_unlock(&process_info->lock);
2406 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
2408 struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2409 struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
2415 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2419 mutex_init(&(*mem)->lock);
2420 INIT_LIST_HEAD(&(*mem)->attachments);
2421 (*mem)->bo = amdgpu_bo_ref(gws_bo);
2422 (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
2423 (*mem)->process_info = process_info;
2424 add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
2425 amdgpu_sync_create(&(*mem)->sync);
2428 /* Validate gws bo the first time it is added to process */
2429 mutex_lock(&(*mem)->process_info->lock);
2430 ret = amdgpu_bo_reserve(gws_bo, false);
2431 if (unlikely(ret)) {
2432 pr_err("Reserve gws bo failed %d\n", ret);
2433 goto bo_reservation_failure;
2436 ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
2438 pr_err("GWS BO validate failed %d\n", ret);
2439 goto bo_validation_failure;
2441 /* GWS resource is shared between amdgpu and amdkfd
2442 * Add process eviction fence to bo so they can
2445 ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
2447 goto reserve_shared_fail;
2448 amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
2449 amdgpu_bo_unreserve(gws_bo);
2450 mutex_unlock(&(*mem)->process_info->lock);
2454 reserve_shared_fail:
2455 bo_validation_failure:
2456 amdgpu_bo_unreserve(gws_bo);
2457 bo_reservation_failure:
2458 mutex_unlock(&(*mem)->process_info->lock);
2459 amdgpu_sync_free(&(*mem)->sync);
2460 remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
2461 amdgpu_bo_unref(&gws_bo);
2462 mutex_destroy(&(*mem)->lock);
2468 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
2471 struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2472 struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
2473 struct amdgpu_bo *gws_bo = kgd_mem->bo;
2475 /* Remove the BO from the process's validate list so the restore worker won't touch it anymore.
2478 remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
2480 ret = amdgpu_bo_reserve(gws_bo, false);
2481 if (unlikely(ret)) {
2482 pr_err("Reserve gws bo failed %d\n", ret);
2483 //TODO add BO back to validate_list?
2486 amdgpu_amdkfd_remove_eviction_fence(gws_bo,
2487 process_info->eviction_fence);
2488 amdgpu_bo_unreserve(gws_bo);
2489 amdgpu_sync_free(&kgd_mem->sync);
2490 amdgpu_bo_unref(&gws_bo);
2491 mutex_destroy(&kgd_mem->lock);
2496 /* Returns GPU-specific tiling mode information */
2497 int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
2498 struct tile_config *config)
2500 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
2502 config->gb_addr_config = adev->gfx.config.gb_addr_config;
2503 config->tile_config_ptr = adev->gfx.config.tile_mode_array;
2504 config->num_tile_configs =
2505 ARRAY_SIZE(adev->gfx.config.tile_mode_array);
2506 config->macro_tile_config_ptr =
2507 adev->gfx.config.macrotile_mode_array;
2508 config->num_macro_tile_configs =
2509 ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
2511 /* Those values are not set from GFX9 onwards */
2512 config->num_banks = adev->gfx.config.num_banks;
2513 config->num_ranks = adev->gfx.config.num_ranks;