// SPDX-License-Identifier: MIT
/*
 * Copyright 2014-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/dma-buf.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <drm/ttm/ttm_tt.h>

#include "amdgpu_object.h"
#include "amdgpu_gem.h"
#include "amdgpu_vm.h"
#include "amdgpu_hmm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
#include <uapi/linux/kfd_ioctl.h>
#include "amdgpu_xgmi.h"
#include "kfd_smi_events.h"
/* Userptr restore delay, just long enough to allow consecutive VM
 * changes to accumulate
 */
#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
/*
 * Align VRAM availability to 2MB to avoid fragmentation caused by 4K allocations in the tail 2MB
 */
#define VRAM_AVAILABLITY_ALIGN (1 << 21)
/* Impose limit on how much memory KFD can use */
static struct kfd_mem_limit {
	uint64_t max_system_mem_limit;
	uint64_t max_ttm_mem_limit;
	int64_t system_mem_used;
	int64_t ttm_mem_used;
	spinlock_t mem_limit_lock;
} kfd_mem_limit;
static const char * const domain_bit_to_string[] = {
	"CPU",
	"GTT",
	"VRAM",
	"GDS",
	"GWS",
	"OA"
};

#define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
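/*
 * Illustrative example (not part of the driver): domain_string() assumes a
 * single-bit domain mask. For AMDGPU_GEM_DOMAIN_VRAM (0x4), ffs(0x4) - 1 == 2,
 * so
 *
 *	domain_string(AMDGPU_GEM_DOMAIN_VRAM);	// -> "VRAM"
 *
 * A multi-bit mask would only be reported by its lowest set bit.
 */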
static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
static bool kfd_mem_is_attached(struct amdgpu_vm *avm,
				struct kgd_mem *mem)
{
	struct kfd_mem_attachment *entry;

	list_for_each_entry(entry, &mem->attachments, list)
		if (entry->bo_va->base.vm == avm)
			return true;
	return false;
}
/**
 * reuse_dmamap() - Check whether adev can share the original
 * userptr BO
 *
 * If both adev and bo_adev are in direct mapping or
 * in the same iommu group, they can share the original BO.
 *
 * @adev: Device that may or may not be able to share the original BO
 * @bo_adev: Device that the allocated BO belongs to
 *
 * Return: returns true if adev can share the original userptr BO,
 * false otherwise.
 */
static bool reuse_dmamap(struct amdgpu_device *adev, struct amdgpu_device *bo_adev)
{
	return (adev->ram_is_direct_mapped && bo_adev->ram_is_direct_mapped) ||
			(adev->dev->iommu_group == bo_adev->dev->iommu_group);
}
/* Set memory usage limits. Currently, limits are
 *  System (TTM + userptr) memory - 15/16th System RAM
 *  TTM memory - 3/8th System RAM
 */
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
	struct sysinfo si;
	uint64_t mem;

	if (kfd_mem_limit.max_system_mem_limit)
		return;

	si_meminfo(&si);
	mem = si.freeram - si.freehigh;
	mem *= si.mem_unit;

	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
	kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
	kfd_mem_limit.max_ttm_mem_limit = ttm_tt_pages_limit() << PAGE_SHIFT;
	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
		(kfd_mem_limit.max_system_mem_limit >> 20),
		(kfd_mem_limit.max_ttm_mem_limit >> 20));
}
void amdgpu_amdkfd_reserve_system_mem(uint64_t size)
{
	kfd_mem_limit.system_mem_used += size;
}
/* Estimate page table size needed to represent a given memory size
 *
 * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
 * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
 * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
 * for 2MB pages for TLB efficiency. However, small allocations and
 * fragmented system memory still need some 4KB pages. We choose a
 * compromise that should work in most cases without reserving too
 * much memory for page tables unnecessarily (factor 16K, >> 14).
 */
#define ESTIMATE_PT_SIZE(mem_size) max(((mem_size) >> 14), AMDGPU_VM_RESERVED_VRAM)
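/*
 * Worked example (illustrative only): for 64 GB of managed memory,
 * ESTIMATE_PT_SIZE yields
 *
 *	(64ULL << 30) >> 14 == 4 MB
 *
 * of reserved page-table space, clamped below by AMDGPU_VM_RESERVED_VRAM.
 */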
/**
 * amdgpu_amdkfd_reserve_mem_limit() - Decrease available memory by size
 * of buffer.
 *
 * @adev: Device to which the allocated BO belongs
 * @size: Size of buffer, in bytes, encapsulated by BO. This should be
 * equivalent to amdgpu_bo_size(BO)
 * @alloc_flag: Flag used in allocating a BO as noted above
 *
 * Return: returns -ENOMEM in case of error, 0 otherwise
 */
int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 alloc_flag)
{
	uint64_t reserved_for_pt =
		ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
	size_t system_mem_needed, ttm_mem_needed, vram_needed;
	int ret = 0;

	system_mem_needed = 0;
	ttm_mem_needed = 0;
	vram_needed = 0;

	if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
		system_mem_needed = size;
		ttm_mem_needed = size;
	} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
		/*
		 * Conservatively round up the allocation requirement to 2 MB
		 * to avoid fragmentation caused by 4K allocations in the tail
		 * 2 MB BO chunk.
		 */
		vram_needed = size;
	} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
		system_mem_needed = size;
	} else if (!(alloc_flag &
				(KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
				 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
		pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
		return -ENOMEM;
	}

	spin_lock(&kfd_mem_limit.mem_limit_lock);

	if (kfd_mem_limit.system_mem_used + system_mem_needed >
	    kfd_mem_limit.max_system_mem_limit)
		pr_debug("Set no_system_mem_limit=1 if using shared memory\n");

	if ((kfd_mem_limit.system_mem_used + system_mem_needed >
	     kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
	    (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
	     kfd_mem_limit.max_ttm_mem_limit) ||
	    (adev && adev->kfd.vram_used + vram_needed >
	     adev->gmc.real_vram_size - reserved_for_pt)) {
		ret = -ENOMEM;
		goto release;
	}

	/* Update memory accounting by decreasing available system
	 * memory, TTM memory and GPU memory as computed above
	 */
	WARN_ONCE(vram_needed && !adev,
		  "adev reference can't be null when vram is used");
	if (adev) {
		adev->kfd.vram_used += vram_needed;
		adev->kfd.vram_used_aligned += ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
	}
	kfd_mem_limit.system_mem_used += system_mem_needed;
	kfd_mem_limit.ttm_mem_used += ttm_mem_needed;

release:
	spin_unlock(&kfd_mem_limit.mem_limit_lock);
	return ret;
}
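/*
 * Illustrative example (numbers are not from the driver): a 5 MB VRAM
 * allocation adds 5 MB to adev->kfd.vram_used but 6 MB to
 * adev->kfd.vram_used_aligned, because ALIGN(5 MB, VRAM_AVAILABLITY_ALIGN)
 * rounds up to the next 2 MB boundary.
 */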
void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 alloc_flag)
{
	spin_lock(&kfd_mem_limit.mem_limit_lock);

	if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
		kfd_mem_limit.system_mem_used -= size;
		kfd_mem_limit.ttm_mem_used -= size;
	} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
		WARN_ONCE(!adev,
			  "adev reference can't be null when alloc mem flags vram is set");
		if (adev) {
			adev->kfd.vram_used -= size;
			adev->kfd.vram_used_aligned -= ALIGN(size, VRAM_AVAILABLITY_ALIGN);
		}
	} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
		kfd_mem_limit.system_mem_used -= size;
	} else if (!(alloc_flag &
				(KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
				 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
		pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
		goto release;
	}
	WARN_ONCE(adev && adev->kfd.vram_used < 0,
		  "KFD VRAM memory accounting unbalanced");
	WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
		  "KFD TTM memory accounting unbalanced");
	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
		  "KFD system memory accounting unbalanced");

release:
	spin_unlock(&kfd_mem_limit.mem_limit_lock);
}
void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	u32 alloc_flags = bo->kfd_bo->alloc_flags;
	u64 size = amdgpu_bo_size(bo);

	amdgpu_amdkfd_unreserve_mem_limit(adev, size, alloc_flags);

	kfree(bo->kfd_bo);
}
/**
 * @create_dmamap_sg_bo: Creates an amdgpu_bo object to reflect information
 * about USERPTR or DOORBELL or MMIO BO.
 *
 * @adev: Device for which dmamap BO is being created
 * @mem: BO of peer device that is being DMA mapped. Provides parameters
 *	 in building the dmamap BO
 * @bo_out: Output parameter updated with handle of dmamap BO
 */
static int
create_dmamap_sg_bo(struct amdgpu_device *adev,
		 struct kgd_mem *mem, struct amdgpu_bo **bo_out)
{
	struct drm_gem_object *gem_obj;
	int ret;
	uint64_t flags = 0;

	ret = amdgpu_bo_reserve(mem->bo, false);
	if (ret)
		return ret;

	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR)
		flags |= mem->bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
					AMDGPU_GEM_CREATE_UNCACHED);

	ret = amdgpu_gem_object_create(adev, mem->bo->tbo.base.size, 1,
			AMDGPU_GEM_DOMAIN_CPU, AMDGPU_GEM_CREATE_PREEMPTIBLE | flags,
			ttm_bo_type_sg, mem->bo->tbo.base.resv, &gem_obj);

	amdgpu_bo_unreserve(mem->bo);

	if (ret) {
		pr_err("Error in creating DMA mappable SG BO on domain: %d\n", ret);
		return -EINVAL;
	}

	*bo_out = gem_to_amdgpu_bo(gem_obj);
	(*bo_out)->parent = amdgpu_bo_ref(mem->bo);
	return ret;
}
/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
 *  reservation object.
 *
 * @bo: [IN] Remove eviction fence(s) from this BO
 * @ef: [IN] This eviction fence is removed if it
 *  is present in the shared list.
 *
 * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
 */
static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
					struct amdgpu_amdkfd_fence *ef)
{
	struct dma_fence *replacement;

	if (!ef)
		return -EINVAL;

	/* TODO: Instead of blocking here, we should use the fence of the
	 * page table update and TLB flush here directly.
	 */
	replacement = dma_fence_get_stub();
	dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
				replacement, DMA_RESV_USAGE_BOOKKEEP);
	dma_fence_put(replacement);
	return 0;
}
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
{
	struct amdgpu_bo *root = bo;
	struct amdgpu_vm_bo_base *vm_bo;
	struct amdgpu_vm *vm;
	struct amdkfd_process_info *info;
	struct amdgpu_amdkfd_fence *ef;
	int ret;

	/* we can always get vm_bo from root PD bo. */
	while (root->parent)
		root = root->parent;

	vm_bo = root->vm_bo;
	if (!vm_bo)
		return 0;

	vm = vm_bo->vm;
	if (!vm)
		return 0;

	info = vm->process_info;
	if (!info || !info->eviction_fence)
		return 0;

	ef = container_of(dma_fence_get(&info->eviction_fence->base),
			struct amdgpu_amdkfd_fence, base);

	BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
	ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
	dma_resv_unlock(bo->tbo.base.resv);

	dma_fence_put(&ef->base);
	return ret;
}
static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
				     bool wait)
{
	struct ttm_operation_ctx ctx = { false, false };
	int ret;

	if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
		 "Called with userptr BO"))
		return -EINVAL;

	amdgpu_bo_placement_from_domain(bo, domain);

	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		goto validate_fail;
	if (wait)
		amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);

validate_fail:
	return ret;
}

static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
{
	return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
}
/* vm_validate_pt_pd_bos - Validate page table and directory BOs
 *
 * Page directories are not updated here because huge page handling
 * during page table updates can invalidate page directory entries
 * again. Page directories are only updated after updating page
 * tables.
 */
static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
{
	struct amdgpu_bo *pd = vm->root.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	int ret;

	ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL);
	if (ret) {
		pr_err("failed to validate PT BOs\n");
		return ret;
	}

	vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo);

	return 0;
}
static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
	struct amdgpu_bo *pd = vm->root.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	int ret;

	ret = amdgpu_vm_update_pdes(adev, vm, false);
	if (ret)
		return ret;

	return amdgpu_sync_fence(sync, vm->last_update);
}
static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
{
	uint32_t mapping_flags = AMDGPU_VM_PAGE_READABLE |
				 AMDGPU_VM_MTYPE_DEFAULT;

	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
		mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;

	return amdgpu_gem_va_map_flags(adev, mapping_flags);
}
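/*
 * Illustrative example (not part of the driver): a BO allocated with
 * KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE
 * ends up with READABLE | WRITEABLE | EXECUTABLE | MTYPE_DEFAULT mapping
 * flags, which amdgpu_gem_va_map_flags() translates to ASIC-specific PTE
 * bits.
 */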
/**
 * create_sg_table() - Create an sg_table for a contiguous DMA addr range
 * @addr: The starting address to point to
 * @size: Size of memory area in bytes being pointed to
 *
 * Allocates an instance of sg_table and initializes it to point to memory
 * area specified by input parameters. The address used to build is assumed
 * to be DMA mapped, if needed.
 *
 * DOORBELL or MMIO BOs use only one scatterlist node in their sg_table
 * because they are physically contiguous.
 *
 * Return: Initialized instance of SG Table or NULL
 */
static struct sg_table *create_sg_table(uint64_t addr, uint32_t size)
{
	struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);

	if (!sg)
		return NULL;

	if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
		kfree(sg);
		return NULL;
	}
	sg_dma_address(sg->sgl) = addr;
	sg->sgl->length = size;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
	sg->sgl->dma_length = size;
#endif
	return sg;
}
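/*
 * Usage sketch (illustrative): the DOORBELL/MMIO paths below build a
 * one-entry table for a physically contiguous range, e.g.
 *
 *	struct sg_table *sg = create_sg_table(dma_addr, PAGE_SIZE);
 *
 * and attach it to the BO's ttm->sg; tearing it down requires
 * sg_free_table() followed by kfree().
 */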
static int
kfd_mem_dmamap_userptr(struct kgd_mem *mem,
		       struct kfd_mem_attachment *attachment)
{
	enum dma_data_direction direction =
		mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	struct ttm_operation_ctx ctx = {.interruptible = true};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
	struct amdgpu_device *adev = attachment->adev;
	struct ttm_tt *src_ttm = mem->bo->tbo.ttm;
	struct ttm_tt *ttm = bo->tbo.ttm;
	int ret;

	if (WARN_ON(ttm->num_pages != src_ttm->num_pages))
		return -EINVAL;

	ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL);
	if (unlikely(!ttm->sg))
		return -ENOMEM;

	/* Same sequence as in amdgpu_ttm_tt_pin_userptr */
	ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages,
					ttm->num_pages, 0,
					(u64)ttm->num_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (unlikely(ret))
		goto free_sg;

	ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
	if (unlikely(ret))
		goto release_sg;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		goto unmap_sg;

	return 0;

unmap_sg:
	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
release_sg:
	pr_err("DMA map userptr failed: %d\n", ret);
	sg_free_table(ttm->sg);
free_sg:
	kfree(ttm->sg);
	ttm->sg = NULL;
	return ret;
}
static int
kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment)
{
	struct ttm_operation_ctx ctx = {.interruptible = true};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
	int ret;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		return ret;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}
/**
 * kfd_mem_dmamap_sg_bo() - Create DMA mapped sg_table to access DOORBELL or MMIO BO
 * @mem: SG BO of the DOORBELL or MMIO resource on the owning device
 * @attachment: Virtual address attachment of the BO on accessing device
 *
 * An access request from the device that owns DOORBELL does not require DMA mapping.
 * This is because the request doesn't go through PCIe root complex i.e. it instead
 * loops back. The need to DMA map arises only when accessing peer device's DOORBELL.
 *
 * In contrast, all access requests for MMIO need to be DMA mapped without regard to
 * device ownership. This is because access requests for MMIO go through PCIe root
 * complex.
 *
 * This is accomplished in two steps:
 *   - Obtain DMA mapped address of DOORBELL or MMIO memory that could be used
 *     in updating requesting device's page table
 *   - Signal TTM to mark memory pointed to by requesting device's BO as GPU
 *     accessible. This allows an update of requesting device's page table
 *     with entries associated with DOORBELL or MMIO memory
 *
 * This method is invoked in the following contexts:
 *   - Mapping of DOORBELL or MMIO BO of same or peer device
 *   - Validating an evicted DOORBELL or MMIO BO on device seeking access
 *
 * Return: ZERO if successful, NON-ZERO otherwise
 */
static int
kfd_mem_dmamap_sg_bo(struct kgd_mem *mem,
		     struct kfd_mem_attachment *attachment)
{
	struct ttm_operation_ctx ctx = {.interruptible = true};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
	struct amdgpu_device *adev = attachment->adev;
	struct ttm_tt *ttm = bo->tbo.ttm;
	enum dma_data_direction dir;
	dma_addr_t dma_addr;
	bool mmio;
	int ret;

	/* Expect SG Table of dmamap BO to be NULL */
	mmio = (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP);
	if (unlikely(ttm->sg)) {
		pr_err("SG Table of %d BO for peer device is UNEXPECTEDLY NON-NULL", mmio);
		return -EINVAL;
	}

	dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
			DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	dma_addr = mem->bo->tbo.sg->sgl->dma_address;
	pr_debug("%d BO size: %d\n", mmio, mem->bo->tbo.sg->sgl->length);
	pr_debug("%d BO address before DMA mapping: %llx\n", mmio, dma_addr);
	dma_addr = dma_map_resource(adev->dev, dma_addr,
			mem->bo->tbo.sg->sgl->length, dir, DMA_ATTR_SKIP_CPU_SYNC);
	ret = dma_mapping_error(adev->dev, dma_addr);
	if (unlikely(ret))
		return ret;
	pr_debug("%d BO address after DMA mapping: %llx\n", mmio, dma_addr);

	ttm->sg = create_sg_table(dma_addr, mem->bo->tbo.sg->sgl->length);
	if (unlikely(!ttm->sg)) {
		ret = -ENOMEM;
		goto unmap_sg;
	}

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(ret))
		goto free_sg;

	return ret;

free_sg:
	sg_free_table(ttm->sg);
	kfree(ttm->sg);
	ttm->sg = NULL;
unmap_sg:
	dma_unmap_resource(adev->dev, dma_addr, mem->bo->tbo.sg->sgl->length,
			   dir, DMA_ATTR_SKIP_CPU_SYNC);
	return ret;
}
static int
kfd_mem_dmamap_attachment(struct kgd_mem *mem,
			  struct kfd_mem_attachment *attachment)
{
	switch (attachment->type) {
	case KFD_MEM_ATT_SHARED:
		return 0;
	case KFD_MEM_ATT_USERPTR:
		return kfd_mem_dmamap_userptr(mem, attachment);
	case KFD_MEM_ATT_DMABUF:
		return kfd_mem_dmamap_dmabuf(attachment);
	case KFD_MEM_ATT_SG:
		return kfd_mem_dmamap_sg_bo(mem, attachment);
	default:
		WARN_ON_ONCE(1);
	}
	return -EINVAL;
}
static void
kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
			 struct kfd_mem_attachment *attachment)
{
	enum dma_data_direction direction =
		mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	struct ttm_operation_ctx ctx = {.interruptible = false};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
	struct amdgpu_device *adev = attachment->adev;
	struct ttm_tt *ttm = bo->tbo.ttm;

	if (unlikely(!ttm->sg))
		return;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
	sg_free_table(ttm->sg);
	kfree(ttm->sg);
	ttm->sg = NULL;
}
static void
kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment)
{
	/* This is a no-op. We don't want to trigger eviction fences when
	 * unmapping DMABufs. Therefore the invalidation (moving to system
	 * domain) is done in kfd_mem_dmamap_dmabuf.
	 */
}
/**
 * kfd_mem_dmaunmap_sg_bo() - Free DMA mapped sg_table of DOORBELL or MMIO BO
 * @mem: SG BO of the DOORBELL or MMIO resource on the owning device
 * @attachment: Virtual address attachment of the BO on accessing device
 *
 * The method performs following steps:
 *   - Signal TTM to mark memory pointed to by BO as GPU inaccessible
 *   - Free SG Table that is used to encapsulate DMA mapped memory of
 *     peer device's DOORBELL or MMIO memory
 *
 * This method is invoked in the following contexts:
 *   - Unmapping of DOORBELL or MMIO BO on a device having access to its memory
 *   - Eviction of DOORBELL or MMIO BO on device having access to its memory
 */
static void
kfd_mem_dmaunmap_sg_bo(struct kgd_mem *mem,
		       struct kfd_mem_attachment *attachment)
{
	struct ttm_operation_ctx ctx = {.interruptible = true};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
	struct amdgpu_device *adev = attachment->adev;
	struct ttm_tt *ttm = bo->tbo.ttm;
	enum dma_data_direction dir;

	if (unlikely(!ttm->sg)) {
		pr_err("SG Table of BO is UNEXPECTEDLY NULL");
		return;
	}

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
				DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	dma_unmap_resource(adev->dev, ttm->sg->sgl->dma_address,
			ttm->sg->sgl->length, dir, DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(ttm->sg);
	kfree(ttm->sg);
	ttm->sg = NULL;
}
static void
kfd_mem_dmaunmap_attachment(struct kgd_mem *mem,
			    struct kfd_mem_attachment *attachment)
{
	switch (attachment->type) {
	case KFD_MEM_ATT_SHARED:
		break;
	case KFD_MEM_ATT_USERPTR:
		kfd_mem_dmaunmap_userptr(mem, attachment);
		break;
	case KFD_MEM_ATT_DMABUF:
		kfd_mem_dmaunmap_dmabuf(attachment);
		break;
	case KFD_MEM_ATT_SG:
		kfd_mem_dmaunmap_sg_bo(mem, attachment);
		break;
	default:
		WARN_ON_ONCE(1);
	}
}
static int kfd_mem_export_dmabuf(struct kgd_mem *mem)
{
	if (!mem->dmabuf) {
		struct dma_buf *ret = amdgpu_gem_prime_export(
				&mem->bo->tbo.base,
				mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
					DRM_RDWR : 0);
		if (IS_ERR(ret))
			return PTR_ERR(ret);
		mem->dmabuf = ret;
	}

	return 0;
}
static int
kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
		      struct amdgpu_bo **bo)
{
	struct drm_gem_object *gobj;
	int ret;

	ret = kfd_mem_export_dmabuf(mem);
	if (ret)
		return ret;

	gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf);
	if (IS_ERR(gobj))
		return PTR_ERR(gobj);

	*bo = gem_to_amdgpu_bo(gobj);
	(*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;

	return 0;
}
/* kfd_mem_attach - Add a BO to a VM
 *
 * Everything that needs to be done only once when a BO is first added
 * to a VM. It can later be mapped and unmapped many times without
 * repeating these steps.
 *
 * 0. Create BO for DMA mapping, if needed
 * 1. Allocate and initialize BO VA entry data structure
 * 2. Add BO to the VM
 * 3. Determine ASIC-specific PTE flags
 * 4. Alloc page tables and directories if needed
 * 4a. Validate new page tables and directories
 */
static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
		struct amdgpu_vm *vm, bool is_aql)
{
	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
	unsigned long bo_size = mem->bo->tbo.base.size;
	uint64_t va = mem->va;
	struct kfd_mem_attachment *attachment[2] = {NULL, NULL};
	struct amdgpu_bo *bo[2] = {NULL, NULL};
	bool same_hive = false;
	int i, ret;

	if (!va) {
		pr_err("Invalid VA when adding BO to VM\n");
		return -EINVAL;
	}

	/* Determine access to VRAM, MMIO and DOORBELL BOs of peer devices
	 *
	 * The access path of MMIO and DOORBELL BOs is always over PCIe.
	 * In contrast the access path of VRAM BOs depends upon the type of
	 * link that connects the peer device. Access over PCIe is allowed
	 * if peer device has large BAR. In contrast, access over xGMI is
	 * allowed for both small and large BAR configurations of peer device
	 */
	if ((adev != bo_adev) &&
	    ((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) ||
	     (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) ||
	     (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
		if (mem->domain == AMDGPU_GEM_DOMAIN_VRAM)
			same_hive = amdgpu_xgmi_same_hive(adev, bo_adev);
		if (!same_hive && !amdgpu_device_is_peer_accessible(bo_adev, adev))
			return -EINVAL;
	}

	for (i = 0; i <= is_aql; i++) {
		attachment[i] = kzalloc(sizeof(*attachment[i]), GFP_KERNEL);
		if (unlikely(!attachment[i])) {
			ret = -ENOMEM;
			goto unwind;
		}

		pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
			 va + bo_size, vm);

		if ((adev == bo_adev && !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) ||
		    (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && reuse_dmamap(adev, bo_adev)) ||
		    same_hive) {
			/* Mappings on the local GPU, or VRAM mappings in the
			 * local hive, or userptr mappings that can reuse the
			 * DMA map address space, share the original BO
			 */
			attachment[i]->type = KFD_MEM_ATT_SHARED;
			bo[i] = mem->bo;
			drm_gem_object_get(&bo[i]->tbo.base);
		} else if (i > 0) {
			/* Multiple mappings on the same GPU share the BO */
			attachment[i]->type = KFD_MEM_ATT_SHARED;
			bo[i] = bo[0];
			drm_gem_object_get(&bo[i]->tbo.base);
		} else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
			/* Create an SG BO to DMA-map userptrs on other GPUs */
			attachment[i]->type = KFD_MEM_ATT_USERPTR;
			ret = create_dmamap_sg_bo(adev, mem, &bo[i]);
			if (ret)
				goto unwind;
		/* Handle DOORBELL BOs of peer devices and MMIO BOs of local and peer devices */
		} else if (mem->bo->tbo.type == ttm_bo_type_sg) {
			WARN_ONCE(!(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL ||
				    mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP),
				  "Handling invalid SG BO in ATTACH request");
			attachment[i]->type = KFD_MEM_ATT_SG;
			ret = create_dmamap_sg_bo(adev, mem, &bo[i]);
			if (ret)
				goto unwind;
		/* Enable access to GTT and VRAM BOs of peer devices */
		} else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT ||
			   mem->domain == AMDGPU_GEM_DOMAIN_VRAM) {
			attachment[i]->type = KFD_MEM_ATT_DMABUF;
			ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]);
			if (ret)
				goto unwind;
			pr_debug("Employ DMABUF mechanism to enable peer GPU access\n");
		} else {
			WARN_ONCE(true, "Handling invalid ATTACH request");
			ret = -EINVAL;
			goto unwind;
		}

		/* Add BO to VM internal data structures */
		ret = amdgpu_bo_reserve(bo[i], false);
		if (ret) {
			pr_debug("Unable to reserve BO during memory attach");
			goto unwind;
		}
		attachment[i]->bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
		amdgpu_bo_unreserve(bo[i]);
		if (unlikely(!attachment[i]->bo_va)) {
			ret = -ENOMEM;
			pr_err("Failed to add BO object to VM. ret == %d\n",
			       ret);
			goto unwind;
		}
		attachment[i]->va = va;
		attachment[i]->pte_flags = get_pte_flags(adev, mem);
		attachment[i]->adev = adev;
		list_add(&attachment[i]->list, &mem->attachments);

		va += bo_size;
	}

	return 0;

unwind:
	for (; i >= 0; i--) {
		if (!attachment[i])
			continue;
		if (attachment[i]->bo_va) {
			amdgpu_bo_reserve(bo[i], true);
			amdgpu_vm_bo_del(adev, attachment[i]->bo_va);
			amdgpu_bo_unreserve(bo[i]);
			list_del(&attachment[i]->list);
		}
		if (bo[i])
			drm_gem_object_put(&bo[i]->tbo.base);
		kfree(attachment[i]);
	}
	return ret;
}
static void kfd_mem_detach(struct kfd_mem_attachment *attachment)
{
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;

	pr_debug("\t remove VA 0x%llx in entry %p\n",
			attachment->va, attachment);
	amdgpu_vm_bo_del(attachment->adev, attachment->bo_va);
	drm_gem_object_put(&bo->tbo.base);
	list_del(&attachment->list);
	kfree(attachment);
}
static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
				struct amdkfd_process_info *process_info,
				bool userptr)
{
	struct ttm_validate_buffer *entry = &mem->validate_list;
	struct amdgpu_bo *bo = mem->bo;

	INIT_LIST_HEAD(&entry->head);
	entry->num_shared = 1;
	entry->bo = &bo->tbo;
	mutex_lock(&process_info->lock);
	if (userptr)
		list_add_tail(&entry->head, &process_info->userptr_valid_list);
	else
		list_add_tail(&entry->head, &process_info->kfd_bo_list);
	mutex_unlock(&process_info->lock);
}
static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
		struct amdkfd_process_info *process_info)
{
	struct ttm_validate_buffer *bo_list_entry;

	bo_list_entry = &mem->validate_list;
	mutex_lock(&process_info->lock);
	list_del(&bo_list_entry->head);
	mutex_unlock(&process_info->lock);
}
/* Initializes user pages. It registers the MMU notifier and validates
 * the userptr BO in the GTT domain.
 *
 * The BO must already be on the userptr_valid_list. Otherwise an
 * eviction and restore may happen that leaves the new BO unmapped
 * with the user mode queues running.
 *
 * Takes the process_info->lock to protect against concurrent restore
 * workers.
 *
 * Returns 0 for success, negative errno for errors.
 */
static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
			   bool criu_resume)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	struct amdgpu_bo *bo = mem->bo;
	struct ttm_operation_ctx ctx = { true, false };
	struct hmm_range *range;
	int ret = 0;

	mutex_lock(&process_info->lock);

	ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
	if (ret) {
		pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
		goto out;
	}

	ret = amdgpu_hmm_register(bo, user_addr);
	if (ret) {
		pr_err("%s: Failed to register MMU notifier: %d\n",
		       __func__, ret);
		goto out;
	}

	if (criu_resume) {
		/*
		 * During a CRIU restore operation, the userptr buffer objects
		 * will be validated in the restore_userptr_work worker at a
		 * later stage when it is scheduled by another ioctl called by
		 * CRIU master process for the target pid for restore.
		 */
		mutex_lock(&process_info->notifier_lock);
		mem->invalid++;
		mutex_unlock(&process_info->notifier_lock);
		mutex_unlock(&process_info->lock);
		return 0;
	}

	ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &range);
	if (ret) {
		pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
		goto unregister_out;
	}

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		pr_err("%s: Failed to reserve BO\n", __func__);
		goto release_out;
	}
	amdgpu_bo_placement_from_domain(bo, mem->domain);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		pr_err("%s: failed to validate BO\n", __func__);
	amdgpu_bo_unreserve(bo);

release_out:
	amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
unregister_out:
	if (ret)
		amdgpu_hmm_unregister(bo);
out:
	mutex_unlock(&process_info->lock);
	return ret;
}
/* Reserving a BO and its page table BOs must happen atomically to
 * avoid deadlocks. Some operations update multiple VMs at once. Track
 * all the reservation info in a context structure. Optionally a sync
 * object can track VM updates.
 */
struct bo_vm_reservation_context {
	struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
	unsigned int n_vms;		    /* Number of VMs reserved	    */
	struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
	struct ww_acquire_ctx ticket;	    /* Reservation ticket	    */
	struct list_head list, duplicates;  /* BO lists			    */
	struct amdgpu_sync *sync;	    /* Pointer to sync object	    */
	bool reserved;			    /* Whether BOs are reserved	    */
};

enum bo_vm_match {
	BO_VM_NOT_MAPPED = 0,	/* Match VMs where a BO is not mapped */
	BO_VM_MAPPED,		/* Match VMs where a BO is mapped     */
	BO_VM_ALL,		/* Match all VMs a BO was added to    */
};
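/*
 * Usage sketch (illustrative only): callers pair the reserve and unreserve
 * helpers around page-table manipulation, e.g.
 *
 *	struct bo_vm_reservation_context ctx;
 *
 *	ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx);
 *	if (!ret) {
 *		// ... update mappings, collecting fences in ctx.sync ...
 *		ret = unreserve_bo_and_vms(&ctx, true, false);
 *	}
 */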
/**
 * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
 * @mem: KFD BO structure.
 * @vm: the VM to reserve.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 */
static int reserve_bo_and_vm(struct kgd_mem *mem,
			     struct amdgpu_vm *vm,
			     struct bo_vm_reservation_context *ctx)
{
	struct amdgpu_bo *bo = mem->bo;
	int ret;

	WARN_ON(!vm);

	ctx->reserved = false;
	ctx->n_vms = 1;
	ctx->sync = &mem->sync;

	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->duplicates);

	ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
	if (!ctx->vm_pd)
		return -ENOMEM;

	ctx->kfd_bo.priority = 0;
	ctx->kfd_bo.tv.bo = &bo->tbo;
	ctx->kfd_bo.tv.num_shared = 1;
	list_add(&ctx->kfd_bo.tv.head, &ctx->list);

	amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);

	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
				     false, &ctx->duplicates);
	if (ret) {
		pr_err("Failed to reserve buffers in ttm.\n");
		kfree(ctx->vm_pd);
		ctx->vm_pd = NULL;
		return ret;
	}

	ctx->reserved = true;
	return 0;
}
/**
 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
 * @mem: KFD BO structure.
 * @vm: the VM to reserve. If NULL, then all VMs associated with the BO
 * are used. Otherwise, a single VM associated with the BO.
 * @map_type: the mapping status that will be used to filter the VMs.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 *
 * Returns 0 for success, negative for failure.
 */
static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
				struct amdgpu_vm *vm, enum bo_vm_match map_type,
				struct bo_vm_reservation_context *ctx)
{
	struct amdgpu_bo *bo = mem->bo;
	struct kfd_mem_attachment *entry;
	unsigned int i;
	int ret;

	ctx->reserved = false;
	ctx->n_vms = 0;
	ctx->vm_pd = NULL;
	ctx->sync = &mem->sync;

	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->duplicates);

	list_for_each_entry(entry, &mem->attachments, list) {
		if ((vm && vm != entry->bo_va->base.vm) ||
			(entry->is_mapped != map_type
			&& map_type != BO_VM_ALL))
			continue;

		ctx->n_vms++;
	}

	if (ctx->n_vms != 0) {
		ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
				     GFP_KERNEL);
		if (!ctx->vm_pd)
			return -ENOMEM;
	}

	ctx->kfd_bo.priority = 0;
	ctx->kfd_bo.tv.bo = &bo->tbo;
	ctx->kfd_bo.tv.num_shared = 1;
	list_add(&ctx->kfd_bo.tv.head, &ctx->list);

	i = 0;
	list_for_each_entry(entry, &mem->attachments, list) {
		if ((vm && vm != entry->bo_va->base.vm) ||
			(entry->is_mapped != map_type
			&& map_type != BO_VM_ALL))
			continue;

		amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
				&ctx->vm_pd[i]);
		i++;
	}

	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
				     false, &ctx->duplicates);
	if (ret) {
		pr_err("Failed to reserve buffers in ttm.\n");
		kfree(ctx->vm_pd);
		ctx->vm_pd = NULL;
		return ret;
	}

	ctx->reserved = true;
	return 0;
}
/**
 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
 * @ctx: Reservation context to unreserve
 * @wait: Optionally wait for a sync object representing pending VM updates
 * @intr: Whether the wait is interruptible
 *
 * Also frees any resources allocated in
 * reserve_bo_and_(cond_)vm(s). Returns the status from
 * amdgpu_sync_wait.
 */
static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
				 bool wait, bool intr)
{
	int ret = 0;

	if (wait)
		ret = amdgpu_sync_wait(ctx->sync, intr);

	if (ctx->reserved)
		ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
	kfree(ctx->vm_pd);

	ctx->sync = NULL;
	ctx->reserved = false;
	ctx->vm_pd = NULL;

	return ret;
}
static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
				struct kfd_mem_attachment *entry,
				struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = entry->bo_va;
	struct amdgpu_device *adev = entry->adev;
	struct amdgpu_vm *vm = bo_va->base.vm;

	amdgpu_vm_bo_unmap(adev, bo_va, entry->va);

	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);

	amdgpu_sync_fence(sync, bo_va->last_pt_update);

	kfd_mem_dmaunmap_attachment(mem, entry);
}
static int update_gpuvm_pte(struct kgd_mem *mem,
			    struct kfd_mem_attachment *entry,
			    struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = entry->bo_va;
	struct amdgpu_device *adev = entry->adev;
	int ret;

	ret = kfd_mem_dmamap_attachment(mem, entry);
	if (ret)
		return ret;

	/* Update the page tables  */
	ret = amdgpu_vm_bo_update(adev, bo_va, false);
	if (ret) {
		pr_err("amdgpu_vm_bo_update failed\n");
		return ret;
	}

	return amdgpu_sync_fence(sync, bo_va->last_pt_update);
}
static int map_bo_to_gpuvm(struct kgd_mem *mem,
			   struct kfd_mem_attachment *entry,
			   struct amdgpu_sync *sync,
			   bool no_update_pte)
{
	int ret;

	/* Set virtual address for the allocation */
	ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0,
			       amdgpu_bo_size(entry->bo_va->base.bo),
			       entry->pte_flags);
	if (ret) {
		pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
				entry->va, ret);
		return ret;
	}

	if (no_update_pte)
		return 0;

	ret = update_gpuvm_pte(mem, entry, sync);
	if (ret) {
		pr_err("update_gpuvm_pte() failed\n");
		goto update_gpuvm_pte_failed;
	}

	return 0;

update_gpuvm_pte_failed:
	unmap_bo_from_gpuvm(mem, entry, sync);
	return ret;
}
static int process_validate_vms(struct amdkfd_process_info *process_info)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		ret = vm_validate_pt_pd_bos(peer_vm);
		if (ret)
			return ret;
	}

	return 0;
}
static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
				 struct amdgpu_sync *sync)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		struct amdgpu_bo *pd = peer_vm->root.bo;

		ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
				       AMDGPU_SYNC_NE_OWNER,
				       AMDGPU_FENCE_OWNER_KFD);
		if (ret)
			return ret;
	}

	return 0;
}
static int process_update_pds(struct amdkfd_process_info *process_info,
			      struct amdgpu_sync *sync)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		ret = vm_update_pds(peer_vm, sync);
		if (ret)
			return ret;
	}

	return 0;
}
static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
		       struct dma_fence **ef)
{
	struct amdkfd_process_info *info = NULL;
	int ret;

	if (!*process_info) {
		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		mutex_init(&info->lock);
		mutex_init(&info->notifier_lock);
		INIT_LIST_HEAD(&info->vm_list_head);
		INIT_LIST_HEAD(&info->kfd_bo_list);
		INIT_LIST_HEAD(&info->userptr_valid_list);
		INIT_LIST_HEAD(&info->userptr_inval_list);

		info->eviction_fence =
			amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
						   current->mm,
						   NULL);
		if (!info->eviction_fence) {
			pr_err("Failed to create eviction fence\n");
			ret = -ENOMEM;
			goto create_evict_fence_fail;
		}

		info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
		INIT_DELAYED_WORK(&info->restore_userptr_work,
				  amdgpu_amdkfd_restore_userptr_worker);

		*process_info = info;
		*ef = dma_fence_get(&info->eviction_fence->base);
	}

	vm->process_info = *process_info;

	/* Validate page directory and attach eviction fence */
	ret = amdgpu_bo_reserve(vm->root.bo, true);
	if (ret)
		goto reserve_pd_fail;
	ret = vm_validate_pt_pd_bos(vm);
	if (ret) {
		pr_err("validate_pt_pd_bos() failed\n");
		goto validate_pd_fail;
	}
	ret = amdgpu_bo_sync_wait(vm->root.bo,
				  AMDGPU_FENCE_OWNER_KFD, false);
	if (ret)
		goto wait_pd_fail;
	ret = dma_resv_reserve_fences(vm->root.bo->tbo.base.resv, 1);
	if (ret)
		goto reserve_shared_fail;
	dma_resv_add_fence(vm->root.bo->tbo.base.resv,
			   &vm->process_info->eviction_fence->base,
			   DMA_RESV_USAGE_BOOKKEEP);
	amdgpu_bo_unreserve(vm->root.bo);

	/* Update process info */
	mutex_lock(&vm->process_info->lock);
	list_add_tail(&vm->vm_list_node,
			&(vm->process_info->vm_list_head));
	vm->process_info->n_vms++;
	mutex_unlock(&vm->process_info->lock);

	return 0;

reserve_shared_fail:
wait_pd_fail:
validate_pd_fail:
	amdgpu_bo_unreserve(vm->root.bo);
reserve_pd_fail:
	vm->process_info = NULL;
	if (info) {
		/* Two fence references: one in info and one in *ef */
		dma_fence_put(&info->eviction_fence->base);
		dma_fence_put(*ef);
		*ef = NULL;
		*process_info = NULL;
		put_pid(info->pid);
create_evict_fence_fail:
		mutex_destroy(&info->lock);
		mutex_destroy(&info->notifier_lock);
		kfree(info);
	}
	return ret;
}
/**
 * amdgpu_amdkfd_gpuvm_pin_bo() - Pins a BO using following criteria
 * @bo: Handle of buffer object being pinned
 * @domain: Domain into which BO should be pinned
 *
 *   - USERPTR BOs are UNPINNABLE and will return error
 *   - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
 *     PIN count incremented. It is valid to PIN a BO multiple times
 *
 * Return: ZERO if successful in pinning, Non-Zero in case of error.
 */
static int amdgpu_amdkfd_gpuvm_pin_bo(struct amdgpu_bo *bo, u32 domain)
{
	int ret = 0;

	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret))
		return ret;

	ret = amdgpu_bo_pin_restricted(bo, domain, 0, 0);
	if (ret)
		pr_err("Error in Pinning BO to domain: %d\n", domain);

	amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
	amdgpu_bo_unreserve(bo);

	return ret;
}
/**
 * amdgpu_amdkfd_gpuvm_unpin_bo() - Unpins BO using following criteria
 * @bo: Handle of buffer object being unpinned
 *
 *   - Is an illegal request for USERPTR BOs and is ignored
 *   - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
 *     PIN count decremented. Calls to UNPIN must balance calls to PIN
 */
static void amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo)
{
	int ret = 0;

	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret))
		return;

	amdgpu_bo_unpin(bo);
	amdgpu_bo_unreserve(bo);
}
int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
				     struct amdgpu_vm *avm, u32 pasid)
{
	int ret;

	/* Free the original amdgpu-allocated pasid; it will be
	 * replaced with a kfd-allocated pasid.
	 */
	if (avm->pasid) {
		amdgpu_pasid_free(avm->pasid);
		amdgpu_vm_set_pasid(adev, avm, 0);
	}

	ret = amdgpu_vm_set_pasid(adev, avm, pasid);
	if (ret)
		return ret;

	return 0;
}
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
					   struct amdgpu_vm *avm,
					   void **process_info,
					   struct dma_fence **ef)
{
	int ret;

	/* Already a compute VM? */
	if (avm->process_info)
		return -EINVAL;

	/* Convert VM into a compute VM */
	ret = amdgpu_vm_make_compute(adev, avm);
	if (ret)
		return ret;

	/* Initialize KFD part of the VM and process info */
	ret = init_kfd_vm(avm, process_info, ef);
	if (ret)
		return ret;

	amdgpu_vm_set_task_info(avm);

	return 0;
}
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
	struct amdkfd_process_info *process_info = vm->process_info;

	if (!process_info)
		return;

	/* Update process info */
	mutex_lock(&process_info->lock);
	process_info->n_vms--;
	list_del(&vm->vm_list_node);
	mutex_unlock(&process_info->lock);

	vm->process_info = NULL;

	/* Release per-process resources when last compute VM is destroyed */
	if (!process_info->n_vms) {
		WARN_ON(!list_empty(&process_info->kfd_bo_list));
		WARN_ON(!list_empty(&process_info->userptr_valid_list));
		WARN_ON(!list_empty(&process_info->userptr_inval_list));

		dma_fence_put(&process_info->eviction_fence->base);
		cancel_delayed_work_sync(&process_info->restore_userptr_work);
		put_pid(process_info->pid);
		mutex_destroy(&process_info->lock);
		mutex_destroy(&process_info->notifier_lock);
		kfree(process_info);
	}
}
void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
					    void *drm_priv)
{
	struct amdgpu_vm *avm;

	if (WARN_ON(!adev || !drm_priv))
		return;

	avm = drm_priv_to_vm(drm_priv);

	pr_debug("Releasing process vm %p\n", avm);

	/* The original pasid of the amdgpu vm was already released
	 * when it was converted to a compute vm. The current pasid
	 * is managed by kfd and will be released on kfd process
	 * destroy. Set amdgpu pasid to 0 to avoid duplicate release.
	 */
	amdgpu_vm_release_compute(adev, avm);
}
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
{
	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
	struct amdgpu_bo *pd = avm->root.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);

	if (adev->asic_type < CHIP_VEGA10)
		return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
	return avm->pd_phys_addr;
}
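/*
 * Illustrative note (numbers are hypothetical): pre-Vega10 ASICs consume the
 * page directory as a page frame number, so a PD at physical address
 * 0x201000 is reported as 0x201000 >> AMDGPU_GPU_PAGE_SHIFT == 0x201, while
 * Vega10 and later take the full 64-bit address unchanged.
 */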
void amdgpu_amdkfd_block_mmu_notifications(void *p)
{
	struct amdkfd_process_info *pinfo = (struct amdkfd_process_info *)p;

	mutex_lock(&pinfo->lock);
	WRITE_ONCE(pinfo->block_mmu_notifications, true);
	mutex_unlock(&pinfo->lock);
}
int amdgpu_amdkfd_criu_resume(void *p)
{
	int ret = 0;
	struct amdkfd_process_info *pinfo = (struct amdkfd_process_info *)p;

	mutex_lock(&pinfo->lock);
	pr_debug("scheduling work\n");
	mutex_lock(&pinfo->notifier_lock);
	pinfo->evicted_bos++;
	mutex_unlock(&pinfo->notifier_lock);
	if (!READ_ONCE(pinfo->block_mmu_notifications)) {
		ret = -EINVAL;
		goto out_unlock;
	}
	WRITE_ONCE(pinfo->block_mmu_notifications, false);
	schedule_delayed_work(&pinfo->restore_userptr_work, 0);

out_unlock:
	mutex_unlock(&pinfo->lock);
	return ret;
}
size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev)
{
	uint64_t reserved_for_pt =
		ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
	ssize_t available;

	spin_lock(&kfd_mem_limit.mem_limit_lock);
	available = adev->gmc.real_vram_size
		- adev->kfd.vram_used_aligned
		- atomic64_read(&adev->vram_pin_size)
		- reserved_for_pt;
	spin_unlock(&kfd_mem_limit.mem_limit_lock);

	if (available < 0)
		available = 0;

	return ALIGN_DOWN(available, VRAM_AVAILABLITY_ALIGN);
}
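/*
 * Worked example (illustrative numbers only): on a 16 GB VRAM board with
 * 1 GB of aligned KFD usage, 256 MB pinned and 512 MB reserved for page
 * tables, this reports
 *
 *	ALIGN_DOWN(16384M - 1024M - 256M - 512M, 2M) == 14592 MB
 *
 * i.e. availability is always quoted in whole 2 MB chunks.
 */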
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
		struct amdgpu_device *adev, uint64_t va, uint64_t size,
		void *drm_priv, struct kgd_mem **mem,
		uint64_t *offset, uint32_t flags, bool criu_resume)
{
	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct sg_table *sg = NULL;
	uint64_t user_addr = 0;
	struct amdgpu_bo *bo;
	struct drm_gem_object *gobj = NULL;
	u32 domain, alloc_domain;
	uint64_t aligned_size;
	u64 alloc_flags;
	int ret;

	/*
	 * Check on which domain to allocate BO
	 */
	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
		alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
		alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;
	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_flags = 0;
	} else {
		domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
		alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE;

		if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
			if (!offset || !*offset)
				return -EINVAL;
			user_addr = untagged_addr(*offset);
		} else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
				    KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
			bo_type = ttm_bo_type_sg;
			if (size > UINT_MAX)
				return -EINVAL;
			sg = create_sg_table(*offset, size);
			if (!sg)
				return -ENOMEM;
		} else {
			return -EINVAL;
		}
	}

	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT)
		alloc_flags |= AMDGPU_GEM_CREATE_COHERENT;
	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED)
		alloc_flags |= AMDGPU_GEM_CREATE_UNCACHED;

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem) {
		ret = -ENOMEM;
		goto err;
	}
	INIT_LIST_HEAD(&(*mem)->attachments);
	mutex_init(&(*mem)->lock);
	(*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);

	/* Workaround for AQL queue wraparound bug. Map the same
	 * memory twice. That means we only actually allocate half
	 * the memory.
	 */
	if ((*mem)->aql_queue)
		size >>= 1;
	aligned_size = PAGE_ALIGN(size);

	(*mem)->alloc_flags = flags;

	amdgpu_sync_create(&(*mem)->sync);

	ret = amdgpu_amdkfd_reserve_mem_limit(adev, aligned_size, flags);
	if (ret) {
		pr_debug("Insufficient memory\n");
		goto err_reserve_limit;
	}

	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
			va, (*mem)->aql_queue ? size << 1 : size, domain_string(alloc_domain));

	ret = amdgpu_gem_object_create(adev, aligned_size, 1, alloc_domain, alloc_flags,
				       bo_type, NULL, &gobj);
	if (ret) {
		pr_debug("Failed to create BO on domain %s. ret %d\n",
			 domain_string(alloc_domain), ret);
		goto err_bo_create;
	}
	ret = drm_vma_node_allow(&gobj->vma_node, drm_priv);
	if (ret) {
		pr_debug("Failed to allow vma node access. ret %d\n", ret);
		goto err_node_allow;
	}
	bo = gem_to_amdgpu_bo(gobj);
	if (bo_type == ttm_bo_type_sg) {
		bo->tbo.sg = sg;
		bo->tbo.ttm->sg = sg;
	}
	bo->kfd_bo = *mem;
	(*mem)->bo = bo;
	if (user_addr)
		bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO;

	(*mem)->va = va;
	(*mem)->domain = domain;
	(*mem)->mapped_to_gpu_memory = 0;
	(*mem)->process_info = avm->process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);

	if (user_addr) {
		pr_debug("creating userptr BO for user_addr = %llx\n", user_addr);
		ret = init_user_pages(*mem, user_addr, criu_resume);
		if (ret)
			goto allocate_init_user_pages_failed;
	} else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
				KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
		ret = amdgpu_amdkfd_gpuvm_pin_bo(bo, AMDGPU_GEM_DOMAIN_GTT);
		if (ret) {
			pr_err("Pinning MMIO/DOORBELL BO during ALLOC FAILED\n");
			goto err_pin_bo;
		}
		bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
		bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	}

	if (offset)
		*offset = amdgpu_bo_mmap_offset(bo);

	return 0;

allocate_init_user_pages_failed:
err_pin_bo:
	remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
	drm_vma_node_revoke(&gobj->vma_node, drm_priv);
err_node_allow:
	/* Don't unreserve system mem limit twice */
	goto err_reserve_limit;
err_bo_create:
	amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags);
err_reserve_limit:
	mutex_destroy(&(*mem)->lock);
	if (gobj)
		drm_gem_object_put(gobj);
	else
		kfree(*mem);
err:
	if (sg) {
		sg_free_table(sg);
		kfree(sg);
	}
	return ret;
}
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
		uint64_t *size)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	unsigned long bo_size = mem->bo->tbo.base.size;
	bool use_release_notifier = (mem->bo->kfd_bo == mem);
	struct kfd_mem_attachment *entry, *tmp;
	struct bo_vm_reservation_context ctx;
	struct ttm_validate_buffer *bo_list_entry;
	unsigned int mapped_to_gpu_memory;
	int ret;
	bool is_imported = false;

	mutex_lock(&mem->lock);

	/* Unpin MMIO/DOORBELL BO's that were pinned during allocation */
	if (mem->alloc_flags &
	    (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
	     KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
		amdgpu_amdkfd_gpuvm_unpin_bo(mem->bo);
	}

	mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
	is_imported = mem->is_imported;
	mutex_unlock(&mem->lock);
	/* lock is not needed after this, since mem is unused and will
	 * be freed anyway
	 */

	if (mapped_to_gpu_memory > 0) {
		pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
				mem->va, bo_size);
		return -EBUSY;
	}

	/* Make sure restore workers don't access the BO any more */
	bo_list_entry = &mem->validate_list;
	mutex_lock(&process_info->lock);
	list_del(&bo_list_entry->head);
	mutex_unlock(&process_info->lock);

	/* Cleanup user pages and MMU notifiers */
	if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
		amdgpu_hmm_unregister(mem->bo);
		mutex_lock(&process_info->notifier_lock);
		amdgpu_ttm_tt_discard_user_pages(mem->bo->tbo.ttm, mem->range);
		mutex_unlock(&process_info->notifier_lock);
	}

	ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
	if (unlikely(ret))
		return ret;

	/* The eviction fence should be removed by the last unmap.
	 * TODO: Log an error condition if the bo still has the eviction fence
	 * attached
	 */
	amdgpu_amdkfd_remove_eviction_fence(mem->bo,
					process_info->eviction_fence);
	pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
		mem->va + bo_size * (1 + mem->aql_queue));

	/* Remove from VM internal data structures */
	list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
		kfd_mem_detach(entry);

	ret = unreserve_bo_and_vms(&ctx, false, false);

	/* Free the sync object */
	amdgpu_sync_free(&mem->sync);

	/* If the SG is not NULL, it's one we created for a doorbell or mmio
	 * remap BO. We need to free it.
	 */
	if (mem->bo->tbo.sg) {
		sg_free_table(mem->bo->tbo.sg);
		kfree(mem->bo->tbo.sg);
	}

	/* Update the size of the BO being freed if it was allocated from
	 * VRAM and is not imported.
	 */
	if (size) {
		if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
		    (!is_imported))
			*size = bo_size;
		else
			*size = 0;
	}

	/* Free the BO */
	drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
	if (mem->dmabuf)
		dma_buf_put(mem->dmabuf);
	mutex_destroy(&mem->lock);

	/* If this releases the last reference, it will end up calling
	 * amdgpu_amdkfd_release_notify and kfree the mem struct. That's why
	 * this needs to be the last call here.
	 */
	drm_gem_object_put(&mem->bo->tbo.base);

	/*
	 * For kgd_mem allocated in amdgpu_amdkfd_gpuvm_import_dmabuf(),
	 * explicitly free it here.
	 */
	if (!use_release_notifier)
		kfree(mem);

	return ret;
}
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
		struct amdgpu_device *adev, struct kgd_mem *mem,
		void *drm_priv)
{
	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
	int ret;
	struct amdgpu_bo *bo;
	uint32_t domain;
	struct kfd_mem_attachment *entry;
	struct bo_vm_reservation_context ctx;
	unsigned long bo_size;
	bool is_invalid_userptr = false;

	bo = mem->bo;
	if (!bo) {
		pr_err("Invalid BO when mapping memory to GPU\n");
		return -EINVAL;
	}

	/* Make sure restore is not running concurrently. Since we
	 * don't map invalid userptr BOs, we rely on the next restore
	 * worker to do the mapping
	 */
	mutex_lock(&mem->process_info->lock);

	/* Lock notifier lock. If we find an invalid userptr BO, we can be
	 * sure that the MMU notifier is no longer running
	 * concurrently and the queues are actually stopped
	 */
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		mutex_lock(&mem->process_info->notifier_lock);
		is_invalid_userptr = !!mem->invalid;
		mutex_unlock(&mem->process_info->notifier_lock);
	}

	mutex_lock(&mem->lock);

	domain = mem->domain;
	bo_size = bo->tbo.base.size;

	pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
			mem->va,
			mem->va + bo_size * (1 + mem->aql_queue),
			avm, domain_string(domain));

	if (!kfd_mem_is_attached(avm, mem)) {
		ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue);
		if (ret)
			goto out;
	}

	ret = reserve_bo_and_vm(mem, avm, &ctx);
	if (unlikely(ret))
		goto out;

	/* Userptr can be marked as "not invalid", but not actually be
	 * validated yet (still in the system domain). In that case
	 * the queues are still stopped and we can leave mapping for
	 * the next restore worker
	 */
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
	    bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
		is_invalid_userptr = true;

	ret = vm_validate_pt_pd_bos(avm);
	if (unlikely(ret))
		goto out_unreserve;

	if (mem->mapped_to_gpu_memory == 0 &&
	    !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		/* Validate BO only once. The eviction fence gets added to BO
		 * the first time it is mapped. Validate will wait for all
		 * background evictions to complete.
		 */
		ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
		if (ret) {
			pr_debug("Validate failed\n");
			goto out_unreserve;
		}
	}

	list_for_each_entry(entry, &mem->attachments, list) {
		if (entry->bo_va->base.vm != avm || entry->is_mapped)
			continue;

		pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
			 entry->va, entry->va + bo_size, entry);

		ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
				      is_invalid_userptr);
		if (ret) {
			pr_err("Failed to map bo to gpuvm\n");
			goto out_unreserve;
		}

		ret = vm_update_pds(avm, ctx.sync);
		if (ret) {
			pr_err("Failed to update page directories\n");
			goto out_unreserve;
		}

		entry->is_mapped = true;
		mem->mapped_to_gpu_memory++;
		pr_debug("\t INC mapping count %d\n",
			 mem->mapped_to_gpu_memory);
	}

	if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
		dma_resv_add_fence(bo->tbo.base.resv,
				   &avm->process_info->eviction_fence->base,
				   DMA_RESV_USAGE_BOOKKEEP);
	ret = unreserve_bo_and_vms(&ctx, false, false);

	goto out;

out_unreserve:
	unreserve_bo_and_vms(&ctx, false, false);
out:
	mutex_unlock(&mem->process_info->lock);
	mutex_unlock(&mem->lock);
	return ret;
}
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv)
{
	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
	struct amdkfd_process_info *process_info = avm->process_info;
	unsigned long bo_size = mem->bo->tbo.base.size;
	struct kfd_mem_attachment *entry;
	struct bo_vm_reservation_context ctx;
	int ret;

	mutex_lock(&mem->lock);

	ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx);
	if (unlikely(ret))
		goto out;
	/* If no VMs were reserved, it means the BO wasn't actually mapped */
	if (ctx.n_vms == 0) {
		ret = -EINVAL;
		goto unreserve_out;
	}

	ret = vm_validate_pt_pd_bos(avm);
	if (unlikely(ret))
		goto unreserve_out;

	pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
		mem->va,
		mem->va + bo_size * (1 + mem->aql_queue),
		avm);

	list_for_each_entry(entry, &mem->attachments, list) {
		if (entry->bo_va->base.vm != avm || !entry->is_mapped)
			continue;

		pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
			 entry->va, entry->va + bo_size, entry);

		unmap_bo_from_gpuvm(mem, entry, ctx.sync);
		entry->is_mapped = false;

		mem->mapped_to_gpu_memory--;
		pr_debug("\t DEC mapping count %d\n",
			 mem->mapped_to_gpu_memory);
	}

	/* If BO is unmapped from all VMs, unfence it. It can be evicted if
	 * required.
	 */
	if (mem->mapped_to_gpu_memory == 0 &&
	    !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) &&
	    !mem->bo->tbo.pin_count)
		amdgpu_amdkfd_remove_eviction_fence(mem->bo,
						process_info->eviction_fence);

unreserve_out:
	unreserve_bo_and_vms(&ctx, false, false);
out:
	mutex_unlock(&mem->lock);
	return ret;
}
int amdgpu_amdkfd_gpuvm_sync_memory(
		struct amdgpu_device *adev, struct kgd_mem *mem, bool intr)
{
	struct amdgpu_sync sync;
	int ret;

	amdgpu_sync_create(&sync);

	mutex_lock(&mem->lock);
	amdgpu_sync_clone(&mem->sync, &sync);
	mutex_unlock(&mem->lock);

	ret = amdgpu_sync_wait(&sync, intr);
	amdgpu_sync_free(&sync);
	return ret;
}
/**
 * amdgpu_amdkfd_map_gtt_bo_to_gart - Map BO to GART and increment reference count
 * @adev: Device to which allocated BO belongs
 * @bo: Buffer object to be mapped
 *
 * Before return, bo reference count is incremented. To release the reference and unpin/
 * unmap the BO, call amdgpu_amdkfd_free_gtt_mem.
 */
int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_device *adev, struct amdgpu_bo *bo)
{
	int ret;

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		pr_err("Failed to reserve bo. ret %d\n", ret);
		goto err_reserve_bo_failed;
	}

	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (ret) {
		pr_err("Failed to pin bo. ret %d\n", ret);
		goto err_pin_bo_failed;
	}

	ret = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (ret) {
		pr_err("Failed to bind bo to GART. ret %d\n", ret);
		goto err_map_bo_gart_failed;
	}

	amdgpu_amdkfd_remove_eviction_fence(
		bo, bo->vm_bo->vm->process_info->eviction_fence);

	amdgpu_bo_unreserve(bo);

	bo = amdgpu_bo_ref(bo);

	return 0;

err_map_bo_gart_failed:
	amdgpu_bo_unpin(bo);
err_pin_bo_failed:
	amdgpu_bo_unreserve(bo);
err_reserve_bo_failed:

	return ret;
}
/** amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel() - Map a GTT BO for kernel CPU access
 *
 * @mem: Buffer object to be mapped for CPU access
 * @kptr[out]: pointer in kernel CPU address space
 * @size[out]: size of the buffer
 *
 * Pins the BO and maps it for kernel CPU access. The eviction fence is removed
 * from the BO, since pinned BOs cannot be evicted. The bo must remain on the
 * validate_list, so the GPU mapping can be restored after a page table was
 * updated.
 *
 * Return: 0 on success, error code on failure
 */
int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
					     void **kptr, uint64_t *size)
{
	int ret;
	struct amdgpu_bo *bo = mem->bo;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		pr_err("userptr can't be mapped to kernel\n");
		return -EINVAL;
	}

	mutex_lock(&mem->process_info->lock);

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		pr_err("Failed to reserve bo. ret %d\n", ret);
		goto bo_reserve_failed;
	}

	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (ret) {
		pr_err("Failed to pin bo. ret %d\n", ret);
		goto pin_failed;
	}

	ret = amdgpu_bo_kmap(bo, kptr);
	if (ret) {
		pr_err("Failed to map bo to kernel. ret %d\n", ret);
		goto kmap_failed;
	}

	amdgpu_amdkfd_remove_eviction_fence(
		bo, mem->process_info->eviction_fence);

	if (size)
		*size = amdgpu_bo_size(bo);

	amdgpu_bo_unreserve(bo);

	mutex_unlock(&mem->process_info->lock);
	return 0;

kmap_failed:
	amdgpu_bo_unpin(bo);
pin_failed:
	amdgpu_bo_unreserve(bo);
bo_reserve_failed:
	mutex_unlock(&mem->process_info->lock);

	return ret;
}
2214 /** amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel() - Unmap a GTT BO for kernel CPU access
2216 * @mem: Buffer object to be unmapped for CPU access
2218 * Removes the kernel CPU mapping and unpins the BO. It does not restore the
2219 * eviction fence, so this function should only be used for cleanup before the
void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem)
{
	struct amdgpu_bo *bo = mem->bo;

	amdgpu_bo_reserve(bo, true);
	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);
	amdgpu_bo_unreserve(bo);
}
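/*
 * Usage sketch (illustrative): kernel CPU access to a GTT BO brackets the
 * access with the map/unmap pair above; the kptr and size locals are assumed.
 *
 *	void *kptr;
 *	uint64_t size;
 *
 *	ret = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(mem, &kptr, &size);
 *	if (ret)
 *		return ret;
 *	memset(kptr, 0, size);		// CPU writes through the mapping
 *	amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
 */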
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
					  struct kfd_vm_fault_info *mem)
{
	if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
		*mem = *adev->gmc.vm_fault_info;
		mb(); /* make sure read happened */
		atomic_set(&adev->gmc.vm_fault_info_updated, 0);
	}
	return 0;
}
int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
				      struct dma_buf *dma_buf,
				      uint64_t va, void *drm_priv,
				      struct kgd_mem **mem, uint64_t *size,
				      uint64_t *mmap_offset)
{
	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	int ret;

	obj = amdgpu_gem_prime_import(adev_to_drm(adev), dma_buf);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	bo = gem_to_amdgpu_bo(obj);
	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT))) {
		/* Only VRAM and GTT BOs are supported */
		ret = -EINVAL;
		goto err_put_obj;
	}

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem) {
		ret = -ENOMEM;
		goto err_put_obj;
	}

	ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
	if (ret)
		goto err_free_mem;

	if (size)
		*size = amdgpu_bo_size(bo);

	if (mmap_offset)
		*mmap_offset = amdgpu_bo_mmap_offset(bo);

	INIT_LIST_HEAD(&(*mem)->attachments);
	mutex_init(&(*mem)->lock);

	(*mem)->alloc_flags =
		((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
		KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
		| KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
		| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;

	get_dma_buf(dma_buf);
	(*mem)->dmabuf = dma_buf;

	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) && !adev->gmc.is_app_apu ?
		AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;

	(*mem)->mapped_to_gpu_memory = 0;
	(*mem)->process_info = avm->process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
	amdgpu_sync_create(&(*mem)->sync);
	(*mem)->is_imported = true;

	return 0;

err_free_mem:
	kfree(*mem);
err_put_obj:
	drm_gem_object_put(obj);
	return ret;
}
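/*
 * Usage sketch (illustrative): importing a dma-buf that arrived as a file
 * descriptor from user space. dma_buf_get()/dma_buf_put() are the generic
 * kernel helpers; on success the import above holds its own reference via
 * get_dma_buf(), so the caller can drop the lookup reference right away.
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *	ret = amdgpu_amdkfd_gpuvm_import_dmabuf(adev, dmabuf, va, drm_priv,
 *						&mem, &size, &mmap_offset);
 *	dma_buf_put(dmabuf);
 */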
int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem,
				      struct dma_buf **dma_buf)
{
	int ret;

	mutex_lock(&mem->lock);
	ret = kfd_mem_export_dmabuf(mem);
	if (ret)
		goto out;

	get_dma_buf(mem->dmabuf);
	*dma_buf = mem->dmabuf;
out:
	mutex_unlock(&mem->lock);
	return ret;
}
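/*
 * Usage sketch (illustrative): the caller receives an extra reference on the
 * exported dma-buf and is responsible for dropping it, e.g. after installing
 * an fd for user space.
 *
 *	struct dma_buf *dmabuf;
 *
 *	ret = amdgpu_amdkfd_gpuvm_export_dmabuf(mem, &dmabuf);
 *	if (ret)
 *		return ret;
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *	if (fd < 0)
 *		dma_buf_put(dmabuf);	// nothing took ownership
 */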
/* Evict a userptr BO by stopping the queues if necessary
 *
 * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
 * cannot do any memory allocations, and cannot take any locks that
 * are held elsewhere while allocating memory.
 *
 * It doesn't do anything to the BO itself. The real work happens in
 * restore, where we get updated page addresses. This function only
 * ensures that GPU access to the BO is stopped.
 */
int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
				unsigned long cur_seq, struct kgd_mem *mem)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	int r = 0;

	/* Do not process MMU notifications during CRIU restore until
	 * KFD_CRIU_OP_RESUME IOCTL is received
	 */
	if (READ_ONCE(process_info->block_mmu_notifications))
		return 0;

	mutex_lock(&process_info->notifier_lock);
	mmu_interval_set_seq(mni, cur_seq);

	mem->invalid++;
	if (++process_info->evicted_bos == 1) {
		/* First eviction, stop the queues */
		r = kgd2kfd_quiesce_mm(mni->mm,
				       KFD_QUEUE_EVICTION_TRIGGER_USERPTR);
		if (r)
			pr_err("Failed to quiesce KFD\n");
		schedule_delayed_work(&process_info->restore_userptr_work,
			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
	}
	mutex_unlock(&process_info->notifier_lock);

	return r;
}
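/*
 * Eviction/restore hand-off in brief (a sketch of the protocol implemented
 * above and in the restore worker below, not additional driver logic):
 *
 *	MMU notifier fires -> amdgpu_amdkfd_evict_userptr()
 *		++evicted_bos == 1: quiesce queues, schedule restore work
 *	restore worker     -> update and validate user pages
 *		evicted_bos unchanged: resume queues
 *		otherwise:             reschedule and try again
 */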
/* Update invalid userptr BOs
 *
 * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
 * userptr_inval_list and updates user pages for all BOs that have
 * been invalidated since their last update.
 */
static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
				     struct mm_struct *mm)
{
	struct kgd_mem *mem, *tmp_mem;
	struct amdgpu_bo *bo;
	struct ttm_operation_ctx ctx = { false, false };
	uint32_t invalid;
	int ret = 0;

	mutex_lock(&process_info->notifier_lock);

	/* Move all invalidated BOs to the userptr_inval_list */
	list_for_each_entry_safe(mem, tmp_mem,
				 &process_info->userptr_valid_list,
				 validate_list.head)
		if (mem->invalid)
			list_move_tail(&mem->validate_list.head,
				       &process_info->userptr_inval_list);

	/* Go through userptr_inval_list and update any invalid user_pages */
	list_for_each_entry(mem, &process_info->userptr_inval_list,
			    validate_list.head) {
		invalid = mem->invalid;
		if (!invalid)
			/* BO hasn't been invalidated since the last
			 * revalidation attempt. Keep its page list.
			 */
			continue;

		bo = mem->bo;

		amdgpu_ttm_tt_discard_user_pages(bo->tbo.ttm, mem->range);
		mem->range = NULL;

		/* BO reservations and getting user pages (hmm_range_fault)
		 * must happen outside the notifier lock
		 */
		mutex_unlock(&process_info->notifier_lock);

		/* Move the BO to system (CPU) domain if necessary to unmap
		 * and free the SG table
		 */
		if (bo->tbo.resource->mem_type != TTM_PL_SYSTEM) {
			if (amdgpu_bo_reserve(bo, true))
				return -EAGAIN;
			amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			amdgpu_bo_unreserve(bo);
			if (ret) {
				pr_err("%s: Failed to invalidate userptr BO\n",
				       __func__);
				return -EAGAIN;
			}
		}

		/* Get updated user pages */
		ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
						   &mem->range);
		if (ret) {
			pr_debug("Failed %d to get user pages\n", ret);

			/* Return -EFAULT bad address error as success. It will
			 * fail later with a VM fault if the GPU tries to access
			 * it. Better than hanging indefinitely with stalled
			 * user mode queues.
			 *
			 * Return other error -EBUSY or -ENOMEM to retry restore
			 */
			if (ret != -EFAULT)
				return ret;

			ret = 0;
		}

		mutex_lock(&process_info->notifier_lock);

		/* Mark the BO as valid unless it was invalidated
		 * again concurrently.
		 */
		if (mem->invalid != invalid) {
			ret = -EAGAIN;
			goto unlock_out;
		}
		/* set mem valid if mem has hmm range associated */
		if (mem->range)
			mem->invalid = 0;
	}

unlock_out:
	mutex_unlock(&process_info->notifier_lock);

	return ret;
}
/* Validate invalid userptr BOs
 *
 * Validates BOs on the userptr_inval_list. Also updates GPUVM page tables
 * with new page addresses and waits for the page table updates to complete.
 */
static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
{
	struct amdgpu_bo_list_entry *pd_bo_list_entries;
	struct list_head resv_list, duplicates;
	struct ww_acquire_ctx ticket;
	struct amdgpu_sync sync;

	struct amdgpu_vm *peer_vm;
	struct kgd_mem *mem, *tmp_mem;
	struct amdgpu_bo *bo;
	struct ttm_operation_ctx ctx = { false, false };
	int i, ret;

	pd_bo_list_entries = kcalloc(process_info->n_vms,
				     sizeof(struct amdgpu_bo_list_entry),
				     GFP_KERNEL);
	if (!pd_bo_list_entries) {
		pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
		ret = -ENOMEM;
		goto out_no_mem;
	}

	INIT_LIST_HEAD(&resv_list);
	INIT_LIST_HEAD(&duplicates);

	/* Get all the page directory BOs that need to be reserved */
	i = 0;
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node)
		amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
				    &pd_bo_list_entries[i++]);
	/* Add the userptr_inval_list entries to resv_list */
	list_for_each_entry(mem, &process_info->userptr_inval_list,
			    validate_list.head) {
		list_add_tail(&mem->resv_list.head, &resv_list);
		mem->resv_list.bo = mem->validate_list.bo;
		mem->resv_list.num_shared = mem->validate_list.num_shared;
	}

	/* Reserve all BOs and page tables for validation */
	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
	WARN(!list_empty(&duplicates), "Duplicates should be empty");
	if (ret)
		goto out_free;

	amdgpu_sync_create(&sync);

	ret = process_validate_vms(process_info);
	if (ret)
		goto unreserve_out;

	/* Validate BOs and update GPUVM page tables */
	list_for_each_entry_safe(mem, tmp_mem,
				 &process_info->userptr_inval_list,
				 validate_list.head) {
		struct kfd_mem_attachment *attachment;

		bo = mem->bo;

		/* Validate the BO if we got user pages */
		if (bo->tbo.ttm->pages[0]) {
			amdgpu_bo_placement_from_domain(bo, mem->domain);
			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (ret) {
				pr_err("%s: failed to validate BO\n", __func__);
				goto unreserve_out;
			}
		}

		/* Update mapping. If the BO was not validated
		 * (because we couldn't get user pages), this will
		 * clear the page table entries, which will result in
		 * VM faults if the GPU tries to access the invalid
		 * memory.
		 */
		list_for_each_entry(attachment, &mem->attachments, list) {
			if (!attachment->is_mapped)
				continue;

			kfd_mem_dmaunmap_attachment(mem, attachment);
			ret = update_gpuvm_pte(mem, attachment, &sync);
			if (ret) {
				pr_err("%s: update PTE failed\n", __func__);
				/* make sure this gets validated again */
				mutex_lock(&process_info->notifier_lock);
				mem->invalid++;
				mutex_unlock(&process_info->notifier_lock);
				goto unreserve_out;
			}
		}
	}

	/* Update page directories */
	ret = process_update_pds(process_info, &sync);

unreserve_out:
	ttm_eu_backoff_reservation(&ticket, &resv_list);
	amdgpu_sync_wait(&sync, false);
	amdgpu_sync_free(&sync);
out_free:
	kfree(pd_bo_list_entries);
out_no_mem:

	return ret;
}
/* Confirm that all user pages are valid while holding the notifier lock
 *
 * Moves valid BOs from the userptr_inval_list back to userptr_valid_list.
 */
static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_info)
{
	struct kgd_mem *mem, *tmp_mem;
	int ret = 0;

	list_for_each_entry_safe(mem, tmp_mem,
				 &process_info->userptr_inval_list,
				 validate_list.head) {
		bool valid;

		/* keep mem without hmm range at userptr_inval_list */
		if (!mem->range)
			continue;

		/* Only check mem with hmm range associated */
		valid = amdgpu_ttm_tt_get_user_pages_done(
					mem->bo->tbo.ttm, mem->range);

		mem->range = NULL;
		if (!valid) {
			WARN(!mem->invalid, "Invalid BO not marked invalid");
			ret = -EAGAIN;
			continue;
		}

		if (mem->invalid) {
			WARN(1, "Valid BO is marked invalid");
			ret = -EAGAIN;
			continue;
		}

		list_move_tail(&mem->validate_list.head,
			       &process_info->userptr_valid_list);
	}

	return ret;
}
/* Worker callback to restore evicted userptr BOs
 *
 * Tries to update and validate all userptr BOs. If successful and no
 * concurrent evictions happened, the queues are restarted. Otherwise,
 * reschedule for another attempt later.
 */
static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct amdkfd_process_info *process_info =
		container_of(dwork, struct amdkfd_process_info,
			     restore_userptr_work);
	struct task_struct *usertask;
	struct mm_struct *mm;
	uint32_t evicted_bos;

	mutex_lock(&process_info->notifier_lock);
	evicted_bos = process_info->evicted_bos;
	mutex_unlock(&process_info->notifier_lock);
	if (!evicted_bos)
		return;

	/* Reference task and mm in case of concurrent process termination */
	usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
	if (!usertask)
		return;
	mm = get_task_mm(usertask);
	if (!mm) {
		put_task_struct(usertask);
		return;
	}

	mutex_lock(&process_info->lock);

	if (update_invalid_user_pages(process_info, mm))
		goto unlock_out;
	/* userptr_inval_list can be empty if all evicted userptr BOs
	 * have been freed. In that case there is nothing to validate
	 * and we can just restart the queues.
	 */
	if (!list_empty(&process_info->userptr_inval_list)) {
		if (validate_invalid_user_pages(process_info))
			goto unlock_out;
	}
	/* Final check for concurrent eviction and atomic update. If
	 * another eviction happens after successful update, it will
	 * be a first eviction that calls quiesce_mm. The eviction
	 * reference counting inside KFD will handle this case.
	 */
	mutex_lock(&process_info->notifier_lock);
	if (process_info->evicted_bos != evicted_bos)
		goto unlock_notifier_out;

	if (confirm_valid_user_pages_locked(process_info)) {
		WARN(1, "User pages unexpectedly invalid");
		goto unlock_notifier_out;
	}

	process_info->evicted_bos = evicted_bos = 0;

	if (kgd2kfd_resume_mm(mm)) {
		pr_err("%s: Failed to resume KFD\n", __func__);
		/* No recovery from this failure. Probably the CP is
		 * hanging. No point trying again.
		 */
	}

unlock_notifier_out:
	mutex_unlock(&process_info->notifier_lock);
unlock_out:
	mutex_unlock(&process_info->lock);

	/* If validation failed, reschedule another attempt */
	if (evicted_bos) {
		schedule_delayed_work(&process_info->restore_userptr_work,
			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));

		kfd_smi_event_queue_restore_rescheduled(mm);
	}
	mmput(mm);
	put_task_struct(usertask);
}
/** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
 *   KFD process identified by process_info
 *
 * @process_info: amdkfd_process_info of the KFD process
 * @ef: Output pointer; receives a reference to the new eviction fence
 *
 * After memory eviction, restore thread calls this function. The function
 * should be called when the Process is still valid. BO restore involves -
 *
 * 1.  Release old eviction fence and create new one
 * 2.  Get two copies of PD BO list from all the VMs. Keep one copy as pd_list.
 * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
 *     BOs that need to be reserved.
 * 4.  Reserve all the BOs
 * 5.  Validate PD and PT BOs.
 * 6.  Validate all KFD BOs using kfd_bo_list, map them and add new fences
 * 7.  Add fence to all PD and PT BOs.
 * 8.  Unreserve all BOs
 */
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
{
	struct amdgpu_bo_list_entry *pd_bo_list;
	struct amdkfd_process_info *process_info = info;
	struct amdgpu_vm *peer_vm;
	struct kgd_mem *mem;
	struct bo_vm_reservation_context ctx;
	struct amdgpu_amdkfd_fence *new_fence;
	int ret = 0, i;
	struct list_head duplicate_save;
	struct amdgpu_sync sync_obj;
	unsigned long failed_size = 0;
	unsigned long total_size = 0;

	INIT_LIST_HEAD(&duplicate_save);
	INIT_LIST_HEAD(&ctx.list);
	INIT_LIST_HEAD(&ctx.duplicates);

	pd_bo_list = kcalloc(process_info->n_vms,
			     sizeof(struct amdgpu_bo_list_entry),
			     GFP_KERNEL);
	if (!pd_bo_list)
		return -ENOMEM;

	i = 0;
	mutex_lock(&process_info->lock);
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node)
		amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);

	/* Reserve all BOs and page tables/directory. Add all BOs from
	 * kfd_bo_list to ctx.list
	 */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head) {

		list_add_tail(&mem->resv_list.head, &ctx.list);
		mem->resv_list.bo = mem->validate_list.bo;
		mem->resv_list.num_shared = mem->validate_list.num_shared;
	}

	ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
				     false, &duplicate_save);
	if (ret) {
		pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
		goto ttm_reserve_fail;
	}

	amdgpu_sync_create(&sync_obj);

	/* Validate PDs and PTs */
	ret = process_validate_vms(process_info);
	if (ret)
		goto validate_map_fail;

	ret = process_sync_pds_resv(process_info, &sync_obj);
	if (ret) {
		pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
		goto validate_map_fail;
	}

	/* Validate BOs and map them to GPUVM (update VM page tables). */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head) {

		struct amdgpu_bo *bo = mem->bo;
		uint32_t domain = mem->domain;
		struct kfd_mem_attachment *attachment;
		struct dma_resv_iter cursor;
		struct dma_fence *fence;

		total_size += amdgpu_bo_size(bo);

		ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
		if (ret) {
			pr_debug("Memory eviction: Validate BOs failed\n");
			failed_size += amdgpu_bo_size(bo);
			ret = amdgpu_amdkfd_bo_validate(bo,
						AMDGPU_GEM_DOMAIN_GTT, false);
			if (ret) {
				pr_debug("Memory eviction: Try again\n");
				goto validate_map_fail;
			}
		}
		dma_resv_for_each_fence(&cursor, bo->tbo.base.resv,
					DMA_RESV_USAGE_KERNEL, fence) {
			ret = amdgpu_sync_fence(&sync_obj, fence);
			if (ret) {
				pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
				goto validate_map_fail;
			}
		}
		list_for_each_entry(attachment, &mem->attachments, list) {
			if (!attachment->is_mapped)
				continue;

			kfd_mem_dmaunmap_attachment(mem, attachment);
			ret = update_gpuvm_pte(mem, attachment, &sync_obj);
			if (ret) {
				pr_debug("Memory eviction: update PTE failed. Try again\n");
				goto validate_map_fail;
			}
		}
	}

	if (failed_size)
		pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);

	/* Update page directories */
	ret = process_update_pds(process_info, &sync_obj);
	if (ret) {
		pr_debug("Memory eviction: update PDs failed. Try again\n");
		goto validate_map_fail;
	}

	/* Wait for validate and PT updates to finish */
	amdgpu_sync_wait(&sync_obj, false);

	/* Release old eviction fence and create new one, because fence only
	 * goes from unsignaled to signaled, fence cannot be reused.
	 * Use context and mm from the old fence.
	 */
	new_fence = amdgpu_amdkfd_fence_create(
				process_info->eviction_fence->base.context,
				process_info->eviction_fence->mm,
				NULL);
	if (!new_fence) {
		pr_err("Failed to create eviction fence\n");
		ret = -ENOMEM;
		goto validate_map_fail;
	}
	dma_fence_put(&process_info->eviction_fence->base);
	process_info->eviction_fence = new_fence;
	*ef = dma_fence_get(&new_fence->base);

	/* Attach new eviction fence to all BOs except pinned ones */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head) {
		if (mem->bo->tbo.pin_count)
			continue;

		dma_resv_add_fence(mem->bo->tbo.base.resv,
				   &process_info->eviction_fence->base,
				   DMA_RESV_USAGE_BOOKKEEP);
	}
	/* Attach eviction fence to PD / PT BOs */
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		struct amdgpu_bo *bo = peer_vm->root.bo;

		dma_resv_add_fence(bo->tbo.base.resv,
				   &process_info->eviction_fence->base,
				   DMA_RESV_USAGE_BOOKKEEP);
	}

validate_map_fail:
	ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
	amdgpu_sync_free(&sync_obj);
ttm_reserve_fail:
	mutex_unlock(&process_info->lock);
	kfree(pd_bo_list);
	return ret;
}
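/*
 * Usage sketch (illustrative, names assumed): the KFD restore worker is the
 * expected caller, handing in the opaque process_info pointer and receiving
 * a reference to the replacement eviction fence through ef.
 *
 *	struct dma_fence *ef = NULL;
 *
 *	ret = amdgpu_amdkfd_gpuvm_restore_process_bos(process_info, &ef);
 *	if (!ret)
 *		swap_eviction_fence(process, ef);	// hypothetical helper
 */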
int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
{
	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
	struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
	int ret;

	if (!info || !gws)
		return -EINVAL;

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem)
		return -ENOMEM;

	mutex_init(&(*mem)->lock);
	INIT_LIST_HEAD(&(*mem)->attachments);
	(*mem)->bo = amdgpu_bo_ref(gws_bo);
	(*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
	(*mem)->process_info = process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
	amdgpu_sync_create(&(*mem)->sync);

	/* Validate gws bo the first time it is added to process */
	mutex_lock(&(*mem)->process_info->lock);
	ret = amdgpu_bo_reserve(gws_bo, false);
	if (unlikely(ret)) {
		pr_err("Reserve gws bo failed %d\n", ret);
		goto bo_reservation_failure;
	}

	ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
	if (ret) {
		pr_err("GWS BO validate failed %d\n", ret);
		goto bo_validation_failure;
	}
	/* GWS resource is shared b/t amdgpu and amdkfd
	 * Add process eviction fence to bo so they can
	 * evict each other.
	 */
	ret = dma_resv_reserve_fences(gws_bo->tbo.base.resv, 1);
	if (ret)
		goto reserve_shared_fail;
	dma_resv_add_fence(gws_bo->tbo.base.resv,
			   &process_info->eviction_fence->base,
			   DMA_RESV_USAGE_BOOKKEEP);
	amdgpu_bo_unreserve(gws_bo);
	mutex_unlock(&(*mem)->process_info->lock);

	return ret;

reserve_shared_fail:
bo_validation_failure:
	amdgpu_bo_unreserve(gws_bo);
bo_reservation_failure:
	mutex_unlock(&(*mem)->process_info->lock);
	amdgpu_sync_free(&(*mem)->sync);
	remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
	amdgpu_bo_unref(&gws_bo);
	mutex_destroy(&(*mem)->lock);
	kfree(*mem);
	*mem = NULL;
	return ret;
}
int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
{
	int ret;
	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
	struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
	struct amdgpu_bo *gws_bo = kgd_mem->bo;

	/* Remove BO from process's validate list so restore worker won't touch
	 * it anymore
	 */
	remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);

	ret = amdgpu_bo_reserve(gws_bo, false);
	if (unlikely(ret)) {
		pr_err("Reserve gws bo failed %d\n", ret);
		/* TODO: add BO back to validate_list? */
		return ret;
	}
	amdgpu_amdkfd_remove_eviction_fence(gws_bo,
			process_info->eviction_fence);
	amdgpu_bo_unreserve(gws_bo);
	amdgpu_sync_free(&kgd_mem->sync);
	amdgpu_bo_unref(&gws_bo);
	mutex_destroy(&kgd_mem->lock);
	kfree(mem);
	return 0;
}
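/*
 * Usage sketch (illustrative): GWS attach and detach are paired per process.
 * "gws" is the device GWS BO and "gws_mem" the per-process handle created by
 * the add call; both names are assumed here.
 *
 *	struct kgd_mem *gws_mem;
 *
 *	ret = amdgpu_amdkfd_add_gws_to_process(process_info, gws, &gws_mem);
 *	if (ret)
 *		return ret;
 *	// ... queues use GWS ...
 *	ret = amdgpu_amdkfd_remove_gws_from_process(process_info, gws_mem);
 */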
/* Returns GPU-specific tiling mode information */
int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
				struct tile_config *config)
{
	config->gb_addr_config = adev->gfx.config.gb_addr_config;
	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
	config->num_tile_configs =
			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	config->macro_tile_config_ptr =
			adev->gfx.config.macrotile_mode_array;
	config->num_macro_tile_configs =
			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

	/* Those values are not set from GFX9 onwards */
	config->num_banks = adev->gfx.config.num_banks;
	config->num_ranks = adev->gfx.config.num_ranks;

	return 0;
}
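/*
 * Usage sketch (illustrative): the config struct is caller-owned and is only
 * filled with pointers into adev, so no allocation or cleanup is involved.
 *
 *	struct tile_config config;
 *
 *	amdgpu_amdkfd_get_tile_config(adev, &config);
 *	pr_debug("gb_addr_config 0x%x, %u tile modes\n",
 *		 config.gb_addr_config, config.num_tile_configs);
 */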
bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem)
{
	struct kfd_mem_attachment *entry;

	list_for_each_entry(entry, &mem->attachments, list) {
		if (entry->is_mapped && entry->adev == adev)
			return true;
	}
	return false;
}
#if defined(CONFIG_DEBUG_FS)

int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data)
{
	spin_lock(&kfd_mem_limit.mem_limit_lock);
	seq_printf(m, "System mem used %lldM out of %lluM\n",
		   (kfd_mem_limit.system_mem_used >> 20),
		   (kfd_mem_limit.max_system_mem_limit >> 20));
	seq_printf(m, "TTM mem used %lldM out of %lluM\n",
		   (kfd_mem_limit.ttm_mem_used >> 20),
		   (kfd_mem_limit.max_ttm_mem_limit >> 20));
	spin_unlock(&kfd_mem_limit.mem_limit_lock);

	return 0;
}

#endif