1 // SPDX-License-Identifier: MIT
3 * Copyright 2014-2018 Advanced Micro Devices, Inc.
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
23 #include <linux/dma-buf.h>
24 #include <linux/list.h>
25 #include <linux/pagemap.h>
26 #include <linux/sched/mm.h>
27 #include <linux/sched/task.h>
28 #include <drm/ttm/ttm_tt.h>
30 #include <drm/drm_exec.h>
32 #include "amdgpu_object.h"
33 #include "amdgpu_gem.h"
34 #include "amdgpu_vm.h"
35 #include "amdgpu_hmm.h"
36 #include "amdgpu_amdkfd.h"
37 #include "amdgpu_dma_buf.h"
38 #include <uapi/linux/kfd_ioctl.h>
39 #include "amdgpu_xgmi.h"
41 #include "kfd_smi_events.h"
43 /* Userptr restore delay, just long enough to allow consecutive VM
44 * changes to accumulate
46 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
47 #define AMDGPU_RESERVE_MEM_LIMIT (3UL << 29)
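/* 3UL << 29 bytes == 1.5 GiB held back from the KFD system memory limit */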
50 * Align VRAM availability to 2MB to avoid fragmentation caused by 4K allocations in the tail 2MB
53 #define VRAM_AVAILABLITY_ALIGN (1 << 21)
55 /* Impose limit on how much memory KFD can use */
57 uint64_t max_system_mem_limit;
58 uint64_t max_ttm_mem_limit;
59 int64_t system_mem_used;
61 spinlock_t mem_limit_lock;
64 static const char * const domain_bit_to_string[] = {
73 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
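/* ffs() returns the 1-based index of the lowest set bit, so a single-bit
 * domain mask (e.g. AMDGPU_GEM_DOMAIN_VRAM) maps directly to its name in
 * domain_bit_to_string[] above.
 */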
75 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
77 static bool kfd_mem_is_attached(struct amdgpu_vm *avm,
80 struct kfd_mem_attachment *entry;
82 list_for_each_entry(entry, &mem->attachments, list)
83 if (entry->bo_va->base.vm == avm)
90 * reuse_dmamap() - Check whether adev can share the original
93 * If both adev and bo_adev are in direct mapping or
94 * in the same iommu group, they can share the original BO.
 * @adev: Device that may or may not be able to share the original BO
 * @bo_adev: Device to which the allocated BO belongs
 * Return: true if adev can share the original userptr BO,
102 static bool reuse_dmamap(struct amdgpu_device *adev, struct amdgpu_device *bo_adev)
104 return (adev->ram_is_direct_mapped && bo_adev->ram_is_direct_mapped) ||
105 (adev->dev->iommu_group == bo_adev->dev->iommu_group);
/* Set memory usage limits. Currently, limits are
109 * System (TTM + userptr) memory - 15/16th System RAM
110 * TTM memory - 3/8th System RAM
112 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
117 if (kfd_mem_limit.max_system_mem_limit)
121 mem = si.totalram - si.totalhigh;
124 spin_lock_init(&kfd_mem_limit.mem_limit_lock);
125 kfd_mem_limit.max_system_mem_limit = mem - (mem >> 6);
126 if (kfd_mem_limit.max_system_mem_limit < 2 * AMDGPU_RESERVE_MEM_LIMIT)
127 kfd_mem_limit.max_system_mem_limit >>= 1;
129 kfd_mem_limit.max_system_mem_limit -= AMDGPU_RESERVE_MEM_LIMIT;
131 kfd_mem_limit.max_ttm_mem_limit = ttm_tt_pages_limit() << PAGE_SHIFT;
132 pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
133 (kfd_mem_limit.max_system_mem_limit >> 20),
134 (kfd_mem_limit.max_ttm_mem_limit >> 20));
137 void amdgpu_amdkfd_reserve_system_mem(uint64_t size)
139 kfd_mem_limit.system_mem_used += size;
142 /* Estimate page table size needed to represent a given memory size
144 * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
145 * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
146 * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
147 * for 2MB pages for TLB efficiency. However, small allocations and
148 * fragmented system memory still need some 4KB pages. We choose a
149 * compromise that should work in most cases without reserving too
150 * much memory for page tables unnecessarily (factor 16K, >> 14).
153 #define ESTIMATE_PT_SIZE(mem_size) max(((mem_size) >> 14), AMDGPU_VM_RESERVED_VRAM)
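/* Worked example: a 64 GiB allocation reserves 64 GiB >> 14 = 4 MiB for page
 * tables, unless AMDGPU_VM_RESERVED_VRAM is larger.
 */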
156 * amdgpu_amdkfd_reserve_mem_limit() - Decrease available memory by size
 * @adev: Device to which the allocated BO belongs
 * @size: Size of buffer, in bytes, encapsulated by BO. This should be
 * equivalent to amdgpu_bo_size(BO)
162 * @alloc_flag: Flag used in allocating a BO as noted above
 * @xcp_id: xcp_id is used to get the xcp from the xcp manager; one xcp is
 * managed as one compute node by the driver for the application
167 * returns -ENOMEM in case of error, ZERO otherwise
169 int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
170 uint64_t size, u32 alloc_flag, int8_t xcp_id)
172 uint64_t reserved_for_pt =
173 ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
174 size_t system_mem_needed, ttm_mem_needed, vram_needed;
176 uint64_t vram_size = 0;
178 system_mem_needed = 0;
181 if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
182 system_mem_needed = size;
183 ttm_mem_needed = size;
184 } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
186 * Conservatively round up the allocation requirement to 2 MB
187 * to avoid fragmentation caused by 4K allocations in the tail
192 * For GFX 9.4.3, get the VRAM size from XCP structs
194 if (WARN_ONCE(xcp_id < 0, "invalid XCP ID %d", xcp_id))
197 vram_size = KFD_XCP_MEMORY_SIZE(adev, xcp_id);
198 if (adev->gmc.is_app_apu) {
199 system_mem_needed = size;
200 ttm_mem_needed = size;
202 } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
203 system_mem_needed = size;
204 } else if (!(alloc_flag &
205 (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
206 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
207 pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
211 spin_lock(&kfd_mem_limit.mem_limit_lock);
213 if (kfd_mem_limit.system_mem_used + system_mem_needed >
214 kfd_mem_limit.max_system_mem_limit)
215 pr_debug("Set no_system_mem_limit=1 if using shared memory\n");
217 if ((kfd_mem_limit.system_mem_used + system_mem_needed >
218 kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
219 (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
220 kfd_mem_limit.max_ttm_mem_limit) ||
221 (adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] + vram_needed >
222 vram_size - reserved_for_pt)) {
227 /* Update memory accounting by decreasing available system
228 * memory, TTM memory and GPU memory as computed above
230 WARN_ONCE(vram_needed && !adev,
231 "adev reference can't be null when vram is used");
232 if (adev && xcp_id >= 0) {
233 adev->kfd.vram_used[xcp_id] += vram_needed;
234 adev->kfd.vram_used_aligned[xcp_id] += adev->gmc.is_app_apu ?
236 ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
238 kfd_mem_limit.system_mem_used += system_mem_needed;
239 kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
242 spin_unlock(&kfd_mem_limit.mem_limit_lock);
246 void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
247 uint64_t size, u32 alloc_flag, int8_t xcp_id)
249 spin_lock(&kfd_mem_limit.mem_limit_lock);
251 if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
252 kfd_mem_limit.system_mem_used -= size;
253 kfd_mem_limit.ttm_mem_used -= size;
254 } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
256 "adev reference can't be null when alloc mem flags vram is set");
257 if (WARN_ONCE(xcp_id < 0, "invalid XCP ID %d", xcp_id))
261 adev->kfd.vram_used[xcp_id] -= size;
262 if (adev->gmc.is_app_apu) {
263 adev->kfd.vram_used_aligned[xcp_id] -= size;
264 kfd_mem_limit.system_mem_used -= size;
265 kfd_mem_limit.ttm_mem_used -= size;
267 adev->kfd.vram_used_aligned[xcp_id] -=
268 ALIGN(size, VRAM_AVAILABLITY_ALIGN);
271 } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
272 kfd_mem_limit.system_mem_used -= size;
273 } else if (!(alloc_flag &
274 (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
275 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
276 pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
279 WARN_ONCE(adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] < 0,
280 "KFD VRAM memory accounting unbalanced for xcp: %d", xcp_id);
281 WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
282 "KFD TTM memory accounting unbalanced");
283 WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
284 "KFD system memory accounting unbalanced");
287 spin_unlock(&kfd_mem_limit.mem_limit_lock);
290 void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
292 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
293 u32 alloc_flags = bo->kfd_bo->alloc_flags;
294 u64 size = amdgpu_bo_size(bo);
296 amdgpu_amdkfd_unreserve_mem_limit(adev, size, alloc_flags,
 * create_dmamap_sg_bo() - Creates an amdgpu_bo object to reflect information
 * about a USERPTR or DOORBELL or MMIO BO.
306 * @adev: Device for which dmamap BO is being created
307 * @mem: BO of peer device that is being DMA mapped. Provides parameters
308 * in building the dmamap BO
309 * @bo_out: Output parameter updated with handle of dmamap BO
312 create_dmamap_sg_bo(struct amdgpu_device *adev,
313 struct kgd_mem *mem, struct amdgpu_bo **bo_out)
315 struct drm_gem_object *gem_obj;
319 ret = amdgpu_bo_reserve(mem->bo, false);
323 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR)
324 flags |= mem->bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
325 AMDGPU_GEM_CREATE_UNCACHED);
327 ret = amdgpu_gem_object_create(adev, mem->bo->tbo.base.size, 1,
328 AMDGPU_GEM_DOMAIN_CPU, AMDGPU_GEM_CREATE_PREEMPTIBLE | flags,
329 ttm_bo_type_sg, mem->bo->tbo.base.resv, &gem_obj, 0);
331 amdgpu_bo_unreserve(mem->bo);
334 pr_err("Error in creating DMA mappable SG BO on domain: %d\n", ret);
338 *bo_out = gem_to_amdgpu_bo(gem_obj);
339 (*bo_out)->parent = amdgpu_bo_ref(mem->bo);
343 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
344 * reservation object.
346 * @bo: [IN] Remove eviction fence(s) from this BO
347 * @ef: [IN] This eviction fence is removed if it
348 * is present in the shared list.
350 * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
352 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
353 struct amdgpu_amdkfd_fence *ef)
355 struct dma_fence *replacement;
/* TODO: Instead of blocking before, we should use the fence of the page
 * table update and TLB flush here directly.
363 replacement = dma_fence_get_stub();
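/* The stub fence is already signaled, so replacing all fences of the
 * eviction fence's context with it effectively drops the eviction fence
 * from the reservation object.
 */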
364 dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
365 replacement, DMA_RESV_USAGE_BOOKKEEP);
366 dma_fence_put(replacement);
370 int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
372 struct amdgpu_bo *root = bo;
373 struct amdgpu_vm_bo_base *vm_bo;
374 struct amdgpu_vm *vm;
375 struct amdkfd_process_info *info;
376 struct amdgpu_amdkfd_fence *ef;
379 /* we can always get vm_bo from root PD bo.*/
391 info = vm->process_info;
392 if (!info || !info->eviction_fence)
395 ef = container_of(dma_fence_get(&info->eviction_fence->base),
396 struct amdgpu_amdkfd_fence, base);
398 BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
399 ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
400 dma_resv_unlock(bo->tbo.base.resv);
402 dma_fence_put(&ef->base);
406 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
409 struct ttm_operation_ctx ctx = { false, false };
412 if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
413 "Called with userptr BO"))
416 amdgpu_bo_placement_from_domain(bo, domain);
418 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
422 amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
428 static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
430 return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
433 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
435 * Page directories are not updated here because huge page handling
436 * during page table updates can invalidate page directory entries
437 * again. Page directories are only updated after updating page
440 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
442 struct amdgpu_bo *pd = vm->root.bo;
443 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
446 ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL);
448 pr_err("failed to validate PT BOs\n");
452 vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo);
457 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
459 struct amdgpu_bo *pd = vm->root.bo;
460 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
463 ret = amdgpu_vm_update_pdes(adev, vm, false);
467 return amdgpu_sync_fence(sync, vm->last_update);
470 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
472 uint32_t mapping_flags = AMDGPU_VM_PAGE_READABLE |
473 AMDGPU_VM_MTYPE_DEFAULT;
475 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
476 mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
477 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
478 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
480 return amdgpu_gem_va_map_flags(adev, mapping_flags);
484 * create_sg_table() - Create an sg_table for a contiguous DMA addr range
485 * @addr: The starting address to point to
486 * @size: Size of memory area in bytes being pointed to
488 * Allocates an instance of sg_table and initializes it to point to memory
 * area specified by the input parameters. The address used to build the table
 * is assumed to be DMA mapped, if needed.
492 * DOORBELL or MMIO BOs use only one scatterlist node in their sg_table
493 * because they are physically contiguous.
495 * Return: Initialized instance of SG Table or NULL
497 static struct sg_table *create_sg_table(uint64_t addr, uint32_t size)
499 struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
503 if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
507 sg_dma_address(sg->sgl) = addr;
508 sg->sgl->length = size;
509 #ifdef CONFIG_NEED_SG_DMA_LENGTH
510 sg->sgl->dma_length = size;
516 kfd_mem_dmamap_userptr(struct kgd_mem *mem,
517 struct kfd_mem_attachment *attachment)
519 enum dma_data_direction direction =
520 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
521 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
522 struct ttm_operation_ctx ctx = {.interruptible = true};
523 struct amdgpu_bo *bo = attachment->bo_va->base.bo;
524 struct amdgpu_device *adev = attachment->adev;
525 struct ttm_tt *src_ttm = mem->bo->tbo.ttm;
526 struct ttm_tt *ttm = bo->tbo.ttm;
529 if (WARN_ON(ttm->num_pages != src_ttm->num_pages))
532 ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL);
533 if (unlikely(!ttm->sg))
536 /* Same sequence as in amdgpu_ttm_tt_pin_userptr */
537 ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages,
539 (u64)ttm->num_pages << PAGE_SHIFT,
544 ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
548 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
549 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
556 dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
558 pr_err("DMA map userptr failed: %d\n", ret);
559 sg_free_table(ttm->sg);
567 kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment)
569 struct ttm_operation_ctx ctx = {.interruptible = true};
570 struct amdgpu_bo *bo = attachment->bo_va->base.bo;
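	/* Move the BO through the CPU domain first so any previous DMA mapping
	 * is invalidated (see kfd_mem_dmaunmap_dmabuf), then validate it back
	 * into GTT to create the new mapping through the dma-buf attachment.
	 */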
573 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
574 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
578 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
579 return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
583 * kfd_mem_dmamap_sg_bo() - Create DMA mapped sg_table to access DOORBELL or MMIO BO
584 * @mem: SG BO of the DOORBELL or MMIO resource on the owning device
585 * @attachment: Virtual address attachment of the BO on accessing device
 * An access request from the device that owns the DOORBELL does not require DMA mapping.
 * This is because the request doesn't go through the PCIe root complex, i.e. it instead
 * loops back. The need to DMA map arises only when accessing a peer device's DOORBELL
591 * In contrast, all access requests for MMIO need to be DMA mapped without regard to
592 * device ownership. This is because access requests for MMIO go through PCIe root
595 * This is accomplished in two steps:
596 * - Obtain DMA mapped address of DOORBELL or MMIO memory that could be used
597 * in updating requesting device's page table
598 * - Signal TTM to mark memory pointed to by requesting device's BO as GPU
599 * accessible. This allows an update of requesting device's page table
 * with entries associated with DOORBELL or MMIO memory
602 * This method is invoked in the following contexts:
603 * - Mapping of DOORBELL or MMIO BO of same or peer device
 * - Validating an evicted DOORBELL or MMIO BO on a device seeking access
606 * Return: ZERO if successful, NON-ZERO otherwise
609 kfd_mem_dmamap_sg_bo(struct kgd_mem *mem,
610 struct kfd_mem_attachment *attachment)
612 struct ttm_operation_ctx ctx = {.interruptible = true};
613 struct amdgpu_bo *bo = attachment->bo_va->base.bo;
614 struct amdgpu_device *adev = attachment->adev;
615 struct ttm_tt *ttm = bo->tbo.ttm;
616 enum dma_data_direction dir;
/* Expect SG Table of dmamap BO to be NULL */
622 mmio = (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP);
623 if (unlikely(ttm->sg)) {
624 pr_err("SG Table of %d BO for peer device is UNEXPECTEDLY NON-NULL", mmio);
628 dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
629 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
630 dma_addr = mem->bo->tbo.sg->sgl->dma_address;
631 pr_debug("%d BO size: %d\n", mmio, mem->bo->tbo.sg->sgl->length);
632 pr_debug("%d BO address before DMA mapping: %llx\n", mmio, dma_addr);
633 dma_addr = dma_map_resource(adev->dev, dma_addr,
634 mem->bo->tbo.sg->sgl->length, dir, DMA_ATTR_SKIP_CPU_SYNC);
635 ret = dma_mapping_error(adev->dev, dma_addr);
638 pr_debug("%d BO address after DMA mapping: %llx\n", mmio, dma_addr);
640 ttm->sg = create_sg_table(dma_addr, mem->bo->tbo.sg->sgl->length);
641 if (unlikely(!ttm->sg)) {
646 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
647 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
654 sg_free_table(ttm->sg);
658 dma_unmap_resource(adev->dev, dma_addr, mem->bo->tbo.sg->sgl->length,
659 dir, DMA_ATTR_SKIP_CPU_SYNC);
664 kfd_mem_dmamap_attachment(struct kgd_mem *mem,
665 struct kfd_mem_attachment *attachment)
667 switch (attachment->type) {
668 case KFD_MEM_ATT_SHARED:
670 case KFD_MEM_ATT_USERPTR:
671 return kfd_mem_dmamap_userptr(mem, attachment);
672 case KFD_MEM_ATT_DMABUF:
673 return kfd_mem_dmamap_dmabuf(attachment);
675 return kfd_mem_dmamap_sg_bo(mem, attachment);
683 kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
684 struct kfd_mem_attachment *attachment)
686 enum dma_data_direction direction =
687 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
688 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
689 struct ttm_operation_ctx ctx = {.interruptible = false};
690 struct amdgpu_bo *bo = attachment->bo_va->base.bo;
691 struct amdgpu_device *adev = attachment->adev;
692 struct ttm_tt *ttm = bo->tbo.ttm;
694 if (unlikely(!ttm->sg))
697 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
698 ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
700 dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
701 sg_free_table(ttm->sg);
707 kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment)
709 /* This is a no-op. We don't want to trigger eviction fences when
710 * unmapping DMABufs. Therefore the invalidation (moving to system
711 * domain) is done in kfd_mem_dmamap_dmabuf.
716 * kfd_mem_dmaunmap_sg_bo() - Free DMA mapped sg_table of DOORBELL or MMIO BO
717 * @mem: SG BO of the DOORBELL or MMIO resource on the owning device
718 * @attachment: Virtual address attachment of the BO on accessing device
 * The method performs the following steps:
721 * - Signal TTM to mark memory pointed to by BO as GPU inaccessible
722 * - Free SG Table that is used to encapsulate DMA mapped memory of
723 * peer device's DOORBELL or MMIO memory
725 * This method is invoked in the following contexts:
 * Unmapping of DOORBELL or MMIO BO on a device having access to its memory
 * Eviction of DOORBELL or MMIO BO on a device having access to its memory
732 kfd_mem_dmaunmap_sg_bo(struct kgd_mem *mem,
733 struct kfd_mem_attachment *attachment)
735 struct ttm_operation_ctx ctx = {.interruptible = true};
736 struct amdgpu_bo *bo = attachment->bo_va->base.bo;
737 struct amdgpu_device *adev = attachment->adev;
738 struct ttm_tt *ttm = bo->tbo.ttm;
739 enum dma_data_direction dir;
741 if (unlikely(!ttm->sg)) {
742 pr_debug("SG Table of BO is NULL");
746 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
747 ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
749 dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
750 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
751 dma_unmap_resource(adev->dev, ttm->sg->sgl->dma_address,
752 ttm->sg->sgl->length, dir, DMA_ATTR_SKIP_CPU_SYNC);
753 sg_free_table(ttm->sg);
760 kfd_mem_dmaunmap_attachment(struct kgd_mem *mem,
761 struct kfd_mem_attachment *attachment)
763 switch (attachment->type) {
764 case KFD_MEM_ATT_SHARED:
766 case KFD_MEM_ATT_USERPTR:
767 kfd_mem_dmaunmap_userptr(mem, attachment);
769 case KFD_MEM_ATT_DMABUF:
770 kfd_mem_dmaunmap_dmabuf(attachment);
773 kfd_mem_dmaunmap_sg_bo(mem, attachment);
780 static int kfd_mem_export_dmabuf(struct kgd_mem *mem)
783 struct dma_buf *ret = amdgpu_gem_prime_export(
785 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
796 kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
797 struct amdgpu_bo **bo)
799 struct drm_gem_object *gobj;
802 ret = kfd_mem_export_dmabuf(mem);
806 gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf);
808 return PTR_ERR(gobj);
810 *bo = gem_to_amdgpu_bo(gobj);
811 (*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;
816 /* kfd_mem_attach - Add a BO to a VM
 * Everything that needs to be done only once when a BO is first added
819 * to a VM. It can later be mapped and unmapped many times without
820 * repeating these steps.
822 * 0. Create BO for DMA mapping, if needed
823 * 1. Allocate and initialize BO VA entry data structure
824 * 2. Add BO to the VM
825 * 3. Determine ASIC-specific PTE flags
826 * 4. Alloc page tables and directories if needed
827 * 4a. Validate new page tables and directories
829 static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
830 struct amdgpu_vm *vm, bool is_aql)
832 struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
833 unsigned long bo_size = mem->bo->tbo.base.size;
834 uint64_t va = mem->va;
835 struct kfd_mem_attachment *attachment[2] = {NULL, NULL};
836 struct amdgpu_bo *bo[2] = {NULL, NULL};
837 struct amdgpu_bo_va *bo_va;
838 bool same_hive = false;
842 pr_err("Invalid VA when adding BO to VM\n");
846 /* Determine access to VRAM, MMIO and DOORBELL BOs of peer devices
 * The access path of MMIO and DOORBELL BOs is always over PCIe.
 * In contrast, the access path of VRAM BOs depends on the type of
850 * link that connects the peer device. Access over PCIe is allowed
851 * if peer device has large BAR. In contrast, access over xGMI is
852 * allowed for both small and large BAR configurations of peer device
854 if ((adev != bo_adev && !adev->gmc.is_app_apu) &&
855 ((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) ||
856 (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) ||
857 (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
858 if (mem->domain == AMDGPU_GEM_DOMAIN_VRAM)
859 same_hive = amdgpu_xgmi_same_hive(adev, bo_adev);
860 if (!same_hive && !amdgpu_device_is_peer_accessible(bo_adev, adev))
864 for (i = 0; i <= is_aql; i++) {
865 attachment[i] = kzalloc(sizeof(*attachment[i]), GFP_KERNEL);
866 if (unlikely(!attachment[i])) {
871 pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
874 if ((adev == bo_adev && !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) ||
875 (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && reuse_dmamap(adev, bo_adev)) ||
876 (mem->domain == AMDGPU_GEM_DOMAIN_GTT && reuse_dmamap(adev, bo_adev)) ||
			/* Mappings on the local GPU, or VRAM mappings in the
			 * local hive, or userptr, or GTT mappings that can
			 * reuse the DMA map address space, all share the
			 * original BO
882 attachment[i]->type = KFD_MEM_ATT_SHARED;
884 drm_gem_object_get(&bo[i]->tbo.base);
886 /* Multiple mappings on the same GPU share the BO */
887 attachment[i]->type = KFD_MEM_ATT_SHARED;
889 drm_gem_object_get(&bo[i]->tbo.base);
890 } else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
891 /* Create an SG BO to DMA-map userptrs on other GPUs */
892 attachment[i]->type = KFD_MEM_ATT_USERPTR;
893 ret = create_dmamap_sg_bo(adev, mem, &bo[i]);
896 /* Handle DOORBELL BOs of peer devices and MMIO BOs of local and peer devices */
897 } else if (mem->bo->tbo.type == ttm_bo_type_sg) {
			WARN_ONCE(!(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL ||
				    mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP),
				  "Handling invalid SG BO in ATTACH request");
901 attachment[i]->type = KFD_MEM_ATT_SG;
902 ret = create_dmamap_sg_bo(adev, mem, &bo[i]);
		/* Enable access to GTT and VRAM BOs of peer devices */
906 } else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT ||
907 mem->domain == AMDGPU_GEM_DOMAIN_VRAM) {
908 attachment[i]->type = KFD_MEM_ATT_DMABUF;
909 ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]);
912 pr_debug("Employ DMABUF mechanism to enable peer GPU access\n");
914 WARN_ONCE(true, "Handling invalid ATTACH request");
919 /* Add BO to VM internal data structures */
920 ret = amdgpu_bo_reserve(bo[i], false);
922 pr_debug("Unable to reserve BO during memory attach");
925 bo_va = amdgpu_vm_bo_find(vm, bo[i]);
927 bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
930 attachment[i]->bo_va = bo_va;
931 amdgpu_bo_unreserve(bo[i]);
932 if (unlikely(!attachment[i]->bo_va)) {
934 pr_err("Failed to add BO object to VM. ret == %d\n",
938 attachment[i]->va = va;
939 attachment[i]->pte_flags = get_pte_flags(adev, mem);
940 attachment[i]->adev = adev;
941 list_add(&attachment[i]->list, &mem->attachments);
949 for (; i >= 0; i--) {
952 if (attachment[i]->bo_va) {
953 amdgpu_bo_reserve(bo[i], true);
954 if (--attachment[i]->bo_va->ref_count == 0)
955 amdgpu_vm_bo_del(adev, attachment[i]->bo_va);
956 amdgpu_bo_unreserve(bo[i]);
957 list_del(&attachment[i]->list);
960 drm_gem_object_put(&bo[i]->tbo.base);
961 kfree(attachment[i]);
966 static void kfd_mem_detach(struct kfd_mem_attachment *attachment)
968 struct amdgpu_bo *bo = attachment->bo_va->base.bo;
970 pr_debug("\t remove VA 0x%llx in entry %p\n",
971 attachment->va, attachment);
972 if (--attachment->bo_va->ref_count == 0)
973 amdgpu_vm_bo_del(attachment->adev, attachment->bo_va);
974 drm_gem_object_put(&bo->tbo.base);
975 list_del(&attachment->list);
979 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
980 struct amdkfd_process_info *process_info,
983 mutex_lock(&process_info->lock);
985 list_add_tail(&mem->validate_list,
986 &process_info->userptr_valid_list);
988 list_add_tail(&mem->validate_list, &process_info->kfd_bo_list);
989 mutex_unlock(&process_info->lock);
992 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
993 struct amdkfd_process_info *process_info)
995 mutex_lock(&process_info->lock);
996 list_del(&mem->validate_list);
997 mutex_unlock(&process_info->lock);
1000 /* Initializes user pages. It registers the MMU notifier and validates
1001 * the userptr BO in the GTT domain.
1003 * The BO must already be on the userptr_valid_list. Otherwise an
1004 * eviction and restore may happen that leaves the new BO unmapped
1005 * with the user mode queues running.
1007 * Takes the process_info->lock to protect against concurrent restore
1010 * Returns 0 for success, negative errno for errors.
1012 static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
1015 struct amdkfd_process_info *process_info = mem->process_info;
1016 struct amdgpu_bo *bo = mem->bo;
1017 struct ttm_operation_ctx ctx = { true, false };
1018 struct hmm_range *range;
1021 mutex_lock(&process_info->lock);
1023 ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
1025 pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
1029 ret = amdgpu_hmm_register(bo, user_addr);
1031 pr_err("%s: Failed to register MMU notifier: %d\n",
1038 * During a CRIU restore operation, the userptr buffer objects
1039 * will be validated in the restore_userptr_work worker at a
	 * later stage when it is scheduled by another ioctl called by the
	 * CRIU master process for the target pid being restored.
1043 mutex_lock(&process_info->notifier_lock);
1045 mutex_unlock(&process_info->notifier_lock);
1046 mutex_unlock(&process_info->lock);
1050 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &range);
1052 pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
1053 goto unregister_out;
1056 ret = amdgpu_bo_reserve(bo, true);
1058 pr_err("%s: Failed to reserve BO\n", __func__);
1061 amdgpu_bo_placement_from_domain(bo, mem->domain);
1062 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1064 pr_err("%s: failed to validate BO\n", __func__);
1065 amdgpu_bo_unreserve(bo);
1068 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
1071 amdgpu_hmm_unregister(bo);
1073 mutex_unlock(&process_info->lock);
1077 /* Reserving a BO and its page table BOs must happen atomically to
1078 * avoid deadlocks. Some operations update multiple VMs at once. Track
1079 * all the reservation info in a context structure. Optionally a sync
1080 * object can track VM updates.
1082 struct bo_vm_reservation_context {
1083 /* DRM execution context for the reservation */
1084 struct drm_exec exec;
1085 /* Number of VMs reserved */
1087 /* Pointer to sync object */
1088 struct amdgpu_sync *sync;
1092 BO_VM_NOT_MAPPED = 0, /* Match VMs where a BO is not mapped */
1093 BO_VM_MAPPED, /* Match VMs where a BO is mapped */
1094 BO_VM_ALL, /* Match all VMs a BO was added to */
1098 * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
1099 * @mem: KFD BO structure.
1100 * @vm: the VM to reserve.
1101 * @ctx: the struct that will be used in unreserve_bo_and_vms().
1103 static int reserve_bo_and_vm(struct kgd_mem *mem,
1104 struct amdgpu_vm *vm,
1105 struct bo_vm_reservation_context *ctx)
1107 struct amdgpu_bo *bo = mem->bo;
1113 ctx->sync = &mem->sync;
1114 drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
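	/* drm_exec_until_all_locked() re-runs its body until everything is
	 * locked; drm_exec_retry_on_contention() restarts the loop after
	 * backing off when a reservation conflict is detected.
	 */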
1115 drm_exec_until_all_locked(&ctx->exec) {
1116 ret = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
1117 drm_exec_retry_on_contention(&ctx->exec);
1121 ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1);
1122 drm_exec_retry_on_contention(&ctx->exec);
1129 pr_err("Failed to reserve buffers in ttm.\n");
1130 drm_exec_fini(&ctx->exec);
1135 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
1136 * @mem: KFD BO structure.
 * @vm: the VM to reserve. If NULL, then all VMs associated with the BO
 * are used. Otherwise, a single VM associated with the BO.
1139 * @map_type: the mapping status that will be used to filter the VMs.
1140 * @ctx: the struct that will be used in unreserve_bo_and_vms().
1142 * Returns 0 for success, negative for failure.
1144 static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
1145 struct amdgpu_vm *vm, enum bo_vm_match map_type,
1146 struct bo_vm_reservation_context *ctx)
1148 struct kfd_mem_attachment *entry;
1149 struct amdgpu_bo *bo = mem->bo;
1152 ctx->sync = &mem->sync;
1153 drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
1154 drm_exec_until_all_locked(&ctx->exec) {
1156 list_for_each_entry(entry, &mem->attachments, list) {
1157 if ((vm && vm != entry->bo_va->base.vm) ||
1158 (entry->is_mapped != map_type
1159 && map_type != BO_VM_ALL))
1162 ret = amdgpu_vm_lock_pd(entry->bo_va->base.vm,
1164 drm_exec_retry_on_contention(&ctx->exec);
1170 ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1);
1171 drm_exec_retry_on_contention(&ctx->exec);
1178 pr_err("Failed to reserve buffers in ttm.\n");
1179 drm_exec_fini(&ctx->exec);
1184 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
1185 * @ctx: Reservation context to unreserve
1186 * @wait: Optionally wait for a sync object representing pending VM updates
1187 * @intr: Whether the wait is interruptible
1189 * Also frees any resources allocated in
1190 * reserve_bo_and_(cond_)vm(s). Returns the status from
1193 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
1194 bool wait, bool intr)
1199 ret = amdgpu_sync_wait(ctx->sync, intr);
1201 drm_exec_fini(&ctx->exec);
1206 static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
1207 struct kfd_mem_attachment *entry,
1208 struct amdgpu_sync *sync)
1210 struct amdgpu_bo_va *bo_va = entry->bo_va;
1211 struct amdgpu_device *adev = entry->adev;
1212 struct amdgpu_vm *vm = bo_va->base.vm;
1214 amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
1216 amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
1218 amdgpu_sync_fence(sync, bo_va->last_pt_update);
1221 static int update_gpuvm_pte(struct kgd_mem *mem,
1222 struct kfd_mem_attachment *entry,
1223 struct amdgpu_sync *sync)
1225 struct amdgpu_bo_va *bo_va = entry->bo_va;
1226 struct amdgpu_device *adev = entry->adev;
1229 ret = kfd_mem_dmamap_attachment(mem, entry);
1233 /* Update the page tables */
1234 ret = amdgpu_vm_bo_update(adev, bo_va, false);
1236 pr_err("amdgpu_vm_bo_update failed\n");
1240 return amdgpu_sync_fence(sync, bo_va->last_pt_update);
1243 static int map_bo_to_gpuvm(struct kgd_mem *mem,
1244 struct kfd_mem_attachment *entry,
1245 struct amdgpu_sync *sync,
1250 /* Set virtual address for the allocation */
1251 ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0,
1252 amdgpu_bo_size(entry->bo_va->base.bo),
1255 pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
1263 ret = update_gpuvm_pte(mem, entry, sync);
1265 pr_err("update_gpuvm_pte() failed\n");
1266 goto update_gpuvm_pte_failed;
1271 update_gpuvm_pte_failed:
1272 unmap_bo_from_gpuvm(mem, entry, sync);
1273 kfd_mem_dmaunmap_attachment(mem, entry);
1277 static int process_validate_vms(struct amdkfd_process_info *process_info)
1279 struct amdgpu_vm *peer_vm;
1282 list_for_each_entry(peer_vm, &process_info->vm_list_head,
1284 ret = vm_validate_pt_pd_bos(peer_vm);
1292 static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
1293 struct amdgpu_sync *sync)
1295 struct amdgpu_vm *peer_vm;
1298 list_for_each_entry(peer_vm, &process_info->vm_list_head,
1300 struct amdgpu_bo *pd = peer_vm->root.bo;
1302 ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
1303 AMDGPU_SYNC_NE_OWNER,
1304 AMDGPU_FENCE_OWNER_KFD);
1312 static int process_update_pds(struct amdkfd_process_info *process_info,
1313 struct amdgpu_sync *sync)
1315 struct amdgpu_vm *peer_vm;
1318 list_for_each_entry(peer_vm, &process_info->vm_list_head,
1320 ret = vm_update_pds(peer_vm, sync);
1328 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
1329 struct dma_fence **ef)
1331 struct amdkfd_process_info *info = NULL;
1334 if (!*process_info) {
1335 info = kzalloc(sizeof(*info), GFP_KERNEL);
1339 mutex_init(&info->lock);
1340 mutex_init(&info->notifier_lock);
1341 INIT_LIST_HEAD(&info->vm_list_head);
1342 INIT_LIST_HEAD(&info->kfd_bo_list);
1343 INIT_LIST_HEAD(&info->userptr_valid_list);
1344 INIT_LIST_HEAD(&info->userptr_inval_list);
1346 info->eviction_fence =
1347 amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
1350 if (!info->eviction_fence) {
1351 pr_err("Failed to create eviction fence\n");
1353 goto create_evict_fence_fail;
1356 info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
1357 INIT_DELAYED_WORK(&info->restore_userptr_work,
1358 amdgpu_amdkfd_restore_userptr_worker);
1360 *process_info = info;
1361 *ef = dma_fence_get(&info->eviction_fence->base);
1364 vm->process_info = *process_info;
1366 /* Validate page directory and attach eviction fence */
1367 ret = amdgpu_bo_reserve(vm->root.bo, true);
1369 goto reserve_pd_fail;
1370 ret = vm_validate_pt_pd_bos(vm);
1372 pr_err("validate_pt_pd_bos() failed\n");
1373 goto validate_pd_fail;
1375 ret = amdgpu_bo_sync_wait(vm->root.bo,
1376 AMDGPU_FENCE_OWNER_KFD, false);
1379 ret = dma_resv_reserve_fences(vm->root.bo->tbo.base.resv, 1);
1381 goto reserve_shared_fail;
1382 dma_resv_add_fence(vm->root.bo->tbo.base.resv,
1383 &vm->process_info->eviction_fence->base,
1384 DMA_RESV_USAGE_BOOKKEEP);
1385 amdgpu_bo_unreserve(vm->root.bo);
1387 /* Update process info */
1388 mutex_lock(&vm->process_info->lock);
1389 list_add_tail(&vm->vm_list_node,
1390 &(vm->process_info->vm_list_head));
1391 vm->process_info->n_vms++;
1392 mutex_unlock(&vm->process_info->lock);
1396 reserve_shared_fail:
1399 amdgpu_bo_unreserve(vm->root.bo);
1401 vm->process_info = NULL;
1403 /* Two fence references: one in info and one in *ef */
1404 dma_fence_put(&info->eviction_fence->base);
1407 *process_info = NULL;
1409 create_evict_fence_fail:
1410 mutex_destroy(&info->lock);
1411 mutex_destroy(&info->notifier_lock);
 * amdgpu_amdkfd_gpuvm_pin_bo() - Pins a BO using the following criteria
1419 * @bo: Handle of buffer object being pinned
1420 * @domain: Domain into which BO should be pinned
1422 * - USERPTR BOs are UNPINNABLE and will return error
1423 * - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
1424 * PIN count incremented. It is valid to PIN a BO multiple times
1426 * Return: ZERO if successful in pinning, Non-Zero in case of error.
1428 static int amdgpu_amdkfd_gpuvm_pin_bo(struct amdgpu_bo *bo, u32 domain)
1432 ret = amdgpu_bo_reserve(bo, false);
1436 ret = amdgpu_bo_pin_restricted(bo, domain, 0, 0);
1438 pr_err("Error in Pinning BO to domain: %d\n", domain);
1440 amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
1441 amdgpu_bo_unreserve(bo);
 * amdgpu_amdkfd_gpuvm_unpin_bo() - Unpins a BO using the following criteria
1448 * @bo: Handle of buffer object being unpinned
 * - Is an illegal request for USERPTR BOs and is ignored
1451 * - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
1452 * PIN count decremented. Calls to UNPIN must balance calls to PIN
1454 static void amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo)
1458 ret = amdgpu_bo_reserve(bo, false);
1462 amdgpu_bo_unpin(bo);
1463 amdgpu_bo_unreserve(bo);
1466 int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
1467 struct amdgpu_vm *avm, u32 pasid)
	/* Free the original amdgpu-allocated pasid;
	 * it will be replaced with the kfd-allocated pasid.
1476 amdgpu_pasid_free(avm->pasid);
1477 amdgpu_vm_set_pasid(adev, avm, 0);
1480 ret = amdgpu_vm_set_pasid(adev, avm, pasid);
1487 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
1488 struct amdgpu_vm *avm,
1489 void **process_info,
1490 struct dma_fence **ef)
1494 /* Already a compute VM? */
1495 if (avm->process_info)
1498 /* Convert VM into a compute VM */
1499 ret = amdgpu_vm_make_compute(adev, avm);
1503 /* Initialize KFD part of the VM and process info */
1504 ret = init_kfd_vm(avm, process_info, ef);
1508 amdgpu_vm_set_task_info(avm);
1513 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1514 struct amdgpu_vm *vm)
1516 struct amdkfd_process_info *process_info = vm->process_info;
1521 /* Update process info */
1522 mutex_lock(&process_info->lock);
1523 process_info->n_vms--;
1524 list_del(&vm->vm_list_node);
1525 mutex_unlock(&process_info->lock);
1527 vm->process_info = NULL;
1529 /* Release per-process resources when last compute VM is destroyed */
1530 if (!process_info->n_vms) {
1531 WARN_ON(!list_empty(&process_info->kfd_bo_list));
1532 WARN_ON(!list_empty(&process_info->userptr_valid_list));
1533 WARN_ON(!list_empty(&process_info->userptr_inval_list));
1535 dma_fence_put(&process_info->eviction_fence->base);
1536 cancel_delayed_work_sync(&process_info->restore_userptr_work);
1537 put_pid(process_info->pid);
1538 mutex_destroy(&process_info->lock);
1539 mutex_destroy(&process_info->notifier_lock);
1540 kfree(process_info);
1544 void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
1547 struct amdgpu_vm *avm;
1549 if (WARN_ON(!adev || !drm_priv))
1552 avm = drm_priv_to_vm(drm_priv);
1554 pr_debug("Releasing process vm %p\n", avm);
	/* The original pasid of the amdgpu vm has already been
	 * released while converting the amdgpu vm into a compute vm.
	 * The current pasid is managed by kfd and will be
	 * released on kfd process destroy. Set the amdgpu pasid
	 * to 0 to avoid a duplicate release.
1562 amdgpu_vm_release_compute(adev, avm);
1565 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
1567 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1568 struct amdgpu_bo *pd = avm->root.bo;
1569 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
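	/* Pre-Vega10 ASICs program the page directory base as a page frame
	 * number; newer ASICs take the full GPU physical address.
	 */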
1571 if (adev->asic_type < CHIP_VEGA10)
1572 return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1573 return avm->pd_phys_addr;
1576 void amdgpu_amdkfd_block_mmu_notifications(void *p)
1578 struct amdkfd_process_info *pinfo = (struct amdkfd_process_info *)p;
1580 mutex_lock(&pinfo->lock);
1581 WRITE_ONCE(pinfo->block_mmu_notifications, true);
1582 mutex_unlock(&pinfo->lock);
1585 int amdgpu_amdkfd_criu_resume(void *p)
1588 struct amdkfd_process_info *pinfo = (struct amdkfd_process_info *)p;
1590 mutex_lock(&pinfo->lock);
1591 pr_debug("scheduling work\n");
1592 mutex_lock(&pinfo->notifier_lock);
1593 pinfo->evicted_bos++;
1594 mutex_unlock(&pinfo->notifier_lock);
1595 if (!READ_ONCE(pinfo->block_mmu_notifications)) {
1599 WRITE_ONCE(pinfo->block_mmu_notifications, false);
1600 schedule_delayed_work(&pinfo->restore_userptr_work, 0);
1603 mutex_unlock(&pinfo->lock);
1607 size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
1610 uint64_t reserved_for_pt =
1611 ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
1613 uint64_t vram_available, system_mem_available, ttm_mem_available;
1615 spin_lock(&kfd_mem_limit.mem_limit_lock);
1616 vram_available = KFD_XCP_MEMORY_SIZE(adev, xcp_id)
1617 - adev->kfd.vram_used_aligned[xcp_id]
1618 - atomic64_read(&adev->vram_pin_size)
1621 if (adev->gmc.is_app_apu) {
1622 system_mem_available = no_system_mem_limit ?
1623 kfd_mem_limit.max_system_mem_limit :
1624 kfd_mem_limit.max_system_mem_limit -
1625 kfd_mem_limit.system_mem_used;
1627 ttm_mem_available = kfd_mem_limit.max_ttm_mem_limit -
1628 kfd_mem_limit.ttm_mem_used;
1630 available = min3(system_mem_available, ttm_mem_available,
1632 available = ALIGN_DOWN(available, PAGE_SIZE);
1634 available = ALIGN_DOWN(vram_available, VRAM_AVAILABLITY_ALIGN);
1637 spin_unlock(&kfd_mem_limit.mem_limit_lock);
1645 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1646 struct amdgpu_device *adev, uint64_t va, uint64_t size,
1647 void *drm_priv, struct kgd_mem **mem,
1648 uint64_t *offset, uint32_t flags, bool criu_resume)
1650 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1651 struct amdgpu_fpriv *fpriv = container_of(avm, struct amdgpu_fpriv, vm);
1652 enum ttm_bo_type bo_type = ttm_bo_type_device;
1653 struct sg_table *sg = NULL;
1654 uint64_t user_addr = 0;
1655 struct amdgpu_bo *bo;
1656 struct drm_gem_object *gobj = NULL;
1657 u32 domain, alloc_domain;
1658 uint64_t aligned_size;
1664 * Check on which domain to allocate BO
1666 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
1667 domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1669 if (adev->gmc.is_app_apu) {
1670 domain = AMDGPU_GEM_DOMAIN_GTT;
1671 alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1674 alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
1675 alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
1676 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;
1678 xcp_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ?
1680 } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
1681 domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1684 domain = AMDGPU_GEM_DOMAIN_GTT;
1685 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1686 alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE;
1688 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
1689 if (!offset || !*offset)
1691 user_addr = untagged_addr(*offset);
1692 } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1693 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1694 bo_type = ttm_bo_type_sg;
1695 if (size > UINT_MAX)
1697 sg = create_sg_table(*offset, size);
1705 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT)
1706 alloc_flags |= AMDGPU_GEM_CREATE_COHERENT;
1707 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_EXT_COHERENT)
1708 alloc_flags |= AMDGPU_GEM_CREATE_EXT_COHERENT;
1709 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED)
1710 alloc_flags |= AMDGPU_GEM_CREATE_UNCACHED;
1712 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1717 INIT_LIST_HEAD(&(*mem)->attachments);
1718 mutex_init(&(*mem)->lock);
1719 (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1721 /* Workaround for AQL queue wraparound bug. Map the same
1722 * memory twice. That means we only actually allocate half
	if ((*mem)->aql_queue)
		size >>= 1;
	aligned_size = PAGE_ALIGN(size);
1729 (*mem)->alloc_flags = flags;
1731 amdgpu_sync_create(&(*mem)->sync);
1733 ret = amdgpu_amdkfd_reserve_mem_limit(adev, aligned_size, flags,
1736 pr_debug("Insufficient memory\n");
1737 goto err_reserve_limit;
1740 pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s xcp_id %d\n",
1741 va, (*mem)->aql_queue ? size << 1 : size,
1742 domain_string(alloc_domain), xcp_id);
1744 ret = amdgpu_gem_object_create(adev, aligned_size, 1, alloc_domain, alloc_flags,
1745 bo_type, NULL, &gobj, xcp_id + 1);
1747 pr_debug("Failed to create BO on domain %s. ret %d\n",
1748 domain_string(alloc_domain), ret);
1751 ret = drm_vma_node_allow(&gobj->vma_node, drm_priv);
1753 pr_debug("Failed to allow vma node access. ret %d\n", ret);
1754 goto err_node_allow;
1756 bo = gem_to_amdgpu_bo(gobj);
1757 if (bo_type == ttm_bo_type_sg) {
1759 bo->tbo.ttm->sg = sg;
1764 bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO;
1767 (*mem)->domain = domain;
1768 (*mem)->mapped_to_gpu_memory = 0;
1769 (*mem)->process_info = avm->process_info;
1771 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1774 pr_debug("creating userptr BO for user_addr = %llx\n", user_addr);
1775 ret = init_user_pages(*mem, user_addr, criu_resume);
1777 goto allocate_init_user_pages_failed;
1778 } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1779 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
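		/* MMIO and DOORBELL BOs are pinned in GTT at allocation time
		 * and are expected to stay there for their whole lifetime, so
		 * restrict both the allowed and preferred domains to GTT below.
		 */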
1780 ret = amdgpu_amdkfd_gpuvm_pin_bo(bo, AMDGPU_GEM_DOMAIN_GTT);
1782 pr_err("Pinning MMIO/DOORBELL BO during ALLOC FAILED\n");
1785 bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
1786 bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
1790 *offset = amdgpu_bo_mmap_offset(bo);
1794 allocate_init_user_pages_failed:
1796 remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
1797 drm_vma_node_revoke(&gobj->vma_node, drm_priv);
1799 /* Don't unreserve system mem limit twice */
1800 goto err_reserve_limit;
1802 amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags, xcp_id);
1804 mutex_destroy(&(*mem)->lock);
1806 drm_gem_object_put(gobj);
1817 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1818 struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
1821 struct amdkfd_process_info *process_info = mem->process_info;
1822 unsigned long bo_size = mem->bo->tbo.base.size;
1823 bool use_release_notifier = (mem->bo->kfd_bo == mem);
1824 struct kfd_mem_attachment *entry, *tmp;
1825 struct bo_vm_reservation_context ctx;
1826 unsigned int mapped_to_gpu_memory;
1828 bool is_imported = false;
1830 mutex_lock(&mem->lock);
	/* Unpin MMIO/DOORBELL BOs that were pinned during allocation */
1833 if (mem->alloc_flags &
1834 (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1835 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1836 amdgpu_amdkfd_gpuvm_unpin_bo(mem->bo);
1839 mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
1840 is_imported = mem->is_imported;
1841 mutex_unlock(&mem->lock);
1842 /* lock is not needed after this, since mem is unused and will
1846 if (mapped_to_gpu_memory > 0) {
1847 pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1852 /* Make sure restore workers don't access the BO any more */
1853 mutex_lock(&process_info->lock);
1854 list_del(&mem->validate_list);
1855 mutex_unlock(&process_info->lock);
1857 /* Cleanup user pages and MMU notifiers */
1858 if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
1859 amdgpu_hmm_unregister(mem->bo);
1860 mutex_lock(&process_info->notifier_lock);
1861 amdgpu_ttm_tt_discard_user_pages(mem->bo->tbo.ttm, mem->range);
1862 mutex_unlock(&process_info->notifier_lock);
1865 ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1869 /* The eviction fence should be removed by the last unmap.
1870 * TODO: Log an error condition if the bo still has the eviction fence
1873 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1874 process_info->eviction_fence);
1875 pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1876 mem->va + bo_size * (1 + mem->aql_queue));
1878 /* Remove from VM internal data structures */
1879 list_for_each_entry_safe(entry, tmp, &mem->attachments, list) {
1880 kfd_mem_dmaunmap_attachment(mem, entry);
1881 kfd_mem_detach(entry);
1884 ret = unreserve_bo_and_vms(&ctx, false, false);
1886 /* Free the sync object */
1887 amdgpu_sync_free(&mem->sync);
1889 /* If the SG is not NULL, it's one we created for a doorbell or mmio
1890 * remap BO. We need to free it.
1892 if (mem->bo->tbo.sg) {
1893 sg_free_table(mem->bo->tbo.sg);
1894 kfree(mem->bo->tbo.sg);
1897 /* Update the size of the BO being freed if it was allocated from
1898 * VRAM and is not imported. For APP APU VRAM allocations are done
1903 (mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM ||
1904 (adev->gmc.is_app_apu &&
1905 mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT)))
1912 drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
1914 dma_buf_put(mem->dmabuf);
1915 mutex_destroy(&mem->lock);
1917 /* If this releases the last reference, it will end up calling
1918 * amdgpu_amdkfd_release_notify and kfree the mem struct. That's why
1919 * this needs to be the last call here.
1921 drm_gem_object_put(&mem->bo->tbo.base);
1924 * For kgd_mem allocated in amdgpu_amdkfd_gpuvm_import_dmabuf(),
1925 * explicitly free it here.
1927 if (!use_release_notifier)
1933 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1934 struct amdgpu_device *adev, struct kgd_mem *mem,
1937 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1939 struct amdgpu_bo *bo;
1941 struct kfd_mem_attachment *entry;
1942 struct bo_vm_reservation_context ctx;
1943 unsigned long bo_size;
1944 bool is_invalid_userptr = false;
1948 pr_err("Invalid BO when mapping memory to GPU\n");
1952 /* Make sure restore is not running concurrently. Since we
1953 * don't map invalid userptr BOs, we rely on the next restore
1954 * worker to do the mapping
1956 mutex_lock(&mem->process_info->lock);
1958 /* Lock notifier lock. If we find an invalid userptr BO, we can be
1959 * sure that the MMU notifier is no longer running
1960 * concurrently and the queues are actually stopped
1962 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1963 mutex_lock(&mem->process_info->notifier_lock);
1964 is_invalid_userptr = !!mem->invalid;
1965 mutex_unlock(&mem->process_info->notifier_lock);
1968 mutex_lock(&mem->lock);
1970 domain = mem->domain;
1971 bo_size = bo->tbo.base.size;
1973 pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1975 mem->va + bo_size * (1 + mem->aql_queue),
1976 avm, domain_string(domain));
1978 if (!kfd_mem_is_attached(avm, mem)) {
1979 ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue);
1984 ret = reserve_bo_and_vm(mem, avm, &ctx);
1988 /* Userptr can be marked as "not invalid", but not actually be
1989 * validated yet (still in the system domain). In that case
1990 * the queues are still stopped and we can leave mapping for
1991 * the next restore worker
1993 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
1994 bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
1995 is_invalid_userptr = true;
1997 ret = vm_validate_pt_pd_bos(avm);
2001 if (mem->mapped_to_gpu_memory == 0 &&
2002 !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
2003 /* Validate BO only once. The eviction fence gets added to BO
2004 * the first time it is mapped. Validate will wait for all
2005 * background evictions to complete.
2007 ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
2009 pr_debug("Validate failed\n");
2014 list_for_each_entry(entry, &mem->attachments, list) {
2015 if (entry->bo_va->base.vm != avm || entry->is_mapped)
2018 pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
2019 entry->va, entry->va + bo_size, entry);
2021 ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
2022 is_invalid_userptr);
2024 pr_err("Failed to map bo to gpuvm\n");
2028 ret = vm_update_pds(avm, ctx.sync);
2030 pr_err("Failed to update page directories\n");
2034 entry->is_mapped = true;
2035 mem->mapped_to_gpu_memory++;
2036 pr_debug("\t INC mapping count %d\n",
2037 mem->mapped_to_gpu_memory);
2040 if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
2041 dma_resv_add_fence(bo->tbo.base.resv,
2042 &avm->process_info->eviction_fence->base,
2043 DMA_RESV_USAGE_BOOKKEEP);
2044 ret = unreserve_bo_and_vms(&ctx, false, false);
2049 unreserve_bo_and_vms(&ctx, false, false);
2051 mutex_unlock(&mem->process_info->lock);
2052 mutex_unlock(&mem->lock);
2056 void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
2058 struct kfd_mem_attachment *entry;
2059 struct amdgpu_vm *vm;
2061 vm = drm_priv_to_vm(drm_priv);
2063 mutex_lock(&mem->lock);
2065 list_for_each_entry(entry, &mem->attachments, list) {
2066 if (entry->bo_va->base.vm == vm)
2067 kfd_mem_dmaunmap_attachment(mem, entry);
2070 mutex_unlock(&mem->lock);
2073 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
2074 struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv)
2076 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
2077 struct amdkfd_process_info *process_info = avm->process_info;
2078 unsigned long bo_size = mem->bo->tbo.base.size;
2079 struct kfd_mem_attachment *entry;
2080 struct bo_vm_reservation_context ctx;
2083 mutex_lock(&mem->lock);
2085 ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx);
2088 /* If no VMs were reserved, it means the BO wasn't actually mapped */
2089 if (ctx.n_vms == 0) {
2094 ret = vm_validate_pt_pd_bos(avm);
2098 pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
2100 mem->va + bo_size * (1 + mem->aql_queue),
2103 list_for_each_entry(entry, &mem->attachments, list) {
2104 if (entry->bo_va->base.vm != avm || !entry->is_mapped)
2107 pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
2108 entry->va, entry->va + bo_size, entry);
2110 unmap_bo_from_gpuvm(mem, entry, ctx.sync);
2111 entry->is_mapped = false;
2113 mem->mapped_to_gpu_memory--;
2114 pr_debug("\t DEC mapping count %d\n",
2115 mem->mapped_to_gpu_memory);
2118 /* If BO is unmapped from all VMs, unfence it. It can be evicted if
2121 if (mem->mapped_to_gpu_memory == 0 &&
2122 !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) &&
2123 !mem->bo->tbo.pin_count)
2124 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
2125 process_info->eviction_fence);
2128 unreserve_bo_and_vms(&ctx, false, false);
2130 mutex_unlock(&mem->lock);
2134 int amdgpu_amdkfd_gpuvm_sync_memory(
2135 struct amdgpu_device *adev, struct kgd_mem *mem, bool intr)
2137 struct amdgpu_sync sync;
2140 amdgpu_sync_create(&sync);
2142 mutex_lock(&mem->lock);
2143 amdgpu_sync_clone(&mem->sync, &sync);
2144 mutex_unlock(&mem->lock);
2146 ret = amdgpu_sync_wait(&sync, intr);
2147 amdgpu_sync_free(&sync);
2152 * amdgpu_amdkfd_map_gtt_bo_to_gart - Map BO to GART and increment reference count
2153 * @adev: Device to which allocated BO belongs
2154 * @bo: Buffer object to be mapped
2156 * Before return, bo reference count is incremented. To release the reference and unpin/
2157 * unmap the BO, call amdgpu_amdkfd_free_gtt_mem.
2159 int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_device *adev, struct amdgpu_bo *bo)
2163 ret = amdgpu_bo_reserve(bo, true);
2165 pr_err("Failed to reserve bo. ret %d\n", ret);
2166 goto err_reserve_bo_failed;
2169 ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
2171 pr_err("Failed to pin bo. ret %d\n", ret);
2172 goto err_pin_bo_failed;
2175 ret = amdgpu_ttm_alloc_gart(&bo->tbo);
2177 pr_err("Failed to bind bo to GART. ret %d\n", ret);
2178 goto err_map_bo_gart_failed;
2181 amdgpu_amdkfd_remove_eviction_fence(
2182 bo, bo->vm_bo->vm->process_info->eviction_fence);
2184 amdgpu_bo_unreserve(bo);
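	/* Take an extra reference for the caller; it is dropped together with
	 * the pin in amdgpu_amdkfd_free_gtt_mem() (see the function comment
	 * above).
	 */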
2186 bo = amdgpu_bo_ref(bo);
2190 err_map_bo_gart_failed:
2191 amdgpu_bo_unpin(bo);
2193 amdgpu_bo_unreserve(bo);
2194 err_reserve_bo_failed:
2199 /** amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel() - Map a GTT BO for kernel CPU access
2201 * @mem: Buffer object to be mapped for CPU access
2202 * @kptr[out]: pointer in kernel CPU address space
2203 * @size[out]: size of the buffer
2205 * Pins the BO and maps it for kernel CPU access. The eviction fence is removed
2206 * from the BO, since pinned BOs cannot be evicted. The bo must remain on the
2207 * validate_list, so the GPU mapping can be restored after a page table was invalidated.
2210 * Return: 0 on success, error code on failure
2212 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
2213 void **kptr, uint64_t *size)
2216 struct amdgpu_bo *bo = mem->bo;
2218 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
2219 pr_err("userptr can't be mapped to kernel\n");
2223 mutex_lock(&mem->process_info->lock);
2225 ret = amdgpu_bo_reserve(bo, true);
2227 pr_err("Failed to reserve bo. ret %d\n", ret);
2228 goto bo_reserve_failed;
2231 ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
2233 pr_err("Failed to pin bo. ret %d\n", ret);
2237 ret = amdgpu_bo_kmap(bo, kptr);
2239 pr_err("Failed to map bo to kernel. ret %d\n", ret);
2243 amdgpu_amdkfd_remove_eviction_fence(
2244 bo, mem->process_info->eviction_fence);
2247 *size = amdgpu_bo_size(bo);
2249 amdgpu_bo_unreserve(bo);
2251 mutex_unlock(&mem->process_info->lock);
2255 amdgpu_bo_unpin(bo);
2257 amdgpu_bo_unreserve(bo);
2259 mutex_unlock(&mem->process_info->lock);
2264 /** amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel() - Unmap a GTT BO for kernel CPU access
2266 * @mem: Buffer object to be unmapped for CPU access
2268 * Removes the kernel CPU mapping and unpins the BO. It does not restore the
2269 * eviction fence, so this function should only be used for cleanup before the
 * BO is destroyed.
 */
2272 void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem)
2274 struct amdgpu_bo *bo = mem->bo;
2276 amdgpu_bo_reserve(bo, true);
2277 amdgpu_bo_kunmap(bo);
2278 amdgpu_bo_unpin(bo);
2279 amdgpu_bo_unreserve(bo);
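/* Illustrative pairing of the two kernel-CPU-access helpers above (a sketch,
 * not taken from this file; "data" and "len" are hypothetical):
 *
 *	void *kptr;
 *	uint64_t size;
 *
 *	if (!amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(mem, &kptr, &size)) {
 *		memcpy(kptr, data, min(len, size));
 *		amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
 *	}
 */

/* amdgpu_amdkfd_gpuvm_get_vm_fault_info - Copy the most recently recorded VM
 * fault information into @mem and clear the "updated" flag so the same fault
 * is not reported twice.
 */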
2282 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
2283 struct kfd_vm_fault_info *mem)
2285 if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
2286 *mem = *adev->gmc.vm_fault_info;
2287 mb(); /* make sure read happened */
2288 atomic_set(&adev->gmc.vm_fault_info_updated, 0);
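/* amdgpu_amdkfd_gpuvm_import_dmabuf - Import a DMA-buf as a kgd_mem object.
 * Only VRAM and GTT BOs are accepted. The imported GEM object is wrapped in a
 * newly allocated kgd_mem, an additional reference is taken on the dma_buf,
 * and the BO is added to the process's KFD BO list so it takes part in
 * eviction and restore.
 */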
2293 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
2294 struct dma_buf *dma_buf,
2295 uint64_t va, void *drm_priv,
2296 struct kgd_mem **mem, uint64_t *size,
2297 uint64_t *mmap_offset)
2299 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
2300 struct drm_gem_object *obj;
2301 struct amdgpu_bo *bo;
2304 obj = amdgpu_gem_prime_import(adev_to_drm(adev), dma_buf);
if (IS_ERR(obj))
2306 return PTR_ERR(obj);
2308 bo = gem_to_amdgpu_bo(obj);
2309 if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
2310 AMDGPU_GEM_DOMAIN_GTT))) {
2311 /* Only VRAM and GTT BOs are supported */
2316 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2322 ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
2327 *size = amdgpu_bo_size(bo);
2330 *mmap_offset = amdgpu_bo_mmap_offset(bo);
2332 INIT_LIST_HEAD(&(*mem)->attachments);
2333 mutex_init(&(*mem)->lock);
2335 (*mem)->alloc_flags =
2336 ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
2337 KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
2338 | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
2339 | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
2341 get_dma_buf(dma_buf);
2342 (*mem)->dmabuf = dma_buf;
2345 (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) && !adev->gmc.is_app_apu ?
2346 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
2348 (*mem)->mapped_to_gpu_memory = 0;
2349 (*mem)->process_info = avm->process_info;
2350 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
2351 amdgpu_sync_create(&(*mem)->sync);
2352 (*mem)->is_imported = true;
2359 drm_gem_object_put(obj);
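/* amdgpu_amdkfd_gpuvm_export_dmabuf - Export @mem as a dma_buf. Creates the
 * dma_buf on first use via kfd_mem_export_dmabuf() and returns it with an
 * extra reference that the caller is expected to drop with dma_buf_put().
 */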
2363 int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem,
2364 struct dma_buf **dma_buf)
2368 mutex_lock(&mem->lock);
2369 ret = kfd_mem_export_dmabuf(mem);
2373 get_dma_buf(mem->dmabuf);
2374 *dma_buf = mem->dmabuf;
2376 mutex_unlock(&mem->lock);
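/* Illustrative use of the export helper above (a sketch; error handling and
 * the actual sharing step are omitted):
 *
 *	struct dma_buf *buf;
 *
 *	if (!amdgpu_amdkfd_gpuvm_export_dmabuf(mem, &buf)) {
 *		// ... hand buf to another device or importer ...
 *		dma_buf_put(buf);
 *	}
 */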
2380 /* Evict a userptr BO by stopping the queues if necessary
2382 * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
2383 * cannot do any memory allocations, and cannot take any locks that
2384 * are held elsewhere while allocating memory.
2386 * It doesn't do anything to the BO itself. The real work happens in
2387 * restore, where we get updated page addresses. This function only
2388 * ensures that GPU access to the BO is stopped.
2390 int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
2391 unsigned long cur_seq, struct kgd_mem *mem)
2393 struct amdkfd_process_info *process_info = mem->process_info;
2396 /* Do not process MMU notifications during CRIU restore until
2397 * KFD_CRIU_OP_RESUME IOCTL is received
2399 if (READ_ONCE(process_info->block_mmu_notifications))
2402 mutex_lock(&process_info->notifier_lock);
2403 mmu_interval_set_seq(mni, cur_seq);
2406 if (++process_info->evicted_bos == 1) {
2407 /* First eviction, stop the queues */
2408 r = kgd2kfd_quiesce_mm(mni->mm,
2409 KFD_QUEUE_EVICTION_TRIGGER_USERPTR);
2411 pr_err("Failed to quiesce KFD\n");
2412 schedule_delayed_work(&process_info->restore_userptr_work,
2413 msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
2415 mutex_unlock(&process_info->notifier_lock);
2420 /* Update invalid userptr BOs
2422 * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
2423 * userptr_inval_list and updates user pages for all BOs that have
2424 * been invalidated since their last update.
2426 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
2427 struct mm_struct *mm)
2429 struct kgd_mem *mem, *tmp_mem;
2430 struct amdgpu_bo *bo;
2431 struct ttm_operation_ctx ctx = { false, false };
2435 mutex_lock(&process_info->notifier_lock);
2437 /* Move all invalidated BOs to the userptr_inval_list */
2438 list_for_each_entry_safe(mem, tmp_mem,
2439 &process_info->userptr_valid_list,
2442 list_move_tail(&mem->validate_list,
2443 &process_info->userptr_inval_list);
2445 /* Go through userptr_inval_list and update any invalid user_pages */
2446 list_for_each_entry(mem, &process_info->userptr_inval_list,
2448 invalid = mem->invalid;
if (!invalid)
2450 /* BO hasn't been invalidated since the last
2451 * revalidation attempt. Keep its page list.
 */
continue;
bo = mem->bo;
2457 amdgpu_ttm_tt_discard_user_pages(bo->tbo.ttm, mem->range);
mem->range = NULL;
2460 /* BO reservations and getting user pages (hmm_range_fault)
2461 * must happen outside the notifier lock
2463 mutex_unlock(&process_info->notifier_lock);
2465 /* Move the BO to system (CPU) domain if necessary to unmap
2466 * and free the SG table
2468 if (bo->tbo.resource->mem_type != TTM_PL_SYSTEM) {
2469 if (amdgpu_bo_reserve(bo, true))
2471 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
2472 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
2473 amdgpu_bo_unreserve(bo);
2475 pr_err("%s: Failed to invalidate userptr BO\n",
2481 /* Get updated user pages */
2482 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &mem->range);
2485 pr_debug("Failed %d to get user pages\n", ret);
2487 /* Return -EFAULT bad address error as success. It will
2488 * fail later with a VM fault if the GPU tries to access
2489 * it. Better than hanging indefinitely with stalled user mode queues.
2492 * Return other error -EBUSY or -ENOMEM to retry restore
2500 mutex_lock(&process_info->notifier_lock);
2502 /* Mark the BO as valid unless it was invalidated
2503 * again concurrently.
2505 if (mem->invalid != invalid) {
2509 /* set mem valid if mem has hmm range associated */
2515 mutex_unlock(&process_info->notifier_lock);
2520 /* Validate invalid userptr BOs
2522 * Validates BOs on the userptr_inval_list. Also updates GPUVM page tables
2523 * with new page addresses and waits for the page table updates to complete.
2525 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
2527 struct ttm_operation_ctx ctx = { false, false };
2528 struct amdgpu_sync sync;
2529 struct drm_exec exec;
2531 struct amdgpu_vm *peer_vm;
2532 struct kgd_mem *mem, *tmp_mem;
2533 struct amdgpu_bo *bo;
2536 amdgpu_sync_create(&sync);
2538 drm_exec_init(&exec, 0);
2539 /* Reserve all BOs and page tables for validation */
2540 drm_exec_until_all_locked(&exec) {
2541 /* Reserve all the page directories */
2542 list_for_each_entry(peer_vm, &process_info->vm_list_head,
2544 ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2);
2545 drm_exec_retry_on_contention(&exec);
2550 /* Reserve the userptr_inval_list entries to resv_list */
2551 list_for_each_entry(mem, &process_info->userptr_inval_list,
2553 struct drm_gem_object *gobj;
2555 gobj = &mem->bo->tbo.base;
2556 ret = drm_exec_prepare_obj(&exec, gobj, 1);
2557 drm_exec_retry_on_contention(&exec);
2563 ret = process_validate_vms(process_info);
2567 /* Validate BOs and update GPUVM page tables */
2568 list_for_each_entry_safe(mem, tmp_mem,
2569 &process_info->userptr_inval_list,
2571 struct kfd_mem_attachment *attachment;
2575 /* Validate the BO if we got user pages */
2576 if (bo->tbo.ttm->pages[0]) {
2577 amdgpu_bo_placement_from_domain(bo, mem->domain);
2578 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
2580 pr_err("%s: failed to validate BO\n", __func__);
2585 /* Update mapping. If the BO was not validated
2586 * (because we couldn't get user pages), this will
2587 * clear the page table entries, which will result in
2588 * VM faults if the GPU tries to access the invalid memory.
 */
2591 list_for_each_entry(attachment, &mem->attachments, list) {
2592 if (!attachment->is_mapped)
2595 kfd_mem_dmaunmap_attachment(mem, attachment);
2596 ret = update_gpuvm_pte(mem, attachment, &sync);
2598 pr_err("%s: update PTE failed\n", __func__);
2599 /* make sure this gets validated again */
2600 mutex_lock(&process_info->notifier_lock);
mem->invalid++;
2602 mutex_unlock(&process_info->notifier_lock);
2608 /* Update page directories */
2609 ret = process_update_pds(process_info, &sync);
2612 drm_exec_fini(&exec);
2613 amdgpu_sync_wait(&sync, false);
2614 amdgpu_sync_free(&sync);
2619 /* Confirm that all user pages are valid while holding the notifier lock
2621 * Moves valid BOs from the userptr_inval_list back to the userptr_valid_list.
2623 static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_info)
2625 struct kgd_mem *mem, *tmp_mem;
2628 list_for_each_entry_safe(mem, tmp_mem,
2629 &process_info->userptr_inval_list,
2633 /* keep mem without hmm range at userptr_inval_list */
2637 /* Only check mem with hmm range associated */
2638 valid = amdgpu_ttm_tt_get_user_pages_done(
2639 mem->bo->tbo.ttm, mem->range);
2643 WARN(!mem->invalid, "Invalid BO not marked invalid");
2649 WARN(1, "Valid BO is marked invalid");
2654 list_move_tail(&mem->validate_list,
2655 &process_info->userptr_valid_list);
2661 /* Worker callback to restore evicted userptr BOs
2663 * Tries to update and validate all userptr BOs. If successful and no
2664 * concurrent evictions happened, the queues are restarted. Otherwise,
2665 * reschedule for another attempt later.
2667 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
2669 struct delayed_work *dwork = to_delayed_work(work);
2670 struct amdkfd_process_info *process_info =
2671 container_of(dwork, struct amdkfd_process_info,
2672 restore_userptr_work);
2673 struct task_struct *usertask;
2674 struct mm_struct *mm;
2675 uint32_t evicted_bos;
2677 mutex_lock(&process_info->notifier_lock);
2678 evicted_bos = process_info->evicted_bos;
2679 mutex_unlock(&process_info->notifier_lock);
2683 /* Reference task and mm in case of concurrent process termination */
2684 usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
2687 mm = get_task_mm(usertask);
2689 put_task_struct(usertask);
2693 mutex_lock(&process_info->lock);
2695 if (update_invalid_user_pages(process_info, mm))
2697 /* userptr_inval_list can be empty if all evicted userptr BOs
2698 * have been freed. In that case there is nothing to validate
2699 * and we can just restart the queues.
2701 if (!list_empty(&process_info->userptr_inval_list)) {
2702 if (validate_invalid_user_pages(process_info))
2705 /* Final check for concurrent eviction and atomic update. If
2706 * another eviction happens after successful update, it will
2707 * be a first eviction that calls quiesce_mm. The eviction
2708 * reference counting inside KFD will handle this case.
2710 mutex_lock(&process_info->notifier_lock);
2711 if (process_info->evicted_bos != evicted_bos)
2712 goto unlock_notifier_out;
2714 if (confirm_valid_user_pages_locked(process_info)) {
2715 WARN(1, "User pages unexpectedly invalid");
2716 goto unlock_notifier_out;
2719 process_info->evicted_bos = evicted_bos = 0;
2721 if (kgd2kfd_resume_mm(mm)) {
2722 pr_err("%s: Failed to resume KFD\n", __func__);
2723 /* No recovery from this failure. Probably the CP is
2724 * hanging. No point trying again.
2728 unlock_notifier_out:
2729 mutex_unlock(&process_info->notifier_lock);
2731 mutex_unlock(&process_info->lock);
2733 /* If validation failed, reschedule another attempt */
2735 schedule_delayed_work(&process_info->restore_userptr_work,
2736 msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
2738 kfd_smi_event_queue_restore_rescheduled(mm);
2741 put_task_struct(usertask);
2744 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
2745 * KFD process identified by process_info
2747 * @process_info: amdkfd_process_info of the KFD process
2749 * After memory eviction, restore thread calls this function. The function
2750 * should be called while the process is still valid. BO restore involves:
2752 * 1. Release old eviction fence and create new one
2753 * 2. Get two copies of PD BO list from all the VMs. Keep one copy as pd_list.
2754 * 3. Use the second PD list and kfd_bo_list to create a list (ctx.list) of
2755 * BOs that need to be reserved.
2756 * 4. Reserve all the BOs
2757 * 5. Validate PD and PT BOs.
2758 * 6. Validate all KFD BOs using kfd_bo_list, map them, and add the new fence
2759 * 7. Add fence to all PD and PT BOs.
2760 * 8. Unreserve all BOs
2762 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
2764 struct amdkfd_process_info *process_info = info;
2765 struct amdgpu_vm *peer_vm;
2766 struct kgd_mem *mem;
2767 struct amdgpu_amdkfd_fence *new_fence;
2768 struct list_head duplicate_save;
2769 struct amdgpu_sync sync_obj;
2770 unsigned long failed_size = 0;
2771 unsigned long total_size = 0;
2772 struct drm_exec exec;
2775 INIT_LIST_HEAD(&duplicate_save);
2777 mutex_lock(&process_info->lock);
2779 drm_exec_init(&exec, 0);
2780 drm_exec_until_all_locked(&exec) {
2781 list_for_each_entry(peer_vm, &process_info->vm_list_head,
2783 ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2);
2784 drm_exec_retry_on_contention(&exec);
2786 goto ttm_reserve_fail;
2789 /* Reserve all BOs and page tables/directory. Add all BOs from
2790 * kfd_bo_list to ctx.list
2792 list_for_each_entry(mem, &process_info->kfd_bo_list,
2794 struct drm_gem_object *gobj;
2796 gobj = &mem->bo->tbo.base;
2797 ret = drm_exec_prepare_obj(&exec, gobj, 1);
2798 drm_exec_retry_on_contention(&exec);
2800 goto ttm_reserve_fail;
2804 amdgpu_sync_create(&sync_obj);
2806 /* Validate PDs and PTs */
2807 ret = process_validate_vms(process_info);
2809 goto validate_map_fail;
2811 ret = process_sync_pds_resv(process_info, &sync_obj);
2813 pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
2814 goto validate_map_fail;
2817 /* Validate BOs and map them to GPUVM (update VM page tables). */
2818 list_for_each_entry(mem, &process_info->kfd_bo_list,
2821 struct amdgpu_bo *bo = mem->bo;
2822 uint32_t domain = mem->domain;
2823 struct kfd_mem_attachment *attachment;
2824 struct dma_resv_iter cursor;
2825 struct dma_fence *fence;
2827 total_size += amdgpu_bo_size(bo);
2829 ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2831 pr_debug("Memory eviction: Validate BOs failed\n");
2832 failed_size += amdgpu_bo_size(bo);
2833 ret = amdgpu_amdkfd_bo_validate(bo,
2834 AMDGPU_GEM_DOMAIN_GTT, false);
2836 pr_debug("Memory eviction: Try again\n");
2837 goto validate_map_fail;
2840 dma_resv_for_each_fence(&cursor, bo->tbo.base.resv,
2841 DMA_RESV_USAGE_KERNEL, fence) {
2842 ret = amdgpu_sync_fence(&sync_obj, fence);
2844 pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2845 goto validate_map_fail;
2848 list_for_each_entry(attachment, &mem->attachments, list) {
2849 if (!attachment->is_mapped)
2852 if (attachment->bo_va->base.bo->tbo.pin_count)
2855 kfd_mem_dmaunmap_attachment(mem, attachment);
2856 ret = update_gpuvm_pte(mem, attachment, &sync_obj);
2858 pr_debug("Memory eviction: update PTE failed. Try again\n");
2859 goto validate_map_fail;
2865 pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
2867 /* Update page directories */
2868 ret = process_update_pds(process_info, &sync_obj);
2870 pr_debug("Memory eviction: update PDs failed. Try again\n");
2871 goto validate_map_fail;
2874 /* Wait for validate and PT updates to finish */
2875 amdgpu_sync_wait(&sync_obj, false);
2877 /* Release old eviction fence and create new one, because fence only
2878 * goes from unsignaled to signaled, fence cannot be reused.
2879 * Use context and mm from the old fence.
2881 new_fence = amdgpu_amdkfd_fence_create(
2882 process_info->eviction_fence->base.context,
2883 process_info->eviction_fence->mm,
2886 pr_err("Failed to create eviction fence\n");
2888 goto validate_map_fail;
2890 dma_fence_put(&process_info->eviction_fence->base);
2891 process_info->eviction_fence = new_fence;
2892 *ef = dma_fence_get(&new_fence->base);
2894 /* Attach new eviction fence to all BOs except pinned ones */
2895 list_for_each_entry(mem, &process_info->kfd_bo_list, validate_list) {
2896 if (mem->bo->tbo.pin_count)
2899 dma_resv_add_fence(mem->bo->tbo.base.resv,
2900 &process_info->eviction_fence->base,
2901 DMA_RESV_USAGE_BOOKKEEP);
2903 /* Attach eviction fence to PD / PT BOs */
2904 list_for_each_entry(peer_vm, &process_info->vm_list_head,
2906 struct amdgpu_bo *bo = peer_vm->root.bo;
2908 dma_resv_add_fence(bo->tbo.base.resv,
2909 &process_info->eviction_fence->base,
2910 DMA_RESV_USAGE_BOOKKEEP);
2914 amdgpu_sync_free(&sync_obj);
2916 drm_exec_fini(&exec);
2917 mutex_unlock(&process_info->lock);
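/* amdgpu_amdkfd_add_gws_to_process - Wrap the GWS BO in a kgd_mem, add it to
 * the process's KFD BO list, validate it in the GWS domain and attach the
 * process eviction fence so GWS is evicted and restored together with the
 * rest of the process's BOs.
 */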
2921 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
2923 struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2924 struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
2930 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2934 mutex_init(&(*mem)->lock);
2935 INIT_LIST_HEAD(&(*mem)->attachments);
2936 (*mem)->bo = amdgpu_bo_ref(gws_bo);
2937 (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
2938 (*mem)->process_info = process_info;
2939 add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
2940 amdgpu_sync_create(&(*mem)->sync);
2943 /* Validate gws bo the first time it is added to process */
2944 mutex_lock(&(*mem)->process_info->lock);
2945 ret = amdgpu_bo_reserve(gws_bo, false);
2946 if (unlikely(ret)) {
2947 pr_err("Reserve gws bo failed %d\n", ret);
2948 goto bo_reservation_failure;
2951 ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
2953 pr_err("GWS BO validate failed %d\n", ret);
2954 goto bo_validation_failure;
2956 /* GWS resource is shared between amdgpu and amdkfd.
2957 * Add the process eviction fence to the BO so they can evict each other.
 */
2960 ret = dma_resv_reserve_fences(gws_bo->tbo.base.resv, 1);
2962 goto reserve_shared_fail;
2963 dma_resv_add_fence(gws_bo->tbo.base.resv,
2964 &process_info->eviction_fence->base,
2965 DMA_RESV_USAGE_BOOKKEEP);
2966 amdgpu_bo_unreserve(gws_bo);
2967 mutex_unlock(&(*mem)->process_info->lock);
2971 reserve_shared_fail:
2972 bo_validation_failure:
2973 amdgpu_bo_unreserve(gws_bo);
2974 bo_reservation_failure:
2975 mutex_unlock(&(*mem)->process_info->lock);
2976 amdgpu_sync_free(&(*mem)->sync);
2977 remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
2978 amdgpu_bo_unref(&gws_bo);
2979 mutex_destroy(&(*mem)->lock);
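/* amdgpu_amdkfd_remove_gws_from_process - Undo amdgpu_amdkfd_add_gws_to_process:
 * take the GWS kgd_mem off the process's KFD BO list, remove the eviction
 * fence and drop the BO reference.
 */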
2985 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
2988 struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2989 struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
2990 struct amdgpu_bo *gws_bo = kgd_mem->bo;
2992 /* Remove BO from process's validate list so restore worker won't touch the GWS BO. */
2995 remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
2997 ret = amdgpu_bo_reserve(gws_bo, false);
2998 if (unlikely(ret)) {
2999 pr_err("Reserve gws bo failed %d\n", ret);
3000 //TODO add BO back to validate_list?
3003 amdgpu_amdkfd_remove_eviction_fence(gws_bo,
3004 process_info->eviction_fence);
3005 amdgpu_bo_unreserve(gws_bo);
3006 amdgpu_sync_free(&kgd_mem->sync);
3007 amdgpu_bo_unref(&gws_bo);
3008 mutex_destroy(&kgd_mem->lock);
3013 /* Returns GPU-specific tiling mode information */
3014 int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
3015 struct tile_config *config)
3017 config->gb_addr_config = adev->gfx.config.gb_addr_config;
3018 config->tile_config_ptr = adev->gfx.config.tile_mode_array;
3019 config->num_tile_configs =
3020 ARRAY_SIZE(adev->gfx.config.tile_mode_array);
3021 config->macro_tile_config_ptr =
3022 adev->gfx.config.macrotile_mode_array;
3023 config->num_macro_tile_configs =
3024 ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
3026 /* Those values are not set from GFX9 onwards */
3027 config->num_banks = adev->gfx.config.num_banks;
3028 config->num_ranks = adev->gfx.config.num_ranks;
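/* amdgpu_amdkfd_bo_mapped_to_dev - Return true if @mem has at least one
 * attachment currently mapped on @adev.
 */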
3033 bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem)
3035 struct kfd_mem_attachment *entry;
3037 list_for_each_entry(entry, &mem->attachments, list) {
3038 if (entry->is_mapped && entry->adev == adev)
3044 #if defined(CONFIG_DEBUG_FS)
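/* Dump the KFD memory limit accounting (system and TTM memory used vs. the
 * configured limits) to debugfs.
 */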
3046 int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data)
3049 spin_lock(&kfd_mem_limit.mem_limit_lock);
3050 seq_printf(m, "System mem used %lldM out of %lluM\n",
3051 (kfd_mem_limit.system_mem_used >> 20),
3052 (kfd_mem_limit.max_system_mem_limit >> 20));
3053 seq_printf(m, "TTM mem used %lldM out of %lluM\n",
3054 (kfd_mem_limit.ttm_mem_used >> 20),
3055 (kfd_mem_limit.max_ttm_mem_limit >> 20));
3056 spin_unlock(&kfd_mem_limit.mem_limit_lock);