diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 668a28b..3b8e1ee 100644
@@ -47,12 +47,6 @@ static struct {
        spinlock_t mem_limit_lock;
 } kfd_mem_limit;
 
-/* Struct used for amdgpu_amdkfd_bo_validate */
-struct amdgpu_vm_parser {
-       uint32_t        domain;
-       bool            wait;
-};
-
 static const char * const domain_bit_to_string[] = {
                "CPU",
                "GTT",
@@ -278,7 +272,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
        write_seqcount_end(&resv->seq);
 
        /* Drop the references to the removed fences or move them to ef_list */
-       for (i = j, k = 0; i < old->shared_count; ++i) {
+       for (i = j; i < old->shared_count; ++i) {
                struct dma_fence *f;
 
                f = rcu_dereference_protected(new->shared[i],
@@ -348,11 +342,9 @@ validate_fail:
        return ret;
 }
 
-static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
+static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
 {
-       struct amdgpu_vm_parser *p = param;
-
-       return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
+       return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
 }
 
 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
@@ -364,28 +356,23 @@ static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
  */
 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
 {
-       struct amdgpu_bo *pd = vm->root.base.bo;
+       struct amdgpu_bo *pd = vm->root.bo;
        struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
-       struct amdgpu_vm_parser param;
        int ret;
 
-       param.domain = AMDGPU_GEM_DOMAIN_VRAM;
-       param.wait = false;
-
-       ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
-                                       &param);
+       ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL);
        if (ret) {
                pr_err("failed to validate PT BOs\n");
                return ret;
        }
 
-       ret = amdgpu_amdkfd_validate(&param, pd);
+       ret = amdgpu_amdkfd_validate_vm_bo(NULL, pd);
        if (ret) {
                pr_err("failed to validate PD\n");
                return ret;
        }
 
-       vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
+       vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo);
 
        if (vm->use_cpu_for_update) {
                ret = amdgpu_bo_kmap(pd, NULL);
@@ -400,7 +387,7 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
 
 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
 {
-       struct amdgpu_bo *pd = vm->root.base.bo;
+       struct amdgpu_bo *pd = vm->root.bo;
        struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
        int ret;
 
@@ -652,7 +639,7 @@ kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
                }
        }
 
-       gobj = amdgpu_gem_prime_import(&adev->ddev, mem->dmabuf);
+       gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf);
        if (IS_ERR(gobj))
                return PTR_ERR(gobj);
 
@@ -1070,7 +1057,8 @@ static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
 
 static int update_gpuvm_pte(struct kgd_mem *mem,
                            struct kfd_mem_attachment *entry,
-                           struct amdgpu_sync *sync)
+                           struct amdgpu_sync *sync,
+                           bool *table_freed)
 {
        struct amdgpu_bo_va *bo_va = entry->bo_va;
        struct amdgpu_device *adev = entry->adev;
@@ -1081,7 +1069,7 @@ static int update_gpuvm_pte(struct kgd_mem *mem,
                return ret;
 
        /* Update the page tables  */
-       ret = amdgpu_vm_bo_update(adev, bo_va, false);
+       ret = amdgpu_vm_bo_update(adev, bo_va, false, table_freed);
        if (ret) {
                pr_err("amdgpu_vm_bo_update failed\n");
                return ret;
@@ -1093,7 +1081,8 @@ static int update_gpuvm_pte(struct kgd_mem *mem,
 static int map_bo_to_gpuvm(struct kgd_mem *mem,
                           struct kfd_mem_attachment *entry,
                           struct amdgpu_sync *sync,
-                          bool no_update_pte)
+                          bool no_update_pte,
+                          bool *table_freed)
 {
        int ret;
 
@@ -1110,7 +1099,7 @@ static int map_bo_to_gpuvm(struct kgd_mem *mem,
        if (no_update_pte)
                return 0;
 
-       ret = update_gpuvm_pte(mem, entry, sync);
+       ret = update_gpuvm_pte(mem, entry, sync, table_freed);
        if (ret) {
                pr_err("update_gpuvm_pte() failed\n");
                goto update_gpuvm_pte_failed;
@@ -1164,7 +1153,7 @@ static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
 
        list_for_each_entry(peer_vm, &process_info->vm_list_head,
                            vm_list_node) {
-               struct amdgpu_bo *pd = peer_vm->root.base.bo;
+               struct amdgpu_bo *pd = peer_vm->root.bo;
 
                ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
                                       AMDGPU_SYNC_NE_OWNER,
@@ -1231,7 +1220,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
        vm->process_info = *process_info;
 
        /* Validate page directory and attach eviction fence */
-       ret = amdgpu_bo_reserve(vm->root.base.bo, true);
+       ret = amdgpu_bo_reserve(vm->root.bo, true);
        if (ret)
                goto reserve_pd_fail;
        ret = vm_validate_pt_pd_bos(vm);
@@ -1239,16 +1228,16 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
                pr_err("validate_pt_pd_bos() failed\n");
                goto validate_pd_fail;
        }
-       ret = amdgpu_bo_sync_wait(vm->root.base.bo,
+       ret = amdgpu_bo_sync_wait(vm->root.bo,
                                  AMDGPU_FENCE_OWNER_KFD, false);
        if (ret)
                goto wait_pd_fail;
-       ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
+       ret = dma_resv_reserve_shared(vm->root.bo->tbo.base.resv, 1);
        if (ret)
                goto reserve_shared_fail;
-       amdgpu_bo_fence(vm->root.base.bo,
+       amdgpu_bo_fence(vm->root.bo,
                        &vm->process_info->eviction_fence->base, true);
-       amdgpu_bo_unreserve(vm->root.base.bo);
+       amdgpu_bo_unreserve(vm->root.bo);
 
        /* Update process info */
        mutex_lock(&vm->process_info->lock);
@@ -1262,7 +1251,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
 reserve_shared_fail:
 wait_pd_fail:
 validate_pd_fail:
-       amdgpu_bo_unreserve(vm->root.base.bo);
+       amdgpu_bo_unreserve(vm->root.bo);
 reserve_pd_fail:
        vm->process_info = NULL;
        if (info) {
@@ -1317,7 +1306,7 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
                                    struct amdgpu_vm *vm)
 {
        struct amdkfd_process_info *process_info = vm->process_info;
-       struct amdgpu_bo *pd = vm->root.base.bo;
+       struct amdgpu_bo *pd = vm->root.bo;
 
        if (!process_info)
                return;
@@ -1373,7 +1362,7 @@ void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv)
 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
 {
        struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
-       struct amdgpu_bo *pd = avm->root.base.bo;
+       struct amdgpu_bo *pd = avm->root.bo;
        struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
 
        if (adev->asic_type < CHIP_VEGA10)
@@ -1608,7 +1597,8 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 }
 
 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
-               struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
+               struct kgd_dev *kgd, struct kgd_mem *mem,
+               void *drm_priv, bool *table_freed)
 {
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
@@ -1696,7 +1686,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
                         entry->va, entry->va + bo_size, entry);
 
                ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
-                                     is_invalid_userptr);
+                                     is_invalid_userptr, table_freed);
                if (ret) {
                        pr_err("Failed to map bo to gpuvm\n");
                        goto out_unreserve;
@@ -2146,7 +2136,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
                                continue;
 
                        kfd_mem_dmaunmap_attachment(mem, attachment);
-                       ret = update_gpuvm_pte(mem, attachment, &sync);
+                       ret = update_gpuvm_pte(mem, attachment, &sync, NULL);
                        if (ret) {
                                pr_err("%s: update PTE failed\n", __func__);
                                /* make sure this gets validated again */
@@ -2352,7 +2342,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
                                continue;
 
                        kfd_mem_dmaunmap_attachment(mem, attachment);
-                       ret = update_gpuvm_pte(mem, attachment, &sync_obj);
+                       ret = update_gpuvm_pte(mem, attachment, &sync_obj, NULL);
                        if (ret) {
                                pr_debug("Memory eviction: update PTE failed. Try again\n");
                                goto validate_map_fail;
@@ -2399,7 +2389,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
        /* Attach eviction fence to PD / PT BOs */
        list_for_each_entry(peer_vm, &process_info->vm_list_head,
                            vm_list_node) {
-               struct amdgpu_bo *bo = peer_vm->root.base.bo;
+               struct amdgpu_bo *bo = peer_vm->root.bo;
 
                amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
        }