Merge tag 'amd-drm-next-5.14-2021-06-02' of https://gitlab.freedesktop.org/agd5f...
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 7d4118c..141cd29 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -33,9 +33,6 @@
 #include <uapi/linux/kfd_ioctl.h>
 #include "amdgpu_xgmi.h"
 
-/* BO flag to indicate a KFD userptr BO */
-#define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
-
 /* Userptr restore delay, just long enough to allow consecutive VM
  * changes to accumulate
  */
@@ -75,16 +72,16 @@ static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
        return (struct amdgpu_device *)kgd;
 }
 
-static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
+static bool kfd_mem_is_attached(struct amdgpu_vm *avm,
                struct kgd_mem *mem)
 {
-       struct kfd_bo_va_list *entry;
+       struct kfd_mem_attachment *entry;
 
-       list_for_each_entry(entry, &mem->bo_va_list, bo_list)
+       list_for_each_entry(entry, &mem->attachments, list)
                if (entry->bo_va->base.vm == avm)
-                       return false;
+                       return true;
 
-       return true;
+       return false;
 }
 
 /* Set memory usage limits. Currently, limits are
@@ -108,6 +105,11 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
                (kfd_mem_limit.max_ttm_mem_limit >> 20));
 }
 
+void amdgpu_amdkfd_reserve_system_mem(uint64_t size)
+{
+       kfd_mem_limit.system_mem_used += size;
+}
+
 /* Estimate page table size needed to represent a given memory size
  *
  * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
@@ -217,7 +219,7 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
        u32 domain = bo->preferred_domains;
        bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
 
-       if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
+       if (bo->flags & AMDGPU_AMDKFD_CREATE_USERPTR_BO) {
                domain = AMDGPU_GEM_DOMAIN_CPU;
                sg = false;
        }
@@ -431,7 +433,8 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
                                mapping_flags |= coherent ?
                                        AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
                        else
-                               mapping_flags |= AMDGPU_VM_MTYPE_UC;
+                               mapping_flags |= coherent ?
+                                       AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
                } else {
                        mapping_flags |= coherent ?
                                AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
@@ -450,7 +453,8 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
                                if (adev->gmc.xgmi.connected_to_cpu)
                                        snoop = true;
                        } else {
-                               mapping_flags |= AMDGPU_VM_MTYPE_UC;
+                               mapping_flags |= coherent ?
+                                       AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
                                if (amdgpu_xgmi_same_hive(adev, bo_adev))
                                        snoop = true;
                        }
@@ -471,87 +475,320 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
        return pte_flags;
 }
 
-/* add_bo_to_vm - Add a BO to a VM
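+/* kfd_mem_dmamap_userptr - DMA-map the SG BO of a userptr attachment
+ *
+ * Builds an sg_table from the original userptr BO's pages, DMA-maps it for
+ * the attachment's device and validates the SG BO into GTT so the pages can
+ * be referenced from that GPU's page tables. Read-only allocations are
+ * mapped DMA_TO_DEVICE, writable ones DMA_BIDIRECTIONAL.
+ */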
+static int
+kfd_mem_dmamap_userptr(struct kgd_mem *mem,
+                      struct kfd_mem_attachment *attachment)
+{
+       enum dma_data_direction direction =
+               mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
+               DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+       struct ttm_operation_ctx ctx = {.interruptible = true};
+       struct amdgpu_bo *bo = attachment->bo_va->base.bo;
+       struct amdgpu_device *adev = attachment->adev;
+       struct ttm_tt *src_ttm = mem->bo->tbo.ttm;
+       struct ttm_tt *ttm = bo->tbo.ttm;
+       int ret;
+
+       if (WARN_ON(ttm->num_pages != src_ttm->num_pages))
+               return -EINVAL;
+
+       ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL);
+       if (unlikely(!ttm->sg))
+               return -ENOMEM;
+
+       /* Same sequence as in amdgpu_ttm_tt_pin_userptr */
+       ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages,
+                                       ttm->num_pages, 0,
+                                       (u64)ttm->num_pages << PAGE_SHIFT,
+                                       GFP_KERNEL);
+       if (unlikely(ret))
+               goto free_sg;
+
+       ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
+       if (unlikely(ret))
+               goto release_sg;
+
+       drm_prime_sg_to_dma_addr_array(ttm->sg, ttm->dma_address,
+                                      ttm->num_pages);
+
+       amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+       ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+       if (ret)
+               goto unmap_sg;
+
+       return 0;
+
+unmap_sg:
+       dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
+release_sg:
+       pr_err("DMA map userptr failed: %d\n", ret);
+       sg_free_table(ttm->sg);
+free_sg:
+       kfree(ttm->sg);
+       ttm->sg = NULL;
+       return ret;
+}
+
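+/* kfd_mem_dmamap_dmabuf - Validate a DMA-buf attachment's BO into GTT so the
+ * imported pages get DMA-mapped for this device by the dynamic attachment.
+ */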
+static int
+kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment)
+{
+       struct ttm_operation_ctx ctx = {.interruptible = true};
+       struct amdgpu_bo *bo = attachment->bo_va->base.bo;
+
+       amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+       return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+}
+
+static int
+kfd_mem_dmamap_attachment(struct kgd_mem *mem,
+                         struct kfd_mem_attachment *attachment)
+{
+       switch (attachment->type) {
+       case KFD_MEM_ATT_SHARED:
+               return 0;
+       case KFD_MEM_ATT_USERPTR:
+               return kfd_mem_dmamap_userptr(mem, attachment);
+       case KFD_MEM_ATT_DMABUF:
+               return kfd_mem_dmamap_dmabuf(attachment);
+       default:
+               WARN_ON_ONCE(1);
+       }
+       return -EINVAL;
+}
+
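+/* kfd_mem_dmaunmap_userptr - Undo the DMA mapping of a userptr attachment
+ *
+ * Moves the SG BO back to the CPU domain, then unmaps and frees its
+ * sg_table. The original userptr BO and its pages are not affected.
+ */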
+static void
+kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
+                        struct kfd_mem_attachment *attachment)
+{
+       enum dma_data_direction direction =
+               mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
+               DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+       struct ttm_operation_ctx ctx = {.interruptible = false};
+       struct amdgpu_bo *bo = attachment->bo_va->base.bo;
+       struct amdgpu_device *adev = attachment->adev;
+       struct ttm_tt *ttm = bo->tbo.ttm;
+
+       if (unlikely(!ttm->sg))
+               return;
+
+       amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+       ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+
+       dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
+       sg_free_table(ttm->sg);
+       ttm->sg = NULL;
+}
+
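+/* kfd_mem_dmaunmap_dmabuf - Move a DMA-buf attachment's BO back to the CPU
+ * domain, giving up its GTT placement on this device.
+ */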
+static void
+kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment)
+{
+       struct ttm_operation_ctx ctx = {.interruptible = true};
+       struct amdgpu_bo *bo = attachment->bo_va->base.bo;
+
+       amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+       ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+}
+
+static void
+kfd_mem_dmaunmap_attachment(struct kgd_mem *mem,
+                           struct kfd_mem_attachment *attachment)
+{
+       switch (attachment->type) {
+       case KFD_MEM_ATT_SHARED:
+               break;
+       case KFD_MEM_ATT_USERPTR:
+               kfd_mem_dmaunmap_userptr(mem, attachment);
+               break;
+       case KFD_MEM_ATT_DMABUF:
+               kfd_mem_dmaunmap_dmabuf(attachment);
+               break;
+       default:
+               WARN_ON_ONCE(1);
+       }
+}
+
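+/* kfd_mem_attach_userptr - Create an SG BO for DMA-mapping a userptr BO on
+ * another GPU
+ *
+ * The new BO shares the original BO's reservation object and holds a
+ * reference to it through bo->parent. Its sg_table is filled in later by
+ * kfd_mem_dmamap_userptr().
+ */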
+static int
+kfd_mem_attach_userptr(struct amdgpu_device *adev, struct kgd_mem *mem,
+                      struct amdgpu_bo **bo)
+{
+       unsigned long bo_size = mem->bo->tbo.base.size;
+       struct drm_gem_object *gobj;
+       int ret;
+
+       ret = amdgpu_bo_reserve(mem->bo, false);
+       if (ret)
+               return ret;
+
+       ret = amdgpu_gem_object_create(adev, bo_size, 1,
+                                      AMDGPU_GEM_DOMAIN_CPU,
+                                      AMDGPU_GEM_CREATE_PREEMPTIBLE,
+                                      ttm_bo_type_sg, mem->bo->tbo.base.resv,
+                                      &gobj);
+       amdgpu_bo_unreserve(mem->bo);
+       if (ret)
+               return ret;
+
+       *bo = gem_to_amdgpu_bo(gobj);
+       (*bo)->parent = amdgpu_bo_ref(mem->bo);
+
+       return 0;
+}
+
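+/* kfd_mem_attach_dmabuf - Import the original BO as a DMA-buf on this device
+ *
+ * The BO is exported once and the dma_buf is cached in mem->dmabuf; each
+ * additional GPU imports that dma_buf so dynamic attachment can handle the
+ * per-device DMA mapping.
+ */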
+static int
+kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
+                     struct amdgpu_bo **bo)
+{
+       struct drm_gem_object *gobj;
+       int ret;
+
+       if (!mem->dmabuf) {
+               mem->dmabuf = amdgpu_gem_prime_export(&mem->bo->tbo.base,
+                       mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
+                               DRM_RDWR : 0);
+               if (IS_ERR(mem->dmabuf)) {
+                       ret = PTR_ERR(mem->dmabuf);
+                       mem->dmabuf = NULL;
+                       return ret;
+               }
+       }
+
+       gobj = amdgpu_gem_prime_import(&adev->ddev, mem->dmabuf);
+       if (IS_ERR(gobj))
+               return PTR_ERR(gobj);
+
+       /* Import takes an extra reference on the dmabuf. Drop it now to
+        * avoid leaking it. We only need the one reference in
+        * kgd_mem->dmabuf.
+        */
+       dma_buf_put(mem->dmabuf);
+
+       *bo = gem_to_amdgpu_bo(gobj);
+       (*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;
+       (*bo)->parent = amdgpu_bo_ref(mem->bo);
+
+       return 0;
+}
+
+/* kfd_mem_attach - Add a BO to a VM
  *
  * Everything that needs to be done only once when a BO is first added
  * to a VM. It can later be mapped and unmapped many times without
  * repeating these steps.
  *
+ * 0. Create BO for DMA mapping, if needed
  * 1. Allocate and initialize BO VA entry data structure
  * 2. Add BO to the VM
  * 3. Determine ASIC-specific PTE flags
  * 4. Alloc page tables and directories if needed
  * 4a.  Validate new page tables and directories
  */
-static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
-               struct amdgpu_vm *vm, bool is_aql,
-               struct kfd_bo_va_list **p_bo_va_entry)
+static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
+               struct amdgpu_vm *vm, bool is_aql)
 {
-       int ret;
-       struct kfd_bo_va_list *bo_va_entry;
-       struct amdgpu_bo *bo = mem->bo;
+       struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
+       unsigned long bo_size = mem->bo->tbo.base.size;
        uint64_t va = mem->va;
-       struct list_head *list_bo_va = &mem->bo_va_list;
-       unsigned long bo_size = bo->tbo.base.size;
+       struct kfd_mem_attachment *attachment[2] = {NULL, NULL};
+       struct amdgpu_bo *bo[2] = {NULL, NULL};
+       int i, ret;
 
        if (!va) {
                pr_err("Invalid VA when adding BO to VM\n");
                return -EINVAL;
        }
 
-       if (is_aql)
-               va += bo_size;
-
-       bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
-       if (!bo_va_entry)
-               return -ENOMEM;
+       for (i = 0; i <= is_aql; i++) {
+               attachment[i] = kzalloc(sizeof(*attachment[i]), GFP_KERNEL);
+               if (unlikely(!attachment[i])) {
+                       ret = -ENOMEM;
+                       goto unwind;
+               }
 
-       pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
-                       va + bo_size, vm);
+               pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
+                        va + bo_size, vm);
 
-       /* Add BO to VM internal data structures*/
-       bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
-       if (!bo_va_entry->bo_va) {
-               ret = -EINVAL;
-               pr_err("Failed to add BO object to VM. ret == %d\n",
-                               ret);
-               goto err_vmadd;
-       }
+               if (adev == bo_adev || (mem->domain == AMDGPU_GEM_DOMAIN_VRAM &&
+                                       amdgpu_xgmi_same_hive(adev, bo_adev))) {
+                       /* Mappings on the local GPU and VRAM mappings in the
+                        * local hive share the original BO
+                        */
+                       attachment[i]->type = KFD_MEM_ATT_SHARED;
+                       bo[i] = mem->bo;
+                       drm_gem_object_get(&bo[i]->tbo.base);
+               } else if (i > 0) {
+                       /* Multiple mappings on the same GPU share the BO */
+                       attachment[i]->type = KFD_MEM_ATT_SHARED;
+                       bo[i] = bo[0];
+                       drm_gem_object_get(&bo[i]->tbo.base);
+               } else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
+                       /* Create an SG BO to DMA-map userptrs on other GPUs */
+                       attachment[i]->type = KFD_MEM_ATT_USERPTR;
+                       ret = kfd_mem_attach_userptr(adev, mem, &bo[i]);
+                       if (ret)
+                               goto unwind;
+               } else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT &&
+                          mem->bo->tbo.type != ttm_bo_type_sg) {
+                       /* GTT BOs use DMA-mapping ability of dynamic-attach
+                        * DMA bufs. TODO: The same should work for VRAM on
+                        * large-BAR GPUs.
+                        */
+                       attachment[i]->type = KFD_MEM_ATT_DMABUF;
+                       ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]);
+                       if (ret)
+                               goto unwind;
+               } else {
+                       /* FIXME: Need to DMA-map other BO types:
+                        * large-BAR VRAM, doorbells, MMIO remap
+                        */
+                       attachment[i]->type = KFD_MEM_ATT_SHARED;
+                       bo[i] = mem->bo;
+                       drm_gem_object_get(&bo[i]->tbo.base);
+               }
 
-       bo_va_entry->va = va;
-       bo_va_entry->pte_flags = get_pte_flags(adev, mem);
-       bo_va_entry->kgd_dev = (void *)adev;
-       list_add(&bo_va_entry->bo_list, list_bo_va);
+               /* Add BO to VM internal data structures */
+               attachment[i]->bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
+               if (unlikely(!attachment[i]->bo_va)) {
+                       ret = -ENOMEM;
+                       pr_err("Failed to add BO object to VM. ret == %d\n",
+                              ret);
+                       goto unwind;
+               }
 
-       if (p_bo_va_entry)
-               *p_bo_va_entry = bo_va_entry;
+               attachment[i]->va = va;
+               attachment[i]->pte_flags = get_pte_flags(adev, mem);
+               attachment[i]->adev = adev;
+               list_add(&attachment[i]->list, &mem->attachments);
 
-       /* Allocate validate page tables if needed */
-       ret = vm_validate_pt_pd_bos(vm);
-       if (ret) {
-               pr_err("validate_pt_pd_bos() failed\n");
-               goto err_alloc_pts;
+               va += bo_size;
        }
 
        return 0;
 
-err_alloc_pts:
-       amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
-       list_del(&bo_va_entry->bo_list);
-err_vmadd:
-       kfree(bo_va_entry);
+unwind:
+       for (; i >= 0; i--) {
+               if (!attachment[i])
+                       continue;
+               if (attachment[i]->bo_va) {
+                       amdgpu_vm_bo_rmv(adev, attachment[i]->bo_va);
+                       list_del(&attachment[i]->list);
+               }
+               if (bo[i])
+                       drm_gem_object_put(&bo[i]->tbo.base);
+               kfree(attachment[i]);
+       }
        return ret;
 }
 
-static void remove_bo_from_vm(struct amdgpu_device *adev,
-               struct kfd_bo_va_list *entry, unsigned long size)
+static void kfd_mem_detach(struct kfd_mem_attachment *attachment)
 {
-       pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
-                       entry->va,
-                       entry->va + size, entry);
-       amdgpu_vm_bo_rmv(adev, entry->bo_va);
-       list_del(&entry->bo_list);
-       kfree(entry);
+       struct amdgpu_bo *bo = attachment->bo_va->base.bo;
+
+       pr_debug("\t remove VA 0x%llx in entry %p\n",
+                       attachment->va, attachment);
+       amdgpu_vm_bo_rmv(attachment->adev, attachment->bo_va);
+       drm_gem_object_put(&bo->tbo.base);
+       list_del(&attachment->list);
+       kfree(attachment);
 }
 
 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
@@ -726,7 +963,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
                                struct bo_vm_reservation_context *ctx)
 {
        struct amdgpu_bo *bo = mem->bo;
-       struct kfd_bo_va_list *entry;
+       struct kfd_mem_attachment *entry;
        unsigned int i;
        int ret;
 
@@ -738,7 +975,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
        INIT_LIST_HEAD(&ctx->list);
        INIT_LIST_HEAD(&ctx->duplicates);
 
-       list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
+       list_for_each_entry(entry, &mem->attachments, list) {
                if ((vm && vm != entry->bo_va->base.vm) ||
                        (entry->is_mapped != map_type
                        && map_type != BO_VM_ALL))
@@ -760,7 +997,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
        list_add(&ctx->kfd_bo.tv.head, &ctx->list);
 
        i = 0;
-       list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
+       list_for_each_entry(entry, &mem->attachments, list) {
                if ((vm && vm != entry->bo_va->base.vm) ||
                        (entry->is_mapped != map_type
                        && map_type != BO_VM_ALL))
@@ -814,11 +1051,12 @@ static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
        return ret;
 }
 
-static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
-                               struct kfd_bo_va_list *entry,
+static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
+                               struct kfd_mem_attachment *entry,
                                struct amdgpu_sync *sync)
 {
        struct amdgpu_bo_va *bo_va = entry->bo_va;
+       struct amdgpu_device *adev = entry->adev;
        struct amdgpu_vm *vm = bo_va->base.vm;
 
        amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
@@ -827,15 +1065,20 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
 
        amdgpu_sync_fence(sync, bo_va->last_pt_update);
 
-       return 0;
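+       /* The DMA mapping is no longer needed once the BO is unmapped from
+        * this GPU's page tables
+        */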
+       kfd_mem_dmaunmap_attachment(mem, entry);
 }
 
-static int update_gpuvm_pte(struct amdgpu_device *adev,
-               struct kfd_bo_va_list *entry,
-               struct amdgpu_sync *sync)
+static int update_gpuvm_pte(struct kgd_mem *mem,
+                           struct kfd_mem_attachment *entry,
+                           struct amdgpu_sync *sync)
 {
-       int ret;
        struct amdgpu_bo_va *bo_va = entry->bo_va;
+       struct amdgpu_device *adev = entry->adev;
+       int ret;
+
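+       /* Make sure the attachment's DMA mapping is up to date before
+        * updating the page tables
+        */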
+       ret = kfd_mem_dmamap_attachment(mem, entry);
+       if (ret)
+               return ret;
 
        /* Update the page tables  */
        ret = amdgpu_vm_bo_update(adev, bo_va, false);
@@ -847,14 +1090,15 @@ static int update_gpuvm_pte(struct amdgpu_device *adev,
        return amdgpu_sync_fence(sync, bo_va->last_pt_update);
 }
 
-static int map_bo_to_gpuvm(struct amdgpu_device *adev,
-               struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
-               bool no_update_pte)
+static int map_bo_to_gpuvm(struct kgd_mem *mem,
+                          struct kfd_mem_attachment *entry,
+                          struct amdgpu_sync *sync,
+                          bool no_update_pte)
 {
        int ret;
 
        /* Set virtual address for the allocation */
-       ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
+       ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0,
                               amdgpu_bo_size(entry->bo_va->base.bo),
                               entry->pte_flags);
        if (ret) {
@@ -866,7 +1110,7 @@ static int map_bo_to_gpuvm(struct amdgpu_device *adev,
        if (no_update_pte)
                return 0;
 
-       ret = update_gpuvm_pte(adev, entry, sync);
+       ret = update_gpuvm_pte(mem, entry, sync);
        if (ret) {
                pr_err("update_gpuvm_pte() failed\n");
                goto update_gpuvm_pte_failed;
@@ -875,7 +1119,7 @@ static int map_bo_to_gpuvm(struct amdgpu_device *adev,
        return 0;
 
 update_gpuvm_pte_failed:
-       unmap_bo_from_gpuvm(adev, entry, sync);
+       unmap_bo_from_gpuvm(mem, entry, sync);
        return ret;
 }
 
@@ -967,7 +1211,8 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
 
                info->eviction_fence =
                        amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
-                                                  current->mm);
+                                                  current->mm,
+                                                  NULL);
                if (!info->eviction_fence) {
                        pr_err("Failed to create eviction fence\n");
                        ret = -ENOMEM;
@@ -1036,15 +1281,19 @@ create_evict_fence_fail:
 
 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
                                           struct file *filp, u32 pasid,
-                                          void **vm, void **process_info,
+                                          void **process_info,
                                           struct dma_fence **ef)
 {
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
-       struct drm_file *drm_priv = filp->private_data;
-       struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
-       struct amdgpu_vm *avm = &drv_priv->vm;
+       struct amdgpu_fpriv *drv_priv;
+       struct amdgpu_vm *avm;
        int ret;
 
+       ret = amdgpu_file_to_fpriv(filp, &drv_priv);
+       if (ret)
+               return ret;
+       avm = &drv_priv->vm;
+
        /* Already a compute VM? */
        if (avm->process_info)
                return -EINVAL;
@@ -1059,7 +1308,7 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
        if (ret)
                return ret;
 
-       *vm = (void *)avm;
+       amdgpu_vm_set_task_info(avm);
 
        return 0;
 }
@@ -1100,15 +1349,17 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
        }
 }
 
-void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
+void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv)
 {
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
-       struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+       struct amdgpu_vm *avm;
 
-       if (WARN_ON(!kgd || !vm))
+       if (WARN_ON(!kgd || !drm_priv))
                return;
 
-       pr_debug("Releasing process vm %p\n", vm);
+       avm = drm_priv_to_vm(drm_priv);
+
+       pr_debug("Releasing process vm %p\n", avm);
 
        /* The original pasid of amdgpu vm has already been
         * released while converting the amdgpu vm to a compute vm
@@ -1119,9 +1370,9 @@ void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
        amdgpu_vm_release_compute(adev, avm);
 }
 
-uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
+uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
 {
-       struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+       struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
        struct amdgpu_bo *pd = avm->root.base.bo;
        struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
 
@@ -1132,11 +1383,11 @@ uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
 
 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
                struct kgd_dev *kgd, uint64_t va, uint64_t size,
-               void *vm, struct kgd_mem **mem,
+               void *drm_priv, struct kgd_mem **mem,
                uint64_t *offset, uint32_t flags)
 {
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
-       struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+       struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
        enum ttm_bo_type bo_type = ttm_bo_type_device;
        struct sg_table *sg = NULL;
        uint64_t user_addr = 0;
@@ -1161,7 +1412,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
        } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
                domain = AMDGPU_GEM_DOMAIN_GTT;
                alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
-               alloc_flags = 0;
+               alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE;
                if (!offset || !*offset)
                        return -EINVAL;
                user_addr = untagged_addr(*offset);
@@ -1185,7 +1436,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
                ret = -ENOMEM;
                goto err;
        }
-       INIT_LIST_HEAD(&(*mem)->bo_va_list);
+       INIT_LIST_HEAD(&(*mem)->attachments);
        mutex_init(&(*mem)->lock);
        (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
 
@@ -1216,6 +1467,11 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
                         domain_string(alloc_domain), ret);
                goto err_bo_create;
        }
+       ret = drm_vma_node_allow(&gobj->vma_node, drm_priv);
+       if (ret) {
+               pr_debug("Failed to allow vma node access. ret %d\n", ret);
+               goto err_node_allow;
+       }
        bo = gem_to_amdgpu_bo(gobj);
        if (bo_type == ttm_bo_type_sg) {
                bo->tbo.sg = sg;
@@ -1224,7 +1480,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
        bo->kfd_bo = *mem;
        (*mem)->bo = bo;
        if (user_addr)
-               bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;
+               bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO;
 
        (*mem)->va = va;
        (*mem)->domain = domain;
@@ -1245,6 +1501,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 
 allocate_init_user_pages_failed:
        remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
+       drm_vma_node_revoke(&gobj->vma_node, drm_priv);
+err_node_allow:
        amdgpu_bo_unref(&bo);
        /* Don't unreserve system mem limit twice */
        goto err_reserve_limit;
@@ -1262,11 +1520,12 @@ err:
 }
 
 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
-               struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size)
+               struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv,
+               uint64_t *size)
 {
        struct amdkfd_process_info *process_info = mem->process_info;
        unsigned long bo_size = mem->bo->tbo.base.size;
-       struct kfd_bo_va_list *entry, *tmp;
+       struct kfd_mem_attachment *entry, *tmp;
        struct bo_vm_reservation_context ctx;
        struct ttm_validate_buffer *bo_list_entry;
        unsigned int mapped_to_gpu_memory;
@@ -1309,13 +1568,12 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
        pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
                mem->va + bo_size * (1 + mem->aql_queue));
 
-       /* Remove from VM internal data structures */
-       list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
-               remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
-                               entry, bo_size);
-
        ret = unreserve_bo_and_vms(&ctx, false, false);
 
+       /* Remove from VM internal data structures */
+       list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
+               kfd_mem_detach(entry);
+
        /* Free the sync object */
        amdgpu_sync_free(&mem->sync);
 
@@ -1339,6 +1597,9 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
        }
 
        /* Free the BO*/
+       drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
+       if (mem->dmabuf)
+               dma_buf_put(mem->dmabuf);
        drm_gem_object_put(&mem->bo->tbo.base);
        mutex_destroy(&mem->lock);
        kfree(mem);
@@ -1347,17 +1608,15 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 }
 
 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
-               struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
+               struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
 {
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
-       struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+       struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
        int ret;
        struct amdgpu_bo *bo;
        uint32_t domain;
-       struct kfd_bo_va_list *entry;
+       struct kfd_mem_attachment *entry;
        struct bo_vm_reservation_context ctx;
-       struct kfd_bo_va_list *bo_va_entry = NULL;
-       struct kfd_bo_va_list *bo_va_entry_aql = NULL;
        unsigned long bo_size;
        bool is_invalid_userptr = false;
 
@@ -1391,9 +1650,15 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
        pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
                        mem->va,
                        mem->va + bo_size * (1 + mem->aql_queue),
-                       vm, domain_string(domain));
+                       avm, domain_string(domain));
 
-       ret = reserve_bo_and_vm(mem, vm, &ctx);
+       if (!kfd_mem_is_attached(avm, mem)) {
+               ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue);
+               if (ret)
+                       goto out;
+       }
+
+       ret = reserve_bo_and_vm(mem, avm, &ctx);
        if (unlikely(ret))
                goto out;
 
@@ -1406,22 +1671,9 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
            bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
                is_invalid_userptr = true;
 
-       if (check_if_add_bo_to_vm(avm, mem)) {
-               ret = add_bo_to_vm(adev, mem, avm, false,
-                               &bo_va_entry);
-               if (ret)
-                       goto add_bo_to_vm_failed;
-               if (mem->aql_queue) {
-                       ret = add_bo_to_vm(adev, mem, avm,
-                                       true, &bo_va_entry_aql);
-                       if (ret)
-                               goto add_bo_to_vm_failed_aql;
-               }
-       } else {
-               ret = vm_validate_pt_pd_bos(avm);
-               if (unlikely(ret))
-                       goto add_bo_to_vm_failed;
-       }
+       ret = vm_validate_pt_pd_bos(avm);
+       if (unlikely(ret))
+               goto out_unreserve;
 
        if (mem->mapped_to_gpu_memory == 0 &&
            !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
@@ -1432,34 +1684,34 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
                ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
                if (ret) {
                        pr_debug("Validate failed\n");
-                       goto map_bo_to_gpuvm_failed;
+                       goto out_unreserve;
                }
        }
 
-       list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
-               if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
-                       pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
-                                       entry->va, entry->va + bo_size,
-                                       entry);
+       list_for_each_entry(entry, &mem->attachments, list) {
+               if (entry->bo_va->base.vm != avm || entry->is_mapped)
+                       continue;
 
-                       ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
-                                             is_invalid_userptr);
-                       if (ret) {
-                               pr_err("Failed to map bo to gpuvm\n");
-                               goto map_bo_to_gpuvm_failed;
-                       }
+               pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
+                        entry->va, entry->va + bo_size, entry);
 
-                       ret = vm_update_pds(vm, ctx.sync);
-                       if (ret) {
-                               pr_err("Failed to update page directories\n");
-                               goto map_bo_to_gpuvm_failed;
-                       }
+               ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
+                                     is_invalid_userptr);
+               if (ret) {
+                       pr_err("Failed to map bo to gpuvm\n");
+                       goto out_unreserve;
+               }
 
-                       entry->is_mapped = true;
-                       mem->mapped_to_gpu_memory++;
-                       pr_debug("\t INC mapping count %d\n",
-                                       mem->mapped_to_gpu_memory);
+               ret = vm_update_pds(avm, ctx.sync);
+               if (ret) {
+                       pr_err("Failed to update page directories\n");
+                       goto out_unreserve;
                }
+
+               entry->is_mapped = true;
+               mem->mapped_to_gpu_memory++;
+               pr_debug("\t INC mapping count %d\n",
+                        mem->mapped_to_gpu_memory);
        }
 
        if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
@@ -1470,13 +1722,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 
        goto out;
 
-map_bo_to_gpuvm_failed:
-       if (bo_va_entry_aql)
-               remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
-add_bo_to_vm_failed_aql:
-       if (bo_va_entry)
-               remove_bo_from_vm(adev, bo_va_entry, bo_size);
-add_bo_to_vm_failed:
+out_unreserve:
        unreserve_bo_and_vms(&ctx, false, false);
 out:
        mutex_unlock(&mem->process_info->lock);
@@ -1485,19 +1731,18 @@ out:
 }
 
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
-               struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
+               struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
 {
-       struct amdgpu_device *adev = get_amdgpu_device(kgd);
-       struct amdkfd_process_info *process_info =
-               ((struct amdgpu_vm *)vm)->process_info;
+       struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
+       struct amdkfd_process_info *process_info = avm->process_info;
        unsigned long bo_size = mem->bo->tbo.base.size;
-       struct kfd_bo_va_list *entry;
+       struct kfd_mem_attachment *entry;
        struct bo_vm_reservation_context ctx;
        int ret;
 
        mutex_lock(&mem->lock);
 
-       ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
+       ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx);
        if (unlikely(ret))
                goto out;
        /* If no VMs were reserved, it means the BO wasn't actually mapped */
@@ -1506,35 +1751,28 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
                goto unreserve_out;
        }
 
-       ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
+       ret = vm_validate_pt_pd_bos(avm);
        if (unlikely(ret))
                goto unreserve_out;
 
        pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
                mem->va,
                mem->va + bo_size * (1 + mem->aql_queue),
-               vm);
-
-       list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
-               if (entry->bo_va->base.vm == vm && entry->is_mapped) {
-                       pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
-                                       entry->va,
-                                       entry->va + bo_size,
-                                       entry);
-
-                       ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
-                       if (ret == 0) {
-                               entry->is_mapped = false;
-                       } else {
-                               pr_err("failed to unmap VA 0x%llx\n",
-                                               mem->va);
-                               goto unreserve_out;
-                       }
+               avm);
 
-                       mem->mapped_to_gpu_memory--;
-                       pr_debug("\t DEC mapping count %d\n",
-                                       mem->mapped_to_gpu_memory);
-               }
+       list_for_each_entry(entry, &mem->attachments, list) {
+               if (entry->bo_va->base.vm != avm || !entry->is_mapped)
+                       continue;
+
+               pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
+                        entry->va, entry->va + bo_size, entry);
+
+               unmap_bo_from_gpuvm(mem, entry, ctx.sync);
+               entry->is_mapped = false;
+
+               mem->mapped_to_gpu_memory--;
+               pr_debug("\t DEC mapping count %d\n",
+                        mem->mapped_to_gpu_memory);
        }
 
        /* If BO is unmapped from all VMs, unfence it. It can be evicted if
@@ -1642,14 +1880,15 @@ int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
 
 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
                                      struct dma_buf *dma_buf,
-                                     uint64_t va, void *vm,
+                                     uint64_t va, void *drm_priv,
                                      struct kgd_mem **mem, uint64_t *size,
                                      uint64_t *mmap_offset)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+       struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
        struct drm_gem_object *obj;
        struct amdgpu_bo *bo;
-       struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+       int ret;
 
        if (dma_buf->ops != &amdgpu_dmabuf_ops)
                /* Can't handle non-graphics buffers */
@@ -1670,13 +1909,19 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
        if (!*mem)
                return -ENOMEM;
 
+       ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
+       if (ret) {
+               kfree(*mem);
+               return ret;
+       }
+
        if (size)
                *size = amdgpu_bo_size(bo);
 
        if (mmap_offset)
                *mmap_offset = amdgpu_bo_mmap_offset(bo);
 
-       INIT_LIST_HEAD(&(*mem)->bo_va_list);
+       INIT_LIST_HEAD(&(*mem)->attachments);
        mutex_init(&(*mem)->lock);
 
        (*mem)->alloc_flags =
@@ -1873,7 +2118,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
        list_for_each_entry_safe(mem, tmp_mem,
                                 &process_info->userptr_inval_list,
                                 validate_list.head) {
-               struct kfd_bo_va_list *bo_va_entry;
+               struct kfd_mem_attachment *attachment;
 
                bo = mem->bo;
 
@@ -1896,13 +2141,12 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
                 * VM faults if the GPU tries to access the invalid
                 * memory.
                 */
-               list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
-                       if (!bo_va_entry->is_mapped)
+               list_for_each_entry(attachment, &mem->attachments, list) {
+                       if (!attachment->is_mapped)
                                continue;
 
-                       ret = update_gpuvm_pte((struct amdgpu_device *)
-                                              bo_va_entry->kgd_dev,
-                                              bo_va_entry, &sync);
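+                       /* Undo the old DMA mapping so update_gpuvm_pte()
+                        * remaps the freshly validated user pages
+                        */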
+                       kfd_mem_dmaunmap_attachment(mem, attachment);
+                       ret = update_gpuvm_pte(mem, attachment, &sync);
                        if (ret) {
                                pr_err("%s: update PTE failed\n", __func__);
                                /* make sure this gets validated again */
@@ -2083,7 +2327,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 
                struct amdgpu_bo *bo = mem->bo;
                uint32_t domain = mem->domain;
-               struct kfd_bo_va_list *bo_va_entry;
+               struct kfd_mem_attachment *attachment;
 
                total_size += amdgpu_bo_size(bo);
 
@@ -2103,12 +2347,12 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
                        pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
                        goto validate_map_fail;
                }
-               list_for_each_entry(bo_va_entry, &mem->bo_va_list,
-                                   bo_list) {
-                       ret = update_gpuvm_pte((struct amdgpu_device *)
-                                             bo_va_entry->kgd_dev,
-                                             bo_va_entry,
-                                             &sync_obj);
+               list_for_each_entry(attachment, &mem->attachments, list) {
+                       if (!attachment->is_mapped)
+                               continue;
+
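+                       /* Redo the DMA mapping in case the BO has moved
+                        * since it was last mapped
+                        */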
+                       kfd_mem_dmaunmap_attachment(mem, attachment);
+                       ret = update_gpuvm_pte(mem, attachment, &sync_obj);
                        if (ret) {
                                pr_debug("Memory eviction: update PTE failed. Try again\n");
                                goto validate_map_fail;
@@ -2135,7 +2379,8 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
         */
        new_fence = amdgpu_amdkfd_fence_create(
                                process_info->eviction_fence->base.context,
-                               process_info->eviction_fence->mm);
+                               process_info->eviction_fence->mm,
+                               NULL);
        if (!new_fence) {
                pr_err("Failed to create eviction fence\n");
                ret = -ENOMEM;
@@ -2182,7 +2427,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem
                return -ENOMEM;
 
        mutex_init(&(*mem)->lock);
-       INIT_LIST_HEAD(&(*mem)->bo_va_list);
+       INIT_LIST_HEAD(&(*mem)->attachments);
        (*mem)->bo = amdgpu_bo_ref(gws_bo);
        (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
        (*mem)->process_info = process_info;