Merge tag 'drm-fixes-for-v4.14-rc1' of git://people.freedesktop.org/~airlied/linux
author    Linus Torvalds <torvalds@linux-foundation.org>
Sat, 16 Sep 2017 00:52:52 +0000 (17:52 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 16 Sep 2017 00:52:52 +0000 (17:52 -0700)
Pull drm AMD fixes from Dave Airlie:
 "Just had a single AMD fixes pull from Alex for rc1"

* tag 'drm-fixes-for-v4.14-rc1' of git://people.freedesktop.org/~airlied/linux:
  drm/amdgpu: revert "fix deadlock of reservation between cs and gpu reset v2"
  drm/amdgpu: remove duplicate return statement
  drm/amdgpu: check memory allocation failure
  drm/amd/amdgpu: fix BANK_SELECT on Vega10 (v2)
  drm/amdgpu: inline amdgpu_ttm_do_bind again
  drm/amdgpu: fix amdgpu_ttm_bind
  drm/amdgpu: remove the GART copy hack
  drm/ttm: fix wrong decoding of bo_count
  drm/ttm: fix missing inc bo_count
  drm/amdgpu: set sched_hw_submission higher for KIQ (v3)
  drm/amdgpu: move default gart size setting into gmc modules
  drm/amdgpu: refine default gart size
  drm/amd/powerplay: ACG frequency added in PPTable
  drm/amdgpu: discard commands of killed processes
  drm/amdgpu: fix and cleanup shadow handling
  drm/amdgpu: add automatic per asic settings for gart_size
  drm/amdgpu/gfx8: fix spelling typo in mqd allocation
  drm/amd/powerplay: unhalt mec after loading
  drm/amdgpu/virtual_dce: Virtual display doesn't support disable vblank immediately
  drm/amdgpu: Fix huge page updates with CPU

drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@@ -165,14 -165,6 +165,6 @@@ static int amdgpu_vm_validate_level(str
        unsigned i;
        int r;
  
-       if (parent->bo->shadow) {
-               struct amdgpu_bo *shadow = parent->bo->shadow;
-               r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
-               if (r)
-                       return r;
-       }
        if (use_cpu_for_update) {
                r = amdgpu_bo_kmap(parent->bo, NULL);
                if (r)
@@@ -1277,7 -1269,7 +1269,7 @@@ static void amdgpu_vm_handle_huge_pages
        /* In the case of a mixed PT the PDE must point to it*/
        if (p->adev->asic_type < CHIP_VEGA10 ||
            nptes != AMDGPU_VM_PTE_COUNT(p->adev) ||
-           p->func == amdgpu_vm_do_copy_ptes ||
+           p->src ||
            !(flags & AMDGPU_PTE_VALID)) {
  
                dst = amdgpu_bo_gpu_offset(entry->bo);
        entry->addr = (dst | flags);
  
        if (use_cpu_update) {
+               /* In case a huge page is replaced with a system
+                * memory mapping, p->pages_addr != NULL and
+                * amdgpu_vm_cpu_set_ptes would try to translate dst
+                * through amdgpu_vm_map_gart. But dst is already a
+                * GPU address (of the page table). Disable
+                * amdgpu_vm_map_gart temporarily.
+                */
+               dma_addr_t *tmp;
+               tmp = p->pages_addr;
+               p->pages_addr = NULL;
                pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo);
                pde = pd_addr + (entry - parent->entries) * 8;
                amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags);
+               p->pages_addr = tmp;
        } else {
                if (parent->bo->shadow) {
                        pd_addr = amdgpu_bo_gpu_offset(parent->bo->shadow);
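
The pages_addr save/restore above is the core of the "Fix huge page updates with CPU" change: amdgpu_vm_cpu_set_ptes() treats a non-NULL pages_addr as a request to translate every destination through amdgpu_vm_map_gart(), which is wrong here because dst already holds the GPU address of the replaced page table. A minimal sketch of the idiom, with illustrative names standing in for the real amdgpu types:

	/* Sketch only -- the struct and helper names are illustrative,
	 * not the actual amdgpu interfaces. */
	struct update_params {
		dma_addr_t *pages_addr;	/* non-NULL => translate via GART */
	};

	static void write_pde_raw(struct update_params *p,
				  uint64_t pde, uint64_t dst, uint64_t flags)
	{
		dma_addr_t *saved = p->pages_addr;

		p->pages_addr = NULL;	/* dst is already a GPU address */
		cpu_set_ptes(p, pde, dst, 1, 0, flags);
		p->pages_addr = saved;	/* later PTE writes translate again */
	}
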
@@@ -1610,7 -1616,6 +1616,6 @@@ error_free
   *
   * @adev: amdgpu_device pointer
   * @exclusive: fence we need to sync to
-  * @gtt_flags: flags as they are used for GTT
   * @pages_addr: DMA addresses to use for mapping
   * @vm: requested vm
   * @mapping: mapped range and flags to use for the update
   */
  static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                                      struct dma_fence *exclusive,
-                                     uint64_t gtt_flags,
                                      dma_addr_t *pages_addr,
                                      struct amdgpu_vm *vm,
                                      struct amdgpu_bo_va_mapping *mapping,
                }
  
                if (pages_addr) {
-                       if (flags == gtt_flags)
-                               src = adev->gart.table_addr +
-                                       (addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
-                       else
-                               max_entries = min(max_entries, 16ull * 1024ull);
+                       max_entries = min(max_entries, 16ull * 1024ull);
                        addr = 0;
                } else if (flags & AMDGPU_PTE_VALID) {
                        addr += adev->vm_manager.vram_base_offset;
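
With the GART copy hack removed, mappings backed by pages_addr are no longer copied straight out of the GART table when the flags happen to match; every system-memory update now goes through the translation path and is capped at 16 * 1024 entries per split. Assuming the usual 4 KiB GPU page size, that works out to 16384 * 4 KiB = 64 MiB of address space per iteration, which keeps any single page-table update to a bounded amount of work.
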
@@@ -1728,10 -1728,10 +1728,10 @@@ int amdgpu_vm_bo_update(struct amdgpu_d
        struct amdgpu_vm *vm = bo_va->base.vm;
        struct amdgpu_bo_va_mapping *mapping;
        dma_addr_t *pages_addr = NULL;
-       uint64_t gtt_flags, flags;
        struct ttm_mem_reg *mem;
        struct drm_mm_node *nodes;
        struct dma_fence *exclusive;
+       uint64_t flags;
        int r;
  
        if (clear || !bo_va->base.bo) {
                exclusive = reservation_object_get_excl(bo->tbo.resv);
        }
  
-       if (bo) {
+       if (bo)
                flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
-               gtt_flags = (amdgpu_ttm_is_bound(bo->tbo.ttm) &&
-                       adev == amdgpu_ttm_adev(bo->tbo.bdev)) ?
-                       flags : 0;
-       } else {
+       else
                flags = 0x0;
-               gtt_flags = ~0x0;
-       }
  
        spin_lock(&vm->status_lock);
        if (!list_empty(&bo_va->base.vm_status))
        spin_unlock(&vm->status_lock);
  
        list_for_each_entry(mapping, &bo_va->invalids, list) {
-               r = amdgpu_vm_bo_split_mapping(adev, exclusive,
-                                              gtt_flags, pages_addr, vm,
+               r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
                                               mapping, flags, nodes,
                                               &bo_va->last_pt_update);
                if (r)
@@@ -2475,7 -2469,7 +2469,7 @@@ int amdgpu_vm_init(struct amdgpu_devic
        u64 flags;
        uint64_t init_pde_value = 0;
  
 -      vm->va = RB_ROOT;
 +      vm->va = RB_ROOT_CACHED;
        vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
        for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
                vm->reserved_vmid[i] = NULL;
@@@ -2596,11 -2590,10 +2590,11 @@@ void amdgpu_vm_fini(struct amdgpu_devic
  
        amd_sched_entity_fini(vm->entity.sched, &vm->entity);
  
 -      if (!RB_EMPTY_ROOT(&vm->va)) {
 +      if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
                dev_err(adev->dev, "still active bo inside vm\n");
        }
 -      rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, rb) {
 +      rbtree_postorder_for_each_entry_safe(mapping, tmp,
 +                                           &vm->va.rb_root, rb) {
                list_del(&mapping->list);
                amdgpu_vm_it_remove(mapping, &vm->va);
                kfree(mapping);
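
The RB_ROOT_CACHED conversion in amdgpu_vm_init()/amdgpu_vm_fini() follows the cached-rbtree pattern introduced in 4.14: the cached root embeds a plain rb_root plus a pointer to the leftmost node, so the plain helpers operate on the .rb_root member while insertions keep the cache current. A minimal sketch against the kernel rbtree API (the item type is illustrative):

	#include <linux/rbtree.h>

	struct item {
		struct rb_node rb;
	};

	struct rb_root_cached root = RB_ROOT_CACHED;

	/* Insertion maintains the cached leftmost node, making
	 * rb_first_cached() O(1): */
	rb_link_node(&new->rb, parent, link);
	rb_insert_color_cached(&new->rb, &root, leftmost);

	/* The plain rb_root helpers take the embedded member: */
	if (!RB_EMPTY_ROOT(&root.rb_root))
		pr_err("tree not empty\n");

	rbtree_postorder_for_each_entry_safe(it, tmp, &root.rb_root, rb)
		kfree(it);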