drm/amdgpu: cleanup amdgpu_ttm_copy_mem_to_mem and amdgpu_map_buffer v2
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index c6e9885..7a73282 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
 #include "amdgpu_ras.h"
 #include "bif/bif_4_1_d.h"
 
-static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
-                            struct ttm_mem_reg *mem, unsigned num_pages,
-                            uint64_t offset, unsigned window,
-                            struct amdgpu_ring *ring,
-                            uint64_t *addr);
+#define AMDGPU_TTM_VRAM_MAX_DW_READ    (size_t)128
 
-static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
-static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
-
-static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
-{
-       return 0;
-}
 
 /**
  * amdgpu_init_mem_type - Initialize a memory manager for a specific type of
@@ -283,7 +272,7 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
  *
  */
 static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
-                                              unsigned long *offset)
+                                              uint64_t *offset)
 {
        struct drm_mm_node *mm_node = mem->mm_node;
 
@@ -294,92 +283,170 @@ static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
        return mm_node;
 }
 
+/**
+ * amdgpu_ttm_map_buffer - Map memory into the GART windows
+ * @bo: buffer object to map
+ * @mem: memory object to map
+ * @mm_node: drm_mm node object to map
+ * @num_pages: number of pages to map
+ * @offset: offset into @mm_node where to start
+ * @window: which GART window to use
+ * @ring: DMA ring to use for the copy
+ * @tmz: if we should set up a TMZ-enabled mapping
+ * @addr: resulting address inside the MC address space
+ *
+ * Set up one of the GART windows to access a specific piece of memory or return
+ * the physical address for local memory.
+ */
+static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
+                                struct ttm_mem_reg *mem,
+                                struct drm_mm_node *mm_node,
+                                unsigned num_pages, uint64_t offset,
+                                unsigned window, struct amdgpu_ring *ring,
+                                bool tmz, uint64_t *addr)
+{
+       struct ttm_dma_tt *dma = container_of(bo->ttm, struct ttm_dma_tt, ttm);
+       struct amdgpu_device *adev = ring->adev;
+       struct amdgpu_job *job;
+       unsigned num_dw, num_bytes;
+       dma_addr_t *dma_address;
+       struct dma_fence *fence;
+       uint64_t src_addr, dst_addr;
+       uint64_t flags;
+       int r;
+
+       BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
+              AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
+
+       /* Map only what can't be accessed directly */
+       if (mem->start != AMDGPU_BO_INVALID_OFFSET) {
+               *addr = amdgpu_mm_node_addr(bo, mm_node, mem) + offset;
+               return 0;
+       }
+
+       *addr = adev->gmc.gart_start;
+       *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
+               AMDGPU_GPU_PAGE_SIZE;
+       *addr += offset & ~PAGE_MASK;
+
+       num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
+       num_bytes = num_pages * 8;
+
+       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
+                                    AMDGPU_IB_POOL_NORMAL, &job);
+       if (r)
+               return r;
+
+       src_addr = num_dw * 4;
+       src_addr += job->ibs[0].gpu_addr;
+
+       dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
+       dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
+       amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
+                               dst_addr, num_bytes, false);
+
+       amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+       WARN_ON(job->ibs[0].length_dw > num_dw);
+
+       dma_address = &dma->dma_address[offset >> PAGE_SHIFT];
+       flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
+       if (tmz)
+               flags |= AMDGPU_PTE_TMZ;
+
+       r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
+                           &job->ibs[0].ptr[num_dw]);
+       if (r)
+               goto error_free;
+
+       r = amdgpu_job_submit(job, &adev->mman.entity,
+                             AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
+       if (r)
+               goto error_free;
+
+       dma_fence_put(fence);
+
+       return r;
+
+error_free:
+       amdgpu_job_free(job);
+       return r;
+}
+
 /**
  * amdgpu_copy_ttm_mem_to_mem - Helper function for copy
+ * @adev: amdgpu device
+ * @src: buffer/address where to read from
+ * @dst: buffer/address where to write to
+ * @size: number of bytes to copy
+ * @tmz: if a secure copy should be used
+ * @resv: resv object to sync to
+ * @f: Returns the last fence if multiple jobs are submitted.
  *
  * The function copies @size bytes from {src->mem + src->offset} to
  * {dst->mem + dst->offset}. src->bo and dst->bo could be the same BO for a
  * move and different for a BO-to-BO copy.
  *
- * @f: Returns the last fence if multiple jobs are submitted.
  */
 int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
-                              struct amdgpu_copy_mem *src,
-                              struct amdgpu_copy_mem *dst,
-                              uint64_t size,
+                              const struct amdgpu_copy_mem *src,
+                              const struct amdgpu_copy_mem *dst,
+                              uint64_t size, bool tmz,
                               struct dma_resv *resv,
                               struct dma_fence **f)
 {
+       const uint32_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
+                                       AMDGPU_GPU_PAGE_SIZE);
+
+       uint64_t src_node_size, dst_node_size, src_offset, dst_offset;
        struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
        struct drm_mm_node *src_mm, *dst_mm;
-       uint64_t src_node_start, dst_node_start, src_node_size,
-                dst_node_size, src_page_offset, dst_page_offset;
        struct dma_fence *fence = NULL;
        int r = 0;
-       const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
-                                       AMDGPU_GPU_PAGE_SIZE);
 
        if (!adev->mman.buffer_funcs_enabled) {
                DRM_ERROR("Trying to move memory with ring turned off.\n");
                return -EINVAL;
        }
 
-       src_mm = amdgpu_find_mm_node(src->mem, &src->offset);
-       src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) +
-                                            src->offset;
-       src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset;
-       src_page_offset = src_node_start & (PAGE_SIZE - 1);
+       src_offset = src->offset;
+       src_mm = amdgpu_find_mm_node(src->mem, &src_offset);
+       src_node_size = (src_mm->size << PAGE_SHIFT) - src_offset;
 
-       dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset);
-       dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) +
-                                            dst->offset;
-       dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset;
-       dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
+       dst_offset = dst->offset;
+       dst_mm = amdgpu_find_mm_node(dst->mem, &dst_offset);
+       dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst_offset;
 
        mutex_lock(&adev->mman.gtt_window_lock);
 
        while (size) {
-               unsigned long cur_size;
-               uint64_t from = src_node_start, to = dst_node_start;
+               uint32_t src_page_offset = src_offset & ~PAGE_MASK;
+               uint32_t dst_page_offset = dst_offset & ~PAGE_MASK;
                struct dma_fence *next;
+               uint32_t cur_size;
+               uint64_t from, to;
 
                /* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
                 * begins at an offset, then adjust the size accordingly
                 */
-               cur_size = min3(min(src_node_size, dst_node_size), size,
-                               GTT_MAX_BYTES);
-               if (cur_size + src_page_offset > GTT_MAX_BYTES ||
-                   cur_size + dst_page_offset > GTT_MAX_BYTES)
-                       cur_size -= max(src_page_offset, dst_page_offset);
-
-               /* Map only what needs to be accessed. Map src to window 0 and
-                * dst to window 1
-                */
-               if (src->mem->start == AMDGPU_BO_INVALID_OFFSET) {
-                       r = amdgpu_map_buffer(src->bo, src->mem,
-                                       PFN_UP(cur_size + src_page_offset),
-                                       src_node_start, 0, ring,
-                                       &from);
-                       if (r)
-                               goto error;
-                       /* Adjust the offset because amdgpu_map_buffer returns
-                        * start of mapped page
-                        */
-                       from += src_page_offset;
-               }
+               cur_size = min3(src_node_size, dst_node_size, size);
+               cur_size = min(GTT_MAX_BYTES - src_page_offset, cur_size);
+               cur_size = min(GTT_MAX_BYTES - dst_page_offset, cur_size);
+
+               /* Map src to window 0 and dst to window 1. */
+               r = amdgpu_ttm_map_buffer(src->bo, src->mem, src_mm,
+                                         PFN_UP(cur_size + src_page_offset),
+                                         src_offset, 0, ring, tmz, &from);
+               if (r)
+                       goto error;
 
-               if (dst->mem->start == AMDGPU_BO_INVALID_OFFSET) {
-                       r = amdgpu_map_buffer(dst->bo, dst->mem,
-                                       PFN_UP(cur_size + dst_page_offset),
-                                       dst_node_start, 1, ring,
-                                       &to);
-                       if (r)
-                               goto error;
-                       to += dst_page_offset;
-               }
+               r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, dst_mm,
+                                         PFN_UP(cur_size + dst_page_offset),
+                                         dst_offset, 1, ring, tmz, &to);
+               if (r)
+                       goto error;
 
                r = amdgpu_copy_buffer(ring, from, to, cur_size,
-                                      resv, &next, false, true);
+                                      resv, &next, false, true, tmz);
                if (r)
                        goto error;
 
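A minimal stand-alone sketch of the chunk clamping in the copy loop above; the GART window size used here (512 GPU pages of 4 KiB, i.e. 2 MiB) and all input values are assumptions for illustration only, not values taken from the patch:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GPU_PAGE_SIZE (assumed 2 MiB). */
#define GTT_MAX_BYTES	(512u * 4096u)
#define MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	uint64_t src_node_size = 8ull << 20;	/* bytes left in the source drm_mm node */
	uint64_t dst_node_size = 3ull << 20;	/* bytes left in the destination drm_mm node */
	uint64_t size = 6ull << 20;		/* bytes still to copy */
	uint32_t src_page_offset = 0x800;	/* src_offset & ~PAGE_MASK */
	uint32_t dst_page_offset = 0x000;	/* dst_offset & ~PAGE_MASK */
	uint32_t cur_size;

	/* Same three-way min as the driver loop ... */
	cur_size = MIN(MIN(src_node_size, dst_node_size), size);
	/* ... then shrink so that offset + size still fits in one GART window. */
	cur_size = MIN(GTT_MAX_BYTES - src_page_offset, cur_size);
	cur_size = MIN(GTT_MAX_BYTES - dst_page_offset, cur_size);

	printf("cur_size = 0x%x bytes\n", cur_size);	/* 0x1ff800 */
	return 0;
}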
@@ -392,23 +459,20 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 
                src_node_size -= cur_size;
                if (!src_node_size) {
-                       src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm,
-                                                            src->mem);
-                       src_node_size = (src_mm->size << PAGE_SHIFT);
-                       src_page_offset = 0;
+                       ++src_mm;
+                       src_node_size = src_mm->size << PAGE_SHIFT;
+                       src_offset = 0;
                } else {
-                       src_node_start += cur_size;
-                       src_page_offset = src_node_start & (PAGE_SIZE - 1);
+                       src_offset += cur_size;
                }
+
                dst_node_size -= cur_size;
                if (!dst_node_size) {
-                       dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
-                                                            dst->mem);
-                       dst_node_size = (dst_mm->size << PAGE_SHIFT);
-                       dst_page_offset = 0;
+                       ++dst_mm;
+                       dst_node_size = dst_mm->size << PAGE_SHIFT;
+                       dst_offset = 0;
                } else {
-                       dst_node_start += cur_size;
-                       dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
+                       dst_offset += cur_size;
                }
        }
 error:
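For reference, the GTT-window address that amdgpu_ttm_map_buffer hands back for src (window 0) and dst (window 1) is simple arithmetic on the GART base; a small sketch of that computation with hypothetical values (gart_start and the window geometry are assumptions, not values read from real hardware):

#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE_SIZE		4096ull		/* stand-in for AMDGPU_GPU_PAGE_SIZE */
#define GTT_MAX_TRANSFER_SIZE	512ull		/* stand-in for AMDGPU_GTT_MAX_TRANSFER_SIZE */
#define PAGE_MASK		(~0xfffull)	/* 4 KiB CPU pages assumed */

int main(void)
{
	uint64_t gart_start = 0x0000008000000000ull;	/* hypothetical adev->gmc.gart_start */
	uint64_t offset = 0x12345;			/* byte offset into the mapped pages */
	unsigned int window = 1;			/* dst window, as used in the copy loop */

	/* Mirrors the *addr computation in amdgpu_ttm_map_buffer. */
	uint64_t addr = gart_start;
	addr += (uint64_t)window * GTT_MAX_TRANSFER_SIZE * GPU_PAGE_SIZE;
	addr += offset & ~PAGE_MASK;

	printf("window address = 0x%llx\n", (unsigned long long)addr);	/* 0x8000200345 */
	return 0;
}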
@@ -431,6 +495,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
                            struct ttm_mem_reg *old_mem)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+       struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
        struct amdgpu_copy_mem src, dst;
        struct dma_fence *fence = NULL;
        int r;
@@ -444,14 +509,14 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 
        r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
                                       new_mem->num_pages << PAGE_SHIFT,
+                                      amdgpu_bo_encrypted(abo),
                                       bo->base.resv, &fence);
        if (r)
                goto error;
 
        /* clear the space being freed */
        if (old_mem->mem_type == TTM_PL_VRAM &&
-           (ttm_to_amdgpu_bo(bo)->flags &
-            AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
+           (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
                struct dma_fence *wipe_fence = NULL;
 
                r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON,
@@ -748,8 +813,8 @@ static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
 static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
                                           unsigned long page_offset)
 {
+       uint64_t offset = (page_offset << PAGE_SHIFT);
        struct drm_mm_node *mm;
-       unsigned long offset = (page_offset << PAGE_SHIFT);
 
        mm = amdgpu_find_mm_node(&bo->mem, &offset);
        return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
@@ -1034,7 +1099,10 @@ int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
        int r;
 
-       if (abo->flags & AMDGPU_GEM_CREATE_MQD_GFX9) {
+       if (amdgpu_bo_encrypted(abo))
+               flags |= AMDGPU_PTE_TMZ;
+
+       if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
                uint64_t page_idx = 1;
 
                r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
@@ -1042,7 +1110,10 @@ int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
                if (r)
                        goto gart_bind_fail;
 
-               /* Patch mtype of the second part BO */
+               /* The memory type of the first page defaults to UC. Now
+                * modify the memory type to NC from the second page of
+                * the BO onward.
+                */
                flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
                flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
 
@@ -1543,6 +1614,9 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 
        switch (bo->mem.mem_type) {
        case TTM_PL_TT:
+               if (amdgpu_bo_is_amdgpu_bo(bo) &&
+                   amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
+                       return false;
                return true;
 
        case TTM_PL_VRAM:
@@ -1591,12 +1665,13 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
        if (bo->mem.mem_type != TTM_PL_VRAM)
                return -EIO;
 
-       nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
-       pos = (nodes->start << PAGE_SHIFT) + offset;
+       pos = offset;
+       nodes = amdgpu_find_mm_node(&abo->tbo.mem, &pos);
+       pos += (nodes->start << PAGE_SHIFT);
 
        while (len && pos < adev->gmc.mc_vram_size) {
                uint64_t aligned_pos = pos & ~(uint64_t)3;
-               uint32_t bytes = 4 - (pos & 3);
+               uint64_t bytes = 4 - (pos & 3);
                uint32_t shift = (pos & 3) * 8;
                uint32_t mask = 0xffffffff << shift;
 
@@ -1605,20 +1680,28 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
                        bytes = len;
                }
 
-               spin_lock_irqsave(&adev->mmio_idx_lock, flags);
-               WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
-               WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
-               if (!write || mask != 0xffffffff)
-                       value = RREG32_NO_KIQ(mmMM_DATA);
-               if (write) {
-                       value &= ~mask;
-                       value |= (*(uint32_t *)buf << shift) & mask;
-                       WREG32_NO_KIQ(mmMM_DATA, value);
-               }
-               spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
-               if (!write) {
-                       value = (value & mask) >> shift;
-                       memcpy(buf, &value, bytes);
+               if (mask != 0xffffffff) {
+                       spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+                       WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
+                       WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
+                       if (!write || mask != 0xffffffff)
+                               value = RREG32_NO_KIQ(mmMM_DATA);
+                       if (write) {
+                               value &= ~mask;
+                               value |= (*(uint32_t *)buf << shift) & mask;
+                               WREG32_NO_KIQ(mmMM_DATA, value);
+                       }
+                       spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+                       if (!write) {
+                               value = (value & mask) >> shift;
+                               memcpy(buf, &value, bytes);
+                       }
+               } else {
+                       bytes = (nodes->start + nodes->size) << PAGE_SHIFT;
+                       bytes = min(bytes - pos, (uint64_t)len & ~0x3ull);
+
+                       amdgpu_device_vram_access(adev, pos, (uint32_t *)buf,
+                                                 bytes, write);
                }
 
                ret += bytes;
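The read-modify-write path kept above for unaligned head and tail bytes can be shown in isolation; a user-space sketch, with the MM_INDEX/MM_DATA register access replaced by a plain variable and all values made up for the example:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint32_t vram_dword = 0xdeadbeef;	/* pretend dword at aligned_pos */
	unsigned char buf[4] = { 0x11, 0x22, 0x00, 0x00 };	/* caller data (little-endian) */
	uint64_t pos = 0x1002;			/* unaligned VRAM position */
	uint32_t bytes = 4 - (pos & 3);		/* 2 bytes up to the next dword boundary */
	uint32_t shift = (pos & 3) * 8;		/* 16 */
	uint32_t mask = 0xffffffff << shift;	/* 0xffff0000 */
	uint32_t value, src_word;

	memcpy(&src_word, buf, sizeof(src_word));

	value = vram_dword;			/* read the whole aligned dword first */
	value &= ~mask;				/* clear the bytes being replaced */
	value |= (src_word << shift) & mask;	/* merge in the caller bytes */
	vram_dword = value;			/* write it back */

	printf("bytes=%u shift=%u mask=0x%08x dword=0x%08x\n",
	       bytes, shift, mask, vram_dword);	/* dword=0x2211beef */
	return 0;
}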
@@ -1638,7 +1721,6 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
        .ttm_tt_create = &amdgpu_ttm_tt_create,
        .ttm_tt_populate = &amdgpu_ttm_tt_populate,
        .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
-       .invalidate_caches = &amdgpu_invalidate_caches,
        .init_mem_type = &amdgpu_init_mem_type,
        .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
        .evict_flags = &amdgpu_evict_flags,
@@ -1836,9 +1918,11 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
         * The reserved vram for memory training must be pinned to the specified
         * place on the VRAM, so reserve it early.
         */
-       r = amdgpu_ttm_training_reserve_vram_init(adev);
-       if (r)
-               return r;
+       if (!amdgpu_sriov_vf(adev)) {
+               r = amdgpu_ttm_training_reserve_vram_init(adev);
+               if (r)
+                       return r;
+       }
 
        /* allocate memory as required for VGA
         * This is used for VGA emulation and pre-OS scanout buffers to
@@ -1911,12 +1995,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
                return r;
        }
 
-       /* Register debugfs entries for amdgpu_ttm */
-       r = amdgpu_ttm_debugfs_init(adev);
-       if (r) {
-               DRM_ERROR("Failed to init debugfs\n");
-               return r;
-       }
        return 0;
 }
 
@@ -1938,7 +2016,6 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
        if (!adev->mman.initialized)
                return;
 
-       amdgpu_ttm_debugfs_fini(adev);
        amdgpu_ttm_training_reserve_vram_fini(adev);
        /* return the IP Discovery TMR memory back to VRAM */
        amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL);
@@ -2017,74 +2094,11 @@ int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
        return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
 }
 
-static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
-                            struct ttm_mem_reg *mem, unsigned num_pages,
-                            uint64_t offset, unsigned window,
-                            struct amdgpu_ring *ring,
-                            uint64_t *addr)
-{
-       struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
-       struct amdgpu_device *adev = ring->adev;
-       struct ttm_tt *ttm = bo->ttm;
-       struct amdgpu_job *job;
-       unsigned num_dw, num_bytes;
-       dma_addr_t *dma_address;
-       struct dma_fence *fence;
-       uint64_t src_addr, dst_addr;
-       uint64_t flags;
-       int r;
-
-       BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
-              AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
-
-       *addr = adev->gmc.gart_start;
-       *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
-               AMDGPU_GPU_PAGE_SIZE;
-
-       num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
-       num_bytes = num_pages * 8;
-
-       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
-       if (r)
-               return r;
-
-       src_addr = num_dw * 4;
-       src_addr += job->ibs[0].gpu_addr;
-
-       dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
-       dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
-       amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
-                               dst_addr, num_bytes);
-
-       amdgpu_ring_pad_ib(ring, &job->ibs[0]);
-       WARN_ON(job->ibs[0].length_dw > num_dw);
-
-       dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
-       flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
-       r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
-                           &job->ibs[0].ptr[num_dw]);
-       if (r)
-               goto error_free;
-
-       r = amdgpu_job_submit(job, &adev->mman.entity,
-                             AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
-       if (r)
-               goto error_free;
-
-       dma_fence_put(fence);
-
-       return r;
-
-error_free:
-       amdgpu_job_free(job);
-       return r;
-}
-
 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
                       uint64_t dst_offset, uint32_t byte_count,
                       struct dma_resv *resv,
                       struct dma_fence **fence, bool direct_submit,
-                      bool vm_needs_flush)
+                      bool vm_needs_flush, bool tmz)
 {
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_job *job;
@@ -2103,7 +2117,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
        num_loops = DIV_ROUND_UP(byte_count, max_bytes);
        num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
 
-       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4,
+                       direct_submit ? AMDGPU_IB_POOL_DIRECT : AMDGPU_IB_POOL_NORMAL, &job);
        if (r)
                return r;
 
@@ -2113,8 +2128,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
        }
        if (resv) {
                r = amdgpu_sync_resv(adev, &job->sync, resv,
-                                    AMDGPU_FENCE_OWNER_UNDEFINED,
-                                    false);
+                                    AMDGPU_SYNC_ALWAYS,
+                                    AMDGPU_FENCE_OWNER_UNDEFINED);
                if (r) {
                        DRM_ERROR("sync failed (%d).\n", r);
                        goto error_free;
@@ -2125,7 +2140,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
                uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
 
                amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
-                                       dst_offset, cur_size_in_bytes);
+                                       dst_offset, cur_size_in_bytes, tmz);
 
                src_offset += cur_size_in_bytes;
                dst_offset += cur_size_in_bytes;
@@ -2192,13 +2207,14 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
        /* for IB padding */
        num_dw += 64;
 
-       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_NORMAL, &job);
        if (r)
                return r;
 
        if (resv) {
                r = amdgpu_sync_resv(adev, &job->sync, resv,
-                                    AMDGPU_FENCE_OWNER_UNDEFINED, false);
+                                    AMDGPU_SYNC_ALWAYS,
+                                    AMDGPU_FENCE_OWNER_UNDEFINED);
                if (r) {
                        DRM_ERROR("sync failed (%d).\n", r);
                        goto error_free;
@@ -2279,7 +2295,6 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
 {
        struct amdgpu_device *adev = file_inode(f)->i_private;
        ssize_t result = 0;
-       int r;
 
        if (size & 0x3 || *pos & 0x3)
                return -EINVAL;
@@ -2287,27 +2302,19 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
        if (*pos >= adev->gmc.mc_vram_size)
                return -ENXIO;
 
+       size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
        while (size) {
-               unsigned long flags;
-               uint32_t value;
+               size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
+               uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];
 
-               if (*pos >= adev->gmc.mc_vram_size)
-                       return result;
-
-               spin_lock_irqsave(&adev->mmio_idx_lock, flags);
-               WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
-               WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
-               value = RREG32_NO_KIQ(mmMM_DATA);
-               spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
-
-               r = put_user(value, (uint32_t *)buf);
-               if (r)
-                       return r;
+               amdgpu_device_vram_access(adev, *pos, value, bytes, false);
+               if (copy_to_user(buf, value, bytes))
+                       return -EFAULT;
 
-               result += 4;
-               buf += 4;
-               *pos += 4;
-               size -= 4;
+               result += bytes;
+               buf += bytes;
+               *pos += bytes;
+               size -= bytes;
        }
 
        return result;
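The debugfs read loop above now moves up to AMDGPU_TTM_VRAM_MAX_DW_READ dwords per pass instead of one register access per dword; a rough user-space sketch of that chunking, with a dummy helper standing in for amdgpu_device_vram_access() and made-up sizes:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_DW_READ	128		/* stand-in for AMDGPU_TTM_VRAM_MAX_DW_READ */

/* Hypothetical helper standing in for amdgpu_device_vram_access(). */
static void fake_vram_access(uint64_t pos, uint32_t *buf, size_t bytes)
{
	(void)pos;
	memset(buf, 0xab, bytes);	/* pretend we read VRAM contents */
}

int main(void)
{
	uint64_t vram_size = 16ull << 20;	/* pretend mc_vram_size */
	uint64_t pos = vram_size - 0x300;	/* read starts near the end of VRAM */
	size_t size = 0x1000;			/* requested read size */
	size_t result = 0;

	/* Clamp to the end of VRAM, then copy at most 128 dwords per iteration. */
	if (size > vram_size - pos)
		size = vram_size - pos;
	while (size) {
		uint32_t value[MAX_DW_READ];
		size_t bytes = size < sizeof(value) ? size : sizeof(value);

		fake_vram_access(pos, value, bytes);
		/* copy_to_user(buf, value, bytes) would go here in the kernel */

		result += bytes;
		pos += bytes;
		size -= bytes;
	}

	printf("read %zu bytes, ending at 0x%llx\n",
	       result, (unsigned long long)pos);	/* 768 bytes */
	return 0;
}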
@@ -2544,7 +2551,7 @@ static const struct {
 
 #endif
 
-static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
+int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
 {
 #if defined(CONFIG_DEBUG_FS)
        unsigned count;
@@ -2579,13 +2586,3 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
        return 0;
 #endif
 }
-
-static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
-{
-#if defined(CONFIG_DEBUG_FS)
-       unsigned i;
-
-       for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++)
-               debugfs_remove(adev->mman.debugfs_entries[i]);
-#endif
-}