drm/ttm: make ttm_range_man_init/takedown take type + args
[linux-2.6-microblaze.git] / drivers / gpu / drm / amd / amdgpu / amdgpu_ttm.c
index e59c01a..5556958 100644 (file)
 
 #define AMDGPU_TTM_VRAM_MAX_DW_READ    (size_t)128
 
-
-/**
- * amdgpu_init_mem_type - Initialize a memory manager for a specific type of
- * memory request.
- *
- * @bdev: The TTM BO device object (contains a reference to amdgpu_device)
- * @type: The type of memory requested
- * @man: The memory type manager for each domain
- *
- * This is called by ttm_bo_init_mm() when a buffer object is being
- * initialized.
- */
-static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
-                               struct ttm_mem_type_manager *man)
+static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
+                                   unsigned int type,
+                                   uint64_t size)
 {
-       struct amdgpu_device *adev;
-
-       adev = amdgpu_ttm_adev(bdev);
-
-       switch (type) {
-       case TTM_PL_SYSTEM:
-               /* System memory */
-               man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
-               man->available_caching = TTM_PL_MASK_CACHING;
-               man->default_caching = TTM_PL_FLAG_CACHED;
-               break;
-       case TTM_PL_TT:
-               /* GTT memory  */
-               man->func = &amdgpu_gtt_mgr_func;
-               man->gpu_offset = adev->gmc.gart_start;
-               man->available_caching = TTM_PL_MASK_CACHING;
-               man->default_caching = TTM_PL_FLAG_CACHED;
-               man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
-               break;
-       case TTM_PL_VRAM:
-               /* "On-card" video ram */
-               man->func = &amdgpu_vram_mgr_func;
-               man->gpu_offset = adev->gmc.vram_start;
-               man->flags = TTM_MEMTYPE_FLAG_FIXED |
-                            TTM_MEMTYPE_FLAG_MAPPABLE;
-               man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
-               man->default_caching = TTM_PL_FLAG_WC;
-               break;
-       case AMDGPU_PL_GDS:
-       case AMDGPU_PL_GWS:
-       case AMDGPU_PL_OA:
-               /* On-chip GDS memory*/
-               man->func = &ttm_bo_manager_func;
-               man->gpu_offset = 0;
-               man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA;
-               man->available_caching = TTM_PL_FLAG_UNCACHED;
-               man->default_caching = TTM_PL_FLAG_UNCACHED;
-               break;
-       default:
-               DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
-               return -EINVAL;
-       }
-       return 0;
+       return ttm_range_man_init(&adev->mman.bdev, type,
+                                 TTM_PL_FLAG_UNCACHED, TTM_PL_FLAG_UNCACHED,
+                                 false, size >> PAGE_SHIFT);
 }
 
 /**
@@ -258,7 +207,8 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
 
        if (mm_node->start != AMDGPU_BO_INVALID_OFFSET) {
                addr = mm_node->start << PAGE_SHIFT;
-               addr += bo->bdev->man[mem->mem_type].gpu_offset;
+               addr += amdgpu_ttm_domain_start(amdgpu_ttm_adev(bo->bdev),
+                                               mem->mem_type);
        }
        return addr;
 }
@@ -431,12 +381,22 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
        }
 
        src_offset = src->offset;
-       src_mm = amdgpu_find_mm_node(src->mem, &src_offset);
-       src_node_size = (src_mm->size << PAGE_SHIFT) - src_offset;
+       if (src->mem->mm_node) {
+               src_mm = amdgpu_find_mm_node(src->mem, &src_offset);
+               src_node_size = (src_mm->size << PAGE_SHIFT) - src_offset;
+       } else {
+               src_mm = NULL;
+               src_node_size = ULLONG_MAX;
+       }
 
        dst_offset = dst->offset;
-       dst_mm = amdgpu_find_mm_node(dst->mem, &dst_offset);
-       dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst_offset;
+       if (dst->mem->mm_node) {
+               dst_mm = amdgpu_find_mm_node(dst->mem, &dst_offset);
+               dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst_offset;
+       } else {
+               dst_mm = NULL;
+               dst_node_size = ULLONG_MAX;
+       }
 
        mutex_lock(&adev->mman.gtt_window_lock);
 
@@ -788,7 +748,6 @@ memcpy:
  */
 static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
-       struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
        struct drm_mm_node *mm_node = mem->mm_node;
 
@@ -797,8 +756,7 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;
-       if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
-               return -EINVAL;
+
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* system memory */
@@ -828,10 +786,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
        return 0;
 }
 
-static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
-{
-}
-
 static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
                                           unsigned long page_offset)
 {
@@ -843,6 +797,27 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
                (offset >> PAGE_SHIFT);
 }
 
+
+/**
+ * amdgpu_ttm_domain_start - Returns GPU start address
+ * @adev: amdgpu device object
+ * @type: type of the memory
+ *
+ * Returns:
+ * GPU start address of a memory domain
+ */
+uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
+{
+       switch (type) {
+       case TTM_PL_TT:
+               return adev->gmc.gart_start;
+       case TTM_PL_VRAM:
+               return adev->gmc.vram_start;
+       }
+
+       return 0;
+}
+
 /*
  * TTM backend functions.
  */
@@ -1024,7 +999,6 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
-       unsigned nents;
        int r;
 
        int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
@@ -1039,9 +1013,8 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
                goto release_sg;
 
        /* Map SG to device */
-       r = -ENOMEM;
-       nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
-       if (nents == 0)
+       r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
+       if (r)
                goto release_sg;
 
        /* convert SG to linear array of pages and dma addresses */
@@ -1072,8 +1045,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
                return;
 
        /* unmap the pages mapped to the device */
-       dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
-
+       dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
        sg_free_table(ttm->sg);
 
 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
@@ -1239,9 +1211,6 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
                bo->mem = tmp;
        }
 
-       bo->offset = (bo->mem.start << PAGE_SHIFT) +
-               bo->bdev->man[bo->mem.mem_type].gpu_offset;
-
        return 0;
 }
 
@@ -1722,7 +1691,6 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
        .ttm_tt_create = &amdgpu_ttm_tt_create,
        .ttm_tt_populate = &amdgpu_ttm_tt_populate,
        .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
-       .init_mem_type = &amdgpu_init_mem_type,
        .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
        .evict_flags = &amdgpu_evict_flags,
        .move = &amdgpu_bo_move,
@@ -1731,7 +1699,6 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
        .release_notify = &amdgpu_bo_release_notify,
        .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
        .io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
-       .io_mem_free = &amdgpu_ttm_io_mem_free,
        .io_mem_pfn = amdgpu_ttm_io_mem_pfn,
        .access_memory = &amdgpu_ttm_access_memory,
        .del_from_lru_notify = &amdgpu_vm_del_from_lru_notify
@@ -1886,8 +1853,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
        adev->mman.bdev.no_retry = true;
 
        /* Initialize VRAM pool with all of VRAM divided into pages */
-       r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
-                               adev->gmc.real_vram_size >> PAGE_SHIFT);
+       r = amdgpu_vram_mgr_init(adev);
        if (r) {
                DRM_ERROR("Failed initializing VRAM heap.\n");
                return r;
@@ -1968,7 +1934,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
                gtt_size = (uint64_t)amdgpu_gtt_size << 20;
 
        /* Initialize GTT memory pool */
-       r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
+       r = amdgpu_gtt_mgr_init(adev, gtt_size);
        if (r) {
                DRM_ERROR("Failed initializing GTT heap.\n");
                return r;
@@ -1977,22 +1943,19 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
                 (unsigned)(gtt_size / (1024 * 1024)));
 
        /* Initialize various on-chip memory pools */
-       r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
-                          adev->gds.gds_size);
+       r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
        if (r) {
                DRM_ERROR("Failed initializing GDS heap.\n");
                return r;
        }
 
-       r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
-                          adev->gds.gws_size);
+       r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
        if (r) {
                DRM_ERROR("Failed initializing gws heap.\n");
                return r;
        }
 
-       r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
-                          adev->gds.oa_size);
+       r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
        if (r) {
                DRM_ERROR("Failed initializing oa heap.\n");
                return r;
@@ -2028,11 +1991,11 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
                iounmap(adev->mman.aper_base_kaddr);
        adev->mman.aper_base_kaddr = NULL;
 
-       ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
-       ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
-       ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
-       ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
-       ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
+       amdgpu_vram_mgr_fini(adev);
+       amdgpu_gtt_mgr_fini(adev);
+       ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
+       ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
+       ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
        ttm_bo_device_release(&adev->mman.bdev);
        adev->mman.initialized = false;
        DRM_INFO("amdgpu: ttm finalized\n");
@@ -2049,7 +2012,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
  */
 void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 {
-       struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
+       struct ttm_mem_type_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
        uint64_t size;
        int r;
 
@@ -2271,7 +2234,7 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
        unsigned ttm_pl = (uintptr_t)node->info_ent->data;
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = dev->dev_private;
-       struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl];
+       struct ttm_mem_type_manager *man = ttm_manager_type(&adev->mman.bdev, ttm_pl);
        struct drm_printer p = drm_seq_file_printer(m);
 
        man->func->debug(man, &p);