drm/vmwgfx: switch the TTM backends to self alloc
author     Christian König <christian.koenig@amd.com>
           Mon, 26 Apr 2021 15:03:34 +0000 (17:03 +0200)
committer  Christian König <christian.koenig@amd.com>
           Fri, 4 Jun 2021 13:16:46 +0000 (15:16 +0200)
Make the backends allocate and initialize their node themselves, similar to
the TTM range manager.
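
For reference, the helper this builds on, abbreviated from
include/drm/ttm/ttm_range_manager.h, bundles the resource with the drm_mm
node(s) backing it, so a single backend-owned allocation covers both:

	/* Abbreviated from include/drm/ttm/ttm_range_manager.h: the
	 * resource and the drm_mm node(s) backing it live in one
	 * allocation, sized with struct_size() for the flexible array. */
	struct ttm_range_mgr_node {
		struct ttm_resource base;
		struct drm_mm_node mm_nodes[];
	};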

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210602100914.46246-9-christian.koenig@amd.com
drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
drivers/gpu/drm/vmwgfx/vmwgfx_thp.c

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index 1774960..82a5e64 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -57,6 +57,15 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
        struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
        int id;
 
+       mem->mm_node = kmalloc(sizeof(*mem), GFP_KERNEL);
+       if (!mem->mm_node)
+               return -ENOMEM;
+
+       ttm_resource_init(bo, place, mem->mm_node);
+
        id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
-       if (id < 0)
-               return id;
+       if (id < 0) {
+               kfree(mem->mm_node);
+               mem->mm_node = NULL;
+               return id;
+       }
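
Note that the GMR backend needs no drm_mm range, so its self-allocated node
is just a bare struct ttm_resource used as an opaque cookie; sizeof(*mem)
resolves to sizeof(struct ttm_resource) here. A short sketch of the idea:

	/* Sketch: mem->mm_node is an opaque, backend-owned allocation.
	 * A GMR ID occupies no address range, so a bare ttm_resource is
	 * enough for ttm_resource_init() to initialize; the free side
	 * below simply kfree()s it again. */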
@@ -87,13 +96,11 @@ static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,
 {
        struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
 
-       if (mem->mm_node) {
-               ida_free(&gman->gmr_ida, mem->start);
-               spin_lock(&gman->lock);
-               gman->used_gmr_pages -= mem->num_pages;
-               spin_unlock(&gman->lock);
-               mem->mm_node = NULL;
-       }
+       ida_free(&gman->gmr_ida, mem->start);
+       spin_lock(&gman->lock);
+       gman->used_gmr_pages -= mem->num_pages;
+       spin_unlock(&gman->lock);
+       kfree(mem->mm_node);
 }
 
 static const struct ttm_resource_manager_func vmw_gmrid_manager_func;
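
The forward-declared vmw_gmrid_manager_func is where TTM core picks up these
two hooks; a sketch of its definition, assuming the v5.13 member names
(.alloc/.free) and that this patch leaves the table itself unchanged:

	static const struct ttm_resource_manager_func vmw_gmrid_manager_func = {
		.alloc = vmw_gmrid_man_get_node,
		.free = vmw_gmrid_man_put_node,
	};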
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
index 5ccc35b..8765835 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
@@ -7,6 +7,7 @@
 #include "vmwgfx_drv.h"
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
 
 /**
  * struct vmw_thp_manager - Range manager implementing huge page alignment
@@ -54,16 +55,18 @@ static int vmw_thp_get_node(struct ttm_resource_manager *man,
 {
        struct vmw_thp_manager *rman = to_thp_manager(man);
        struct drm_mm *mm = &rman->mm;
-       struct drm_mm_node *node;
+       struct ttm_range_mgr_node *node;
        unsigned long align_pages;
        unsigned long lpfn;
        enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST;
        int ret;
 
-       node = kzalloc(sizeof(*node), GFP_KERNEL);
+       node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
        if (!node)
                return -ENOMEM;
 
+       ttm_resource_init(bo, place, &node->base);
+
        lpfn = place->lpfn;
        if (!lpfn)
                lpfn = man->size;
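
The kzalloc() size above comes from struct_size(); overflow handling aside,
it works out to:

	/* struct_size(node, mm_nodes, 1) computes, with overflow
	 * checking, roughly sizeof(*node) + 1 * sizeof(node->mm_nodes[0]):
	 * room for the ttm_resource base plus a single drm_mm_node. */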
@@ -76,8 +79,9 @@ static int vmw_thp_get_node(struct ttm_resource_manager *man,
        if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
                align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
                if (mem->num_pages >= align_pages) {
-                       ret = vmw_thp_insert_aligned(bo, mm, node, align_pages,
-                                                    place, mem, lpfn, mode);
+                       ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
+                                                    align_pages, place, mem,
+                                                    lpfn, mode);
                        if (!ret)
                                goto found_unlock;
                }
@@ -85,14 +89,15 @@ static int vmw_thp_get_node(struct ttm_resource_manager *man,
 
        align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
        if (mem->num_pages >= align_pages) {
-               ret = vmw_thp_insert_aligned(bo, mm, node, align_pages, place,
-                                            mem, lpfn, mode);
+               ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
+                                            align_pages, place, mem, lpfn,
+                                            mode);
                if (!ret)
                        goto found_unlock;
        }
 
-       ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
-                                         bo->page_alignment, 0,
+       ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
+                                         mem->num_pages, bo->page_alignment, 0,
                                          place->fpfn, lpfn, mode);
 found_unlock:
        spin_unlock(&rman->lock);
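
For scale, assuming x86-64 defaults (PAGE_SHIFT == 12), the two passes above
try progressively weaker alignments before falling back to a plain insert:

	/* Example, assuming x86-64 defaults:
	 *   PUD pass: align_pages = 1 GiB >> 12 = 262144 pages
	 *   PMD pass: align_pages = 2 MiB >> 12 = 512 pages
	 * A BO smaller than 512 pages skips both huge-page attempts and
	 * is inserted with the plain bo->page_alignment. */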
@@ -100,8 +105,8 @@ found_unlock:
        if (unlikely(ret)) {
                kfree(node);
        } else {
                mem->mm_node = node;
-               mem->start = node->start;
+               mem->start = node->mm_nodes[0].start;
        }
 
        return ret;
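
The resulting ownership contract, sketched under the assumption that nothing
else interprets mem->mm_node for this manager:

	/* Alloc stores the container in mem->mm_node; free recovers it,
	 * removes the embedded drm_mm node and frees the whole thing:
	 *
	 *   alloc: node = kzalloc(struct_size(node, mm_nodes, 1), ...);
	 *          ttm_resource_init(bo, place, &node->base);
	 *          drm_mm_insert_node_in_range(mm, &node->mm_nodes[0], ...);
	 *          mem->mm_node = node;
	 *
	 *   free:  node = mem->mm_node;
	 *          drm_mm_remove_node(&node->mm_nodes[0]);
	 *          kfree(node);
	 */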
@@ -113,15 +118,13 @@ static void vmw_thp_put_node(struct ttm_resource_manager *man,
                             struct ttm_resource *mem)
 {
        struct vmw_thp_manager *rman = to_thp_manager(man);
+       struct ttm_range_mgr_node *node = mem->mm_node;
 
-       if (mem->mm_node) {
-               spin_lock(&rman->lock);
-               drm_mm_remove_node(mem->mm_node);
-               spin_unlock(&rman->lock);
+       spin_lock(&rman->lock);
+       drm_mm_remove_node(&node->mm_nodes[0]);
+       spin_unlock(&rman->lock);
 
-               kfree(mem->mm_node);
-               mem->mm_node = NULL;
-       }
+       kfree(node);
 }
 
 int vmw_thp_init(struct vmw_private *dev_priv)