1 // SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2023 Loongson Technology Corporation Limited */
6 #include <drm/drm_drv.h>
7 #include <drm/drm_file.h>
8 #include <drm/drm_gem.h>
9 #include <drm/drm_managed.h>
10 #include <drm/drm_prime.h>
15 const char *lsdc_mem_type_to_str(uint32_t mem_type)
31 const char *lsdc_domain_to_str(u32 domain)
34 case LSDC_GEM_DOMAIN_VRAM:
36 case LSDC_GEM_DOMAIN_GTT:
38 case LSDC_GEM_DOMAIN_SYSTEM:
47 static void lsdc_bo_set_placement(struct lsdc_bo *lbo, u32 domain)
53 if (lbo->tbo.base.size <= PAGE_SIZE)
54 pflags |= TTM_PL_FLAG_TOPDOWN;
56 lbo->placement.placement = lbo->placements;
57 lbo->placement.busy_placement = lbo->placements;
59 if (domain & LSDC_GEM_DOMAIN_VRAM) {
60 lbo->placements[c].mem_type = TTM_PL_VRAM;
61 lbo->placements[c++].flags = pflags;
64 if (domain & LSDC_GEM_DOMAIN_GTT) {
65 lbo->placements[c].mem_type = TTM_PL_TT;
66 lbo->placements[c++].flags = pflags;
69 if (domain & LSDC_GEM_DOMAIN_SYSTEM) {
70 lbo->placements[c].mem_type = TTM_PL_SYSTEM;
71 lbo->placements[c++].flags = 0;
75 lbo->placements[c].mem_type = TTM_PL_SYSTEM;
76 lbo->placements[c++].flags = 0;
79 lbo->placement.num_placement = c;
80 lbo->placement.num_busy_placement = c;
82 for (i = 0; i < c; ++i) {
83 lbo->placements[i].fpfn = 0;
84 lbo->placements[i].lpfn = 0;
88 static void lsdc_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *tt)
94 static struct ttm_tt *
95 lsdc_ttm_tt_create(struct ttm_buffer_object *tbo, uint32_t page_flags)
100 tt = kzalloc(sizeof(*tt), GFP_KERNEL);
104 ret = ttm_sg_tt_init(tt, tbo, page_flags, ttm_cached);
113 static int lsdc_ttm_tt_populate(struct ttm_device *bdev,
115 struct ttm_operation_ctx *ctx)
117 bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);
119 if (slave && ttm->sg) {
120 drm_prime_sg_to_dma_addr_array(ttm->sg,
127 return ttm_pool_alloc(&bdev->pool, ttm, ctx);
130 static void lsdc_ttm_tt_unpopulate(struct ttm_device *bdev,
133 bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);
138 return ttm_pool_free(&bdev->pool, ttm);
141 static void lsdc_bo_evict_flags(struct ttm_buffer_object *tbo,
142 struct ttm_placement *tplacement)
144 struct ttm_resource *resource = tbo->resource;
145 struct lsdc_bo *lbo = to_lsdc_bo(tbo);
147 switch (resource->mem_type) {
149 lsdc_bo_set_placement(lbo, LSDC_GEM_DOMAIN_GTT);
153 lsdc_bo_set_placement(lbo, LSDC_GEM_DOMAIN_SYSTEM);
157 *tplacement = lbo->placement;
160 static int lsdc_bo_move(struct ttm_buffer_object *tbo,
162 struct ttm_operation_ctx *ctx,
163 struct ttm_resource *new_mem,
164 struct ttm_place *hop)
166 struct drm_device *ddev = tbo->base.dev;
167 struct ttm_resource *old_mem = tbo->resource;
168 struct lsdc_bo *lbo = to_lsdc_bo(tbo);
171 if (unlikely(tbo->pin_count > 0)) {
172 drm_warn(ddev, "Can't move a pinned BO\n");
176 ret = ttm_bo_wait_ctx(tbo, ctx);
181 drm_dbg(ddev, "bo[%p] move: NULL to %s, size: %zu\n",
182 lbo, lsdc_mem_type_to_str(new_mem->mem_type),
184 ttm_bo_move_null(tbo, new_mem);
188 if (old_mem->mem_type == TTM_PL_SYSTEM && !tbo->ttm) {
189 ttm_bo_move_null(tbo, new_mem);
190 drm_dbg(ddev, "bo[%p] move: SYSTEM to NULL, size: %zu\n",
191 lbo, lsdc_bo_size(lbo));
195 if (old_mem->mem_type == TTM_PL_SYSTEM &&
196 new_mem->mem_type == TTM_PL_TT) {
197 drm_dbg(ddev, "bo[%p] move: SYSTEM to GTT, size: %zu\n",
198 lbo, lsdc_bo_size(lbo));
199 ttm_bo_move_null(tbo, new_mem);
203 if (old_mem->mem_type == TTM_PL_TT &&
204 new_mem->mem_type == TTM_PL_SYSTEM) {
205 drm_dbg(ddev, "bo[%p] move: GTT to SYSTEM, size: %zu\n",
206 lbo, lsdc_bo_size(lbo));
207 ttm_resource_free(tbo, &tbo->resource);
208 ttm_bo_assign_mem(tbo, new_mem);
212 drm_dbg(ddev, "bo[%p] move: %s to %s, size: %zu\n",
214 lsdc_mem_type_to_str(old_mem->mem_type),
215 lsdc_mem_type_to_str(new_mem->mem_type),
218 return ttm_bo_move_memcpy(tbo, ctx, new_mem);
221 static int lsdc_bo_reserve_io_mem(struct ttm_device *bdev,
222 struct ttm_resource *mem)
224 struct lsdc_device *ldev = tdev_to_ldev(bdev);
226 switch (mem->mem_type) {
232 mem->bus.offset = (mem->start << PAGE_SHIFT) + ldev->vram_base;
233 mem->bus.is_iomem = true;
234 mem->bus.caching = ttm_write_combined;
243 static struct ttm_device_funcs lsdc_bo_driver = {
244 .ttm_tt_create = lsdc_ttm_tt_create,
245 .ttm_tt_populate = lsdc_ttm_tt_populate,
246 .ttm_tt_unpopulate = lsdc_ttm_tt_unpopulate,
247 .ttm_tt_destroy = lsdc_ttm_tt_destroy,
248 .eviction_valuable = ttm_bo_eviction_valuable,
249 .evict_flags = lsdc_bo_evict_flags,
250 .move = lsdc_bo_move,
251 .io_mem_reserve = lsdc_bo_reserve_io_mem,
254 u64 lsdc_bo_gpu_offset(struct lsdc_bo *lbo)
256 struct ttm_buffer_object *tbo = &lbo->tbo;
257 struct drm_device *ddev = tbo->base.dev;
258 struct ttm_resource *resource = tbo->resource;
260 if (unlikely(!tbo->pin_count)) {
261 drm_err(ddev, "unpinned bo, gpu virtual address is invalid\n");
265 if (unlikely(resource->mem_type == TTM_PL_SYSTEM))
268 return resource->start << PAGE_SHIFT;
271 size_t lsdc_bo_size(struct lsdc_bo *lbo)
273 struct ttm_buffer_object *tbo = &lbo->tbo;
275 return tbo->base.size;
278 int lsdc_bo_reserve(struct lsdc_bo *lbo)
280 return ttm_bo_reserve(&lbo->tbo, true, false, NULL);
283 void lsdc_bo_unreserve(struct lsdc_bo *lbo)
285 return ttm_bo_unreserve(&lbo->tbo);
288 int lsdc_bo_pin(struct lsdc_bo *lbo, u32 domain, u64 *gpu_addr)
290 struct ttm_operation_ctx ctx = { false, false };
291 struct ttm_buffer_object *tbo = &lbo->tbo;
292 struct lsdc_device *ldev = tdev_to_ldev(tbo->bdev);
298 if (lbo->sharing_count && domain == LSDC_GEM_DOMAIN_VRAM)
302 lsdc_bo_set_placement(lbo, domain);
304 ret = ttm_bo_validate(tbo, &lbo->placement, &ctx);
306 drm_err(&ldev->base, "%p validate failed: %d\n", lbo, ret);
310 if (domain == LSDC_GEM_DOMAIN_VRAM)
311 ldev->vram_pinned_size += lsdc_bo_size(lbo);
312 else if (domain == LSDC_GEM_DOMAIN_GTT)
313 ldev->gtt_pinned_size += lsdc_bo_size(lbo);
319 *gpu_addr = lsdc_bo_gpu_offset(lbo);
324 void lsdc_bo_unpin(struct lsdc_bo *lbo)
326 struct ttm_buffer_object *tbo = &lbo->tbo;
327 struct lsdc_device *ldev = tdev_to_ldev(tbo->bdev);
329 if (unlikely(!tbo->pin_count)) {
330 drm_dbg(&ldev->base, "%p unpin is not necessary\n", lbo);
336 if (!tbo->pin_count) {
337 if (tbo->resource->mem_type == TTM_PL_VRAM)
338 ldev->vram_pinned_size -= lsdc_bo_size(lbo);
339 else if (tbo->resource->mem_type == TTM_PL_TT)
340 ldev->gtt_pinned_size -= lsdc_bo_size(lbo);
344 void lsdc_bo_ref(struct lsdc_bo *lbo)
346 struct ttm_buffer_object *tbo = &lbo->tbo;
351 void lsdc_bo_unref(struct lsdc_bo *lbo)
353 struct ttm_buffer_object *tbo = &lbo->tbo;
358 int lsdc_bo_kmap(struct lsdc_bo *lbo)
360 struct ttm_buffer_object *tbo = &lbo->tbo;
361 struct drm_gem_object *gem = &tbo->base;
362 struct drm_device *ddev = gem->dev;
366 ret = dma_resv_wait_timeout(gem->resv, DMA_RESV_USAGE_KERNEL, false,
367 MAX_SCHEDULE_TIMEOUT);
369 drm_warn(ddev, "wait fence timeout\n");
376 err = ttm_bo_kmap(tbo, 0, PFN_UP(lsdc_bo_size(lbo)), &lbo->kmap);
378 drm_err(ddev, "kmap %p failed: %d\n", lbo, err);
382 lbo->kptr = ttm_kmap_obj_virtual(&lbo->kmap, &lbo->is_iomem);
387 void lsdc_bo_kunmap(struct lsdc_bo *lbo)
393 ttm_bo_kunmap(&lbo->kmap);
396 void lsdc_bo_clear(struct lsdc_bo *lbo)
401 memset_io((void __iomem *)lbo->kptr, 0, lbo->size);
403 memset(lbo->kptr, 0, lbo->size);
408 int lsdc_bo_evict_vram(struct drm_device *ddev)
410 struct lsdc_device *ldev = to_lsdc(ddev);
411 struct ttm_device *bdev = &ldev->bdev;
412 struct ttm_resource_manager *man;
414 man = ttm_manager_type(bdev, TTM_PL_VRAM);
418 return ttm_resource_manager_evict_all(bdev, man);
421 static void lsdc_bo_destroy(struct ttm_buffer_object *tbo)
423 struct lsdc_device *ldev = tdev_to_ldev(tbo->bdev);
424 struct lsdc_bo *lbo = to_lsdc_bo(tbo);
426 mutex_lock(&ldev->gem.mutex);
427 list_del_init(&lbo->list);
428 mutex_unlock(&ldev->gem.mutex);
430 drm_gem_object_release(&tbo->base);
435 struct lsdc_bo *lsdc_bo_create(struct drm_device *ddev,
440 struct dma_resv *resv)
442 struct lsdc_device *ldev = to_lsdc(ddev);
443 struct ttm_device *bdev = &ldev->bdev;
444 struct ttm_buffer_object *tbo;
446 enum ttm_bo_type bo_type;
449 lbo = kzalloc(sizeof(*lbo), GFP_KERNEL);
451 return ERR_PTR(-ENOMEM);
453 INIT_LIST_HEAD(&lbo->list);
455 lbo->initial_domain = domain & (LSDC_GEM_DOMAIN_VRAM |
456 LSDC_GEM_DOMAIN_GTT |
457 LSDC_GEM_DOMAIN_SYSTEM);
461 size = ALIGN(size, PAGE_SIZE);
463 ret = drm_gem_object_init(ddev, &tbo->base, size);
472 bo_type = ttm_bo_type_kernel;
474 bo_type = ttm_bo_type_sg;
476 bo_type = ttm_bo_type_device;
478 lsdc_bo_set_placement(lbo, domain);
481 ret = ttm_bo_init_validate(bdev, tbo, bo_type, &lbo->placement, 0,
482 false, sg, resv, lsdc_bo_destroy);
491 struct lsdc_bo *lsdc_bo_create_kernel_pinned(struct drm_device *ddev,
498 lbo = lsdc_bo_create(ddev, domain, size, true, NULL, NULL);
500 return ERR_CAST(lbo);
502 ret = lsdc_bo_reserve(lbo);
508 ret = lsdc_bo_pin(lbo, domain, NULL);
509 lsdc_bo_unreserve(lbo);
518 void lsdc_bo_free_kernel_pinned(struct lsdc_bo *lbo)
522 ret = lsdc_bo_reserve(lbo);
527 lsdc_bo_unreserve(lbo);
532 static void lsdc_ttm_fini(struct drm_device *ddev, void *data)
534 struct lsdc_device *ldev = (struct lsdc_device *)data;
536 ttm_range_man_fini(&ldev->bdev, TTM_PL_VRAM);
537 ttm_range_man_fini(&ldev->bdev, TTM_PL_TT);
539 ttm_device_fini(&ldev->bdev);
541 drm_dbg(ddev, "ttm finished\n");
544 int lsdc_ttm_init(struct lsdc_device *ldev)
546 struct drm_device *ddev = &ldev->base;
547 unsigned long num_vram_pages;
548 unsigned long num_gtt_pages;
551 ret = ttm_device_init(&ldev->bdev, &lsdc_bo_driver, ddev->dev,
552 ddev->anon_inode->i_mapping,
553 ddev->vma_offset_manager, false, true);
557 num_vram_pages = ldev->vram_size >> PAGE_SHIFT;
559 ret = ttm_range_man_init(&ldev->bdev, TTM_PL_VRAM, false, num_vram_pages);
563 drm_info(ddev, "VRAM: %lu pages ready\n", num_vram_pages);
565 /* 512M is far enough for us now */
566 ldev->gtt_size = 512 << 20;
568 num_gtt_pages = ldev->gtt_size >> PAGE_SHIFT;
570 ret = ttm_range_man_init(&ldev->bdev, TTM_PL_TT, true, num_gtt_pages);
574 drm_info(ddev, "GTT: %lu pages ready\n", num_gtt_pages);
576 return drmm_add_action_or_reset(ddev, lsdc_ttm_fini, ldev);
579 void lsdc_ttm_debugfs_init(struct lsdc_device *ldev)
581 struct ttm_device *bdev = &ldev->bdev;
582 struct drm_device *ddev = &ldev->base;
583 struct drm_minor *minor = ddev->primary;
584 struct dentry *root = minor->debugfs_root;
585 struct ttm_resource_manager *vram_man;
586 struct ttm_resource_manager *gtt_man;
588 vram_man = ttm_manager_type(bdev, TTM_PL_VRAM);
589 gtt_man = ttm_manager_type(bdev, TTM_PL_TT);
591 ttm_resource_manager_create_debugfs(vram_man, root, "vram_mm");
592 ttm_resource_manager_create_debugfs(gtt_man, root, "gtt_mm");