// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2023 Loongson Technology Corporation Limited
 */

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_prime.h>

#include "lsdc_drv.h"
#include "lsdc_ttm.h"

const char *lsdc_mem_type_to_str(uint32_t mem_type)
{
        switch (mem_type) {
        case TTM_PL_VRAM:
                return "VRAM";
        case TTM_PL_TT:
                return "GTT";
        case TTM_PL_SYSTEM:
                return "SYSTEM";
        default:
                break;
        }

        return "Unknown";
}

const char *lsdc_domain_to_str(u32 domain)
{
        switch (domain) {
        case LSDC_GEM_DOMAIN_VRAM:
                return "VRAM";
        case LSDC_GEM_DOMAIN_GTT:
                return "GTT";
        case LSDC_GEM_DOMAIN_SYSTEM:
                return "SYSTEM";
        default:
                break;
        }

        return "Unknown";
}

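/*
 * Build the TTM placement list for @lbo from the requested GEM domain
 * mask.  BOs no larger than one page get the TTM_PL_FLAG_TOPDOWN hint
 * for their VRAM/GTT placements; if no known domain bit is set, fall
 * back to a single SYSTEM placement.
 */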
static void lsdc_bo_set_placement(struct lsdc_bo *lbo, u32 domain)
{
        u32 c = 0;
        u32 pflags = 0;
        u32 i;

        if (lbo->tbo.base.size <= PAGE_SIZE)
                pflags |= TTM_PL_FLAG_TOPDOWN;

        lbo->placement.placement = lbo->placements;
        lbo->placement.busy_placement = lbo->placements;

        if (domain & LSDC_GEM_DOMAIN_VRAM) {
                lbo->placements[c].mem_type = TTM_PL_VRAM;
                lbo->placements[c++].flags = pflags;
        }

        if (domain & LSDC_GEM_DOMAIN_GTT) {
                lbo->placements[c].mem_type = TTM_PL_TT;
                lbo->placements[c++].flags = pflags;
        }

        if (domain & LSDC_GEM_DOMAIN_SYSTEM) {
                lbo->placements[c].mem_type = TTM_PL_SYSTEM;
                lbo->placements[c++].flags = 0;
        }

        if (!c) {
                lbo->placements[c].mem_type = TTM_PL_SYSTEM;
                lbo->placements[c++].flags = 0;
        }

        lbo->placement.num_placement = c;
        lbo->placement.num_busy_placement = c;

        for (i = 0; i < c; ++i) {
                lbo->placements[i].fpfn = 0;
                lbo->placements[i].lpfn = 0;
        }
}

static void lsdc_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *tt)
{
        ttm_tt_fini(tt);
        kfree(tt);
}

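/*
 * Allocate the TT backing object.  ttm_sg_tt_init() is used so that both
 * locally allocated pages and imported (dma-buf) sg tables are supported,
 * always with cached CPU mappings.
 */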
static struct ttm_tt *
lsdc_ttm_tt_create(struct ttm_buffer_object *tbo, uint32_t page_flags)
{
        struct ttm_tt *tt;
        int ret;

        tt = kzalloc(sizeof(*tt), GFP_KERNEL);
        if (!tt)
                return NULL;

        ret = ttm_sg_tt_init(tt, tbo, page_flags, ttm_cached);
        if (ret < 0) {
                kfree(tt);
                return NULL;
        }

        return tt;
}

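/*
 * For imported buffers (TTM_TT_FLAG_EXTERNAL) the pages are owned by the
 * exporter; only the DMA address array is filled from the prime sg table.
 * Everything else is backed by pages from the TTM pool.
 */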
static int lsdc_ttm_tt_populate(struct ttm_device *bdev,
                                struct ttm_tt *ttm,
                                struct ttm_operation_ctx *ctx)
{
        bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);

        if (slave && ttm->sg) {
                drm_prime_sg_to_dma_addr_array(ttm->sg,
                                               ttm->dma_address,
                                               ttm->num_pages);

                return 0;
        }

        return ttm_pool_alloc(&bdev->pool, ttm, ctx);
}

static void lsdc_ttm_tt_unpopulate(struct ttm_device *bdev,
                                   struct ttm_tt *ttm)
{
        bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);

        if (slave)
                return;

        return ttm_pool_free(&bdev->pool, ttm);
}

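/*
 * Eviction policy: VRAM contents are evicted to GTT, everything else
 * goes back to SYSTEM memory.
 */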
static void lsdc_bo_evict_flags(struct ttm_buffer_object *tbo,
                                struct ttm_placement *tplacement)
{
        struct ttm_resource *resource = tbo->resource;
        struct lsdc_bo *lbo = to_lsdc_bo(tbo);

        switch (resource->mem_type) {
        case TTM_PL_VRAM:
                lsdc_bo_set_placement(lbo, LSDC_GEM_DOMAIN_GTT);
                break;
        case TTM_PL_TT:
        default:
                lsdc_bo_set_placement(lbo, LSDC_GEM_DOMAIN_SYSTEM);
                break;
        }

        *tplacement = lbo->placement;
}

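/*
 * BO move callback.  Pinned BOs refuse to move.  SYSTEM <-> GTT
 * transitions involve no data copy and are handled with
 * ttm_bo_move_null()/ttm_bo_assign_mem(); every other combination
 * falls back to a CPU memcpy.
 */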
static int lsdc_bo_move(struct ttm_buffer_object *tbo,
                        bool evict,
                        struct ttm_operation_ctx *ctx,
                        struct ttm_resource *new_mem,
                        struct ttm_place *hop)
{
        struct drm_device *ddev = tbo->base.dev;
        struct ttm_resource *old_mem = tbo->resource;
        struct lsdc_bo *lbo = to_lsdc_bo(tbo);
        int ret;

        if (unlikely(tbo->pin_count > 0)) {
                drm_warn(ddev, "Can't move a pinned BO\n");
                return -EINVAL;
        }

        ret = ttm_bo_wait_ctx(tbo, ctx);
        if (ret)
                return ret;

        if (!old_mem) {
                drm_dbg(ddev, "bo[%p] move: NULL to %s, size: %zu\n",
                        lbo, lsdc_mem_type_to_str(new_mem->mem_type),
                        lsdc_bo_size(lbo));
                ttm_bo_move_null(tbo, new_mem);
                return 0;
        }

        if (old_mem->mem_type == TTM_PL_SYSTEM && !tbo->ttm) {
                ttm_bo_move_null(tbo, new_mem);
                drm_dbg(ddev, "bo[%p] move: SYSTEM to NULL, size: %zu\n",
                        lbo, lsdc_bo_size(lbo));
                return 0;
        }

        if (old_mem->mem_type == TTM_PL_SYSTEM &&
            new_mem->mem_type == TTM_PL_TT) {
                drm_dbg(ddev, "bo[%p] move: SYSTEM to GTT, size: %zu\n",
                        lbo, lsdc_bo_size(lbo));
                ttm_bo_move_null(tbo, new_mem);
                return 0;
        }

        if (old_mem->mem_type == TTM_PL_TT &&
            new_mem->mem_type == TTM_PL_SYSTEM) {
                drm_dbg(ddev, "bo[%p] move: GTT to SYSTEM, size: %zu\n",
                        lbo, lsdc_bo_size(lbo));
                ttm_resource_free(tbo, &tbo->resource);
                ttm_bo_assign_mem(tbo, new_mem);
                return 0;
        }

        drm_dbg(ddev, "bo[%p] move: %s to %s, size: %zu\n",
                lbo,
                lsdc_mem_type_to_str(old_mem->mem_type),
                lsdc_mem_type_to_str(new_mem->mem_type),
                lsdc_bo_size(lbo));

        return ttm_bo_move_memcpy(tbo, ctx, new_mem);
}

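/*
 * Tell TTM how to map each memory type for CPU access: SYSTEM and GTT
 * need no aperture, while VRAM is accessed through the device's VRAM
 * aperture (ldev->vram_base) as write-combined I/O memory.
 */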
static int lsdc_bo_reserve_io_mem(struct ttm_device *bdev,
                                  struct ttm_resource *mem)
{
        struct lsdc_device *ldev = tdev_to_ldev(bdev);

        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                break;
        case TTM_PL_TT:
                break;
        case TTM_PL_VRAM:
                mem->bus.offset = (mem->start << PAGE_SHIFT) + ldev->vram_base;
                mem->bus.is_iomem = true;
                mem->bus.caching = ttm_write_combined;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static struct ttm_device_funcs lsdc_bo_driver = {
        .ttm_tt_create = lsdc_ttm_tt_create,
        .ttm_tt_populate = lsdc_ttm_tt_populate,
        .ttm_tt_unpopulate = lsdc_ttm_tt_unpopulate,
        .ttm_tt_destroy = lsdc_ttm_tt_destroy,
        .eviction_valuable = ttm_bo_eviction_valuable,
        .evict_flags = lsdc_bo_evict_flags,
        .move = lsdc_bo_move,
        .io_mem_reserve = lsdc_bo_reserve_io_mem,
};

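/*
 * Return the GPU-visible offset of a BO.  Only meaningful for pinned
 * BOs resident in VRAM or GTT; unpinned or SYSTEM-placed BOs yield 0.
 */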
u64 lsdc_bo_gpu_offset(struct lsdc_bo *lbo)
{
        struct ttm_buffer_object *tbo = &lbo->tbo;
        struct drm_device *ddev = tbo->base.dev;
        struct ttm_resource *resource = tbo->resource;

        if (unlikely(!tbo->pin_count)) {
                drm_err(ddev, "unpinned bo, gpu virtual address is invalid\n");
                return 0;
        }

        if (unlikely(resource->mem_type == TTM_PL_SYSTEM))
                return 0;

        return resource->start << PAGE_SHIFT;
}

size_t lsdc_bo_size(struct lsdc_bo *lbo)
{
        struct ttm_buffer_object *tbo = &lbo->tbo;

        return tbo->base.size;
}

int lsdc_bo_reserve(struct lsdc_bo *lbo)
{
        return ttm_bo_reserve(&lbo->tbo, true, false, NULL);
}

void lsdc_bo_unreserve(struct lsdc_bo *lbo)
{
        return ttm_bo_unreserve(&lbo->tbo);
}

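/*
 * Pin a BO into the requested domain.  The first pin validates the BO
 * into place and updates the per-device pinned-size accounting; further
 * pins only increment the pin count.  Buffers shared via dma-buf must
 * not be pinned into VRAM.
 */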
int lsdc_bo_pin(struct lsdc_bo *lbo, u32 domain, u64 *gpu_addr)
{
        struct ttm_operation_ctx ctx = { false, false };
        struct ttm_buffer_object *tbo = &lbo->tbo;
        struct lsdc_device *ldev = tdev_to_ldev(tbo->bdev);
        int ret;

        if (tbo->pin_count)
                goto bo_pinned;

        if (lbo->sharing_count && domain == LSDC_GEM_DOMAIN_VRAM)
                return -EINVAL;

        if (domain)
                lsdc_bo_set_placement(lbo, domain);

        ret = ttm_bo_validate(tbo, &lbo->placement, &ctx);
        if (unlikely(ret)) {
                drm_err(&ldev->base, "%p validate failed: %d\n", lbo, ret);
                return ret;
        }

        if (domain == LSDC_GEM_DOMAIN_VRAM)
                ldev->vram_pinned_size += lsdc_bo_size(lbo);
        else if (domain == LSDC_GEM_DOMAIN_GTT)
                ldev->gtt_pinned_size += lsdc_bo_size(lbo);

bo_pinned:
        ttm_bo_pin(tbo);

        if (gpu_addr)
                *gpu_addr = lsdc_bo_gpu_offset(lbo);

        return 0;
}

void lsdc_bo_unpin(struct lsdc_bo *lbo)
{
        struct ttm_buffer_object *tbo = &lbo->tbo;
        struct lsdc_device *ldev = tdev_to_ldev(tbo->bdev);

        if (unlikely(!tbo->pin_count)) {
                drm_dbg(&ldev->base, "%p unpin is not necessary\n", lbo);
                return;
        }

        ttm_bo_unpin(tbo);

        if (!tbo->pin_count) {
                if (tbo->resource->mem_type == TTM_PL_VRAM)
                        ldev->vram_pinned_size -= lsdc_bo_size(lbo);
                else if (tbo->resource->mem_type == TTM_PL_TT)
                        ldev->gtt_pinned_size -= lsdc_bo_size(lbo);
        }
}

void lsdc_bo_ref(struct lsdc_bo *lbo)
{
        struct ttm_buffer_object *tbo = &lbo->tbo;

        ttm_bo_get(tbo);
}

void lsdc_bo_unref(struct lsdc_bo *lbo)
{
        struct ttm_buffer_object *tbo = &lbo->tbo;

        ttm_bo_put(tbo);
}

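/*
 * Map a BO into kernel address space.  Wait for the kernel fences on the
 * reservation object first, then create (or reuse) a ttm_bo_kmap mapping
 * and cache the resulting pointer in lbo->kptr.
 */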
int lsdc_bo_kmap(struct lsdc_bo *lbo)
{
        struct ttm_buffer_object *tbo = &lbo->tbo;
        struct drm_gem_object *gem = &tbo->base;
        struct drm_device *ddev = gem->dev;
        long ret;
        int err;

        ret = dma_resv_wait_timeout(gem->resv, DMA_RESV_USAGE_KERNEL, false,
                                    MAX_SCHEDULE_TIMEOUT);
        if (ret < 0) {
                drm_warn(ddev, "wait fence timeout\n");
                return ret;
        }

        if (lbo->kptr)
                return 0;

        err = ttm_bo_kmap(tbo, 0, PFN_UP(lsdc_bo_size(lbo)), &lbo->kmap);
        if (err) {
                drm_err(ddev, "kmap %p failed: %d\n", lbo, err);
                return err;
        }

        lbo->kptr = ttm_kmap_obj_virtual(&lbo->kmap, &lbo->is_iomem);

        return 0;
}

void lsdc_bo_kunmap(struct lsdc_bo *lbo)
{
        if (!lbo->kptr)
                return;

        lbo->kptr = NULL;
        ttm_bo_kunmap(&lbo->kmap);
}

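/*
 * Zero a BO's contents through a temporary kernel mapping, using
 * memset_io() when the mapping is I/O memory (VRAM).
 */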
void lsdc_bo_clear(struct lsdc_bo *lbo)
{
        lsdc_bo_kmap(lbo);

        if (lbo->is_iomem)
                memset_io((void __iomem *)lbo->kptr, 0, lbo->size);
        else
                memset(lbo->kptr, 0, lbo->size);

        lsdc_bo_kunmap(lbo);
}

int lsdc_bo_evict_vram(struct drm_device *ddev)
{
        struct lsdc_device *ldev = to_lsdc(ddev);
        struct ttm_device *bdev = &ldev->bdev;
        struct ttm_resource_manager *man;

        man = ttm_manager_type(bdev, TTM_PL_VRAM);
        if (unlikely(!man))
                return 0;

        return ttm_resource_manager_evict_all(bdev, man);
}

static void lsdc_bo_destroy(struct ttm_buffer_object *tbo)
{
        struct lsdc_device *ldev = tdev_to_ldev(tbo->bdev);
        struct lsdc_bo *lbo = to_lsdc_bo(tbo);

        mutex_lock(&ldev->gem.mutex);
        list_del_init(&lbo->list);
        mutex_unlock(&ldev->gem.mutex);

        drm_gem_object_release(&tbo->base);

        kfree(lbo);
}

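/*
 * Create a TTM-backed GEM object.  The size is page aligned, the BO type
 * is derived from the kernel/sg arguments, and the object is validated
 * into its initial domain by ttm_bo_init_validate(); on success TTM owns
 * the lifetime and lsdc_bo_destroy() runs when the last reference drops.
 */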
struct lsdc_bo *lsdc_bo_create(struct drm_device *ddev,
                               u32 domain,
                               size_t size,
                               bool kernel,
                               struct sg_table *sg,
                               struct dma_resv *resv)
{
        struct lsdc_device *ldev = to_lsdc(ddev);
        struct ttm_device *bdev = &ldev->bdev;
        struct ttm_buffer_object *tbo;
        struct lsdc_bo *lbo;
        enum ttm_bo_type bo_type;
        int ret;

        lbo = kzalloc(sizeof(*lbo), GFP_KERNEL);
        if (!lbo)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&lbo->list);

        lbo->initial_domain = domain & (LSDC_GEM_DOMAIN_VRAM |
                                        LSDC_GEM_DOMAIN_GTT |
                                        LSDC_GEM_DOMAIN_SYSTEM);

        tbo = &lbo->tbo;

        size = ALIGN(size, PAGE_SIZE);

        ret = drm_gem_object_init(ddev, &tbo->base, size);
        if (ret) {
                kfree(lbo);
                return ERR_PTR(ret);
        }

        tbo->bdev = bdev;

        if (kernel)
                bo_type = ttm_bo_type_kernel;
        else if (sg)
                bo_type = ttm_bo_type_sg;
        else
                bo_type = ttm_bo_type_device;

        lsdc_bo_set_placement(lbo, domain);
        lbo->size = size;

        ret = ttm_bo_init_validate(bdev, tbo, bo_type, &lbo->placement, 0,
                                   false, sg, resv, lsdc_bo_destroy);
        if (ret) {
                kfree(lbo);
                return ERR_PTR(ret);
        }

        return lbo;
}

struct lsdc_bo *lsdc_bo_create_kernel_pinned(struct drm_device *ddev,
                                             u32 domain,
                                             size_t size)
{
        struct lsdc_bo *lbo;
        int ret;

        lbo = lsdc_bo_create(ddev, domain, size, true, NULL, NULL);
        if (IS_ERR(lbo))
                return ERR_CAST(lbo);

        ret = lsdc_bo_reserve(lbo);
        if (unlikely(ret)) {
                lsdc_bo_unref(lbo);
                return ERR_PTR(ret);
        }

        ret = lsdc_bo_pin(lbo, domain, NULL);
        lsdc_bo_unreserve(lbo);
        if (unlikely(ret)) {
                lsdc_bo_unref(lbo);
                return ERR_PTR(ret);
        }

        return lbo;
}

void lsdc_bo_free_kernel_pinned(struct lsdc_bo *lbo)
{
        int ret;

        ret = lsdc_bo_reserve(lbo);
        if (unlikely(ret))
                return;

        lsdc_bo_unpin(lbo);
        lsdc_bo_unreserve(lbo);

        lsdc_bo_unref(lbo);
}

static void lsdc_ttm_fini(struct drm_device *ddev, void *data)
{
        struct lsdc_device *ldev = (struct lsdc_device *)data;

        ttm_range_man_fini(&ldev->bdev, TTM_PL_VRAM);
        ttm_range_man_fini(&ldev->bdev, TTM_PL_TT);

        ttm_device_fini(&ldev->bdev);

        drm_dbg(ddev, "ttm finished\n");
}

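/*
 * Initialize the TTM device and create range managers for VRAM and a
 * fixed 512 MiB GTT aperture.  Teardown is handled by lsdc_ttm_fini()
 * through a DRM managed action.
 */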
int lsdc_ttm_init(struct lsdc_device *ldev)
{
        struct drm_device *ddev = &ldev->base;
        unsigned long num_vram_pages;
        unsigned long num_gtt_pages;
        int ret;

        ret = ttm_device_init(&ldev->bdev, &lsdc_bo_driver, ddev->dev,
                              ddev->anon_inode->i_mapping,
                              ddev->vma_offset_manager, false, true);
        if (ret)
                return ret;

        num_vram_pages = ldev->vram_size >> PAGE_SHIFT;

        ret = ttm_range_man_init(&ldev->bdev, TTM_PL_VRAM, false, num_vram_pages);
        if (unlikely(ret))
                return ret;

        drm_info(ddev, "VRAM: %lu pages ready\n", num_vram_pages);

        /* 512M is far enough for us now */
        ldev->gtt_size = 512 << 20;

        num_gtt_pages = ldev->gtt_size >> PAGE_SHIFT;

        ret = ttm_range_man_init(&ldev->bdev, TTM_PL_TT, true, num_gtt_pages);
        if (unlikely(ret))
                return ret;

        drm_info(ddev, "GTT: %lu pages ready\n", num_gtt_pages);

        return drmm_add_action_or_reset(ddev, lsdc_ttm_fini, ldev);
}

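/* Expose the VRAM and GTT resource managers under debugfs. */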
void lsdc_ttm_debugfs_init(struct lsdc_device *ldev)
{
        struct ttm_device *bdev = &ldev->bdev;
        struct drm_device *ddev = &ldev->base;
        struct drm_minor *minor = ddev->primary;
        struct dentry *root = minor->debugfs_root;
        struct ttm_resource_manager *vram_man;
        struct ttm_resource_manager *gtt_man;

        vram_man = ttm_manager_type(bdev, TTM_PL_VRAM);
        gtt_man = ttm_manager_type(bdev, TTM_PL_TT);

        ttm_resource_manager_create_debugfs(vram_man, root, "vram_mm");
        ttm_resource_manager_create_debugfs(gtt_man, root, "gtt_mm");
}