2 * Copyright 2020 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
22 * Authors: Christian König
25 #include <linux/dma-buf-map.h>
26 #include <linux/io-mapping.h>
27 #include <linux/scatterlist.h>
29 #include <drm/ttm/ttm_resource.h>
30 #include <drm/ttm/ttm_bo_driver.h>
33 * ttm_lru_bulk_move_init - initialize a bulk move structure
34 * @bulk: the structure to init
36 * For now just memset the structure to zero.
38 void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk)
40 memset(bulk, 0, sizeof(*bulk));
42 EXPORT_SYMBOL(ttm_lru_bulk_move_init);
45 * ttm_lru_bulk_move_tail - bulk move range of resources to the LRU tail.
47 * @bulk: bulk move structure
49 * Bulk move BOs to the LRU tail, only valid to use when driver makes sure that
50 * resource order never changes. Should be called with &ttm_device.lru_lock held.
52 void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
56 for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
57 struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i];
58 struct ttm_resource_manager *man;
63 lockdep_assert_held(&pos->first->bo->bdev->lru_lock);
64 dma_resv_assert_held(pos->first->bo->base.resv);
65 dma_resv_assert_held(pos->last->bo->base.resv);
67 man = ttm_manager_type(pos->first->bo->bdev, TTM_PL_TT);
68 list_bulk_move_tail(&man->lru[i], &pos->first->lru,
72 for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
73 struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i];
74 struct ttm_resource_manager *man;
79 lockdep_assert_held(&pos->first->bo->bdev->lru_lock);
80 dma_resv_assert_held(pos->first->bo->base.resv);
81 dma_resv_assert_held(pos->last->bo->base.resv);
83 man = ttm_manager_type(pos->first->bo->bdev, TTM_PL_VRAM);
84 list_bulk_move_tail(&man->lru[i], &pos->first->lru,
88 EXPORT_SYMBOL(ttm_lru_bulk_move_tail);
90 /* Record a resource position in a bulk move structure */
91 static void ttm_lru_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
92 struct ttm_resource *res)
99 /* Move a resource to the LRU tail and track the bulk position */
100 void ttm_resource_move_to_lru_tail(struct ttm_resource *res,
101 struct ttm_lru_bulk_move *bulk)
103 struct ttm_buffer_object *bo = res->bo;
104 struct ttm_device *bdev = bo->bdev;
105 struct ttm_resource_manager *man;
107 lockdep_assert_held(&bo->bdev->lru_lock);
110 list_move_tail(&res->lru, &bdev->pinned);
111 if (bdev->funcs->del_from_lru_notify)
112 bdev->funcs->del_from_lru_notify(res->bo);
116 man = ttm_manager_type(bdev, res->mem_type);
117 list_move_tail(&res->lru, &man->lru[bo->priority]);
119 if (bdev->funcs->del_from_lru_notify)
120 bdev->funcs->del_from_lru_notify(bo);
125 switch (res->mem_type) {
127 ttm_lru_bulk_move_set_pos(&bulk->tt[bo->priority], res);
131 ttm_lru_bulk_move_set_pos(&bulk->vram[bo->priority], res);
137 * ttm_resource_init - resource object constructure
138 * @bo: buffer object this resources is allocated for
139 * @place: placement of the resource
140 * @res: the resource object to inistilize
142 * Initialize a new resource object. Counterpart of ttm_resource_fini().
144 void ttm_resource_init(struct ttm_buffer_object *bo,
145 const struct ttm_place *place,
146 struct ttm_resource *res)
148 struct ttm_resource_manager *man;
151 res->num_pages = PFN_UP(bo->base.size);
152 res->mem_type = place->mem_type;
153 res->placement = place->flags;
154 res->bus.addr = NULL;
156 res->bus.is_iomem = false;
157 res->bus.caching = ttm_cached;
159 INIT_LIST_HEAD(&res->lru);
161 man = ttm_manager_type(bo->bdev, place->mem_type);
162 spin_lock(&bo->bdev->lru_lock);
163 man->usage += res->num_pages << PAGE_SHIFT;
164 ttm_resource_move_to_lru_tail(res, NULL);
165 spin_unlock(&bo->bdev->lru_lock);
167 EXPORT_SYMBOL(ttm_resource_init);
170 * ttm_resource_fini - resource destructor
171 * @man: the resource manager this resource belongs to
172 * @res: the resource to clean up
174 * Should be used by resource manager backends to clean up the TTM resource
175 * objects before freeing the underlying structure. Makes sure the resource is
176 * removed from the LRU before destruction.
177 * Counterpart of ttm_resource_init().
179 void ttm_resource_fini(struct ttm_resource_manager *man,
180 struct ttm_resource *res)
182 struct ttm_device *bdev = man->bdev;
184 spin_lock(&bdev->lru_lock);
185 list_del_init(&res->lru);
186 if (res->bo && bdev->funcs->del_from_lru_notify)
187 bdev->funcs->del_from_lru_notify(res->bo);
188 man->usage -= res->num_pages << PAGE_SHIFT;
189 spin_unlock(&bdev->lru_lock);
191 EXPORT_SYMBOL(ttm_resource_fini);
193 int ttm_resource_alloc(struct ttm_buffer_object *bo,
194 const struct ttm_place *place,
195 struct ttm_resource **res_ptr)
197 struct ttm_resource_manager *man =
198 ttm_manager_type(bo->bdev, place->mem_type);
200 return man->func->alloc(man, bo, place, res_ptr);
203 void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
205 struct ttm_resource_manager *man;
210 man = ttm_manager_type(bo->bdev, (*res)->mem_type);
211 man->func->free(man, *res);
214 EXPORT_SYMBOL(ttm_resource_free);
216 static bool ttm_resource_places_compat(struct ttm_resource *res,
217 const struct ttm_place *places,
218 unsigned num_placement)
222 if (res->placement & TTM_PL_FLAG_TEMPORARY)
225 for (i = 0; i < num_placement; i++) {
226 const struct ttm_place *heap = &places[i];
228 if (res->start < heap->fpfn || (heap->lpfn &&
229 (res->start + res->num_pages) > heap->lpfn))
232 if ((res->mem_type == heap->mem_type) &&
233 (!(heap->flags & TTM_PL_FLAG_CONTIGUOUS) ||
234 (res->placement & TTM_PL_FLAG_CONTIGUOUS)))
241 * ttm_resource_compat - check if resource is compatible with placement
243 * @res: the resource to check
244 * @placement: the placement to check against
246 * Returns true if the placement is compatible.
248 bool ttm_resource_compat(struct ttm_resource *res,
249 struct ttm_placement *placement)
251 if (ttm_resource_places_compat(res, placement->placement,
252 placement->num_placement))
255 if ((placement->busy_placement != placement->placement ||
256 placement->num_busy_placement > placement->num_placement) &&
257 ttm_resource_places_compat(res, placement->busy_placement,
258 placement->num_busy_placement))
263 EXPORT_SYMBOL(ttm_resource_compat);
265 void ttm_resource_set_bo(struct ttm_resource *res,
266 struct ttm_buffer_object *bo)
268 spin_lock(&bo->bdev->lru_lock);
270 spin_unlock(&bo->bdev->lru_lock);
274 * ttm_resource_manager_init
276 * @man: memory manager object to init
277 * @bdev: ttm device this manager belongs to
278 * @size: size of managed resources in arbitrary units
280 * Initialise core parts of a manager object.
282 void ttm_resource_manager_init(struct ttm_resource_manager *man,
283 struct ttm_device *bdev,
288 spin_lock_init(&man->move_lock);
293 for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
294 INIT_LIST_HEAD(&man->lru[i]);
297 EXPORT_SYMBOL(ttm_resource_manager_init);
300 * ttm_resource_manager_evict_all
302 * @bdev - device to use
303 * @man - manager to use
305 * Evict all the objects out of a memory manager until it is empty.
306 * Part of memory manager cleanup sequence.
308 int ttm_resource_manager_evict_all(struct ttm_device *bdev,
309 struct ttm_resource_manager *man)
311 struct ttm_operation_ctx ctx = {
312 .interruptible = false,
313 .no_wait_gpu = false,
316 struct dma_fence *fence;
321 * Can't use standard list traversal since we're unlocking.
324 spin_lock(&bdev->lru_lock);
325 for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
326 while (!list_empty(&man->lru[i])) {
327 spin_unlock(&bdev->lru_lock);
328 ret = ttm_mem_evict_first(bdev, man, NULL, &ctx,
332 spin_lock(&bdev->lru_lock);
335 spin_unlock(&bdev->lru_lock);
337 spin_lock(&man->move_lock);
338 fence = dma_fence_get(man->move);
339 spin_unlock(&man->move_lock);
342 ret = dma_fence_wait(fence, false);
343 dma_fence_put(fence);
350 EXPORT_SYMBOL(ttm_resource_manager_evict_all);
353 * ttm_resource_manager_usage
355 * @man: A memory manager object.
357 * Return how many resources are currently used.
359 uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man)
363 spin_lock(&man->bdev->lru_lock);
365 spin_unlock(&man->bdev->lru_lock);
368 EXPORT_SYMBOL(ttm_resource_manager_usage);
371 * ttm_resource_manager_debug
373 * @man: manager type to dump.
374 * @p: printer to use for debug.
376 void ttm_resource_manager_debug(struct ttm_resource_manager *man,
377 struct drm_printer *p)
379 drm_printf(p, " use_type: %d\n", man->use_type);
380 drm_printf(p, " use_tt: %d\n", man->use_tt);
381 drm_printf(p, " size: %llu\n", man->size);
382 drm_printf(p, " usage: %llu\n", ttm_resource_manager_usage(man));
383 if (man->func->debug)
384 man->func->debug(man, p);
386 EXPORT_SYMBOL(ttm_resource_manager_debug);
388 static void ttm_kmap_iter_iomap_map_local(struct ttm_kmap_iter *iter,
389 struct dma_buf_map *dmap,
392 struct ttm_kmap_iter_iomap *iter_io =
393 container_of(iter, typeof(*iter_io), base);
397 while (i >= iter_io->cache.end) {
398 iter_io->cache.sg = iter_io->cache.sg ?
399 sg_next(iter_io->cache.sg) : iter_io->st->sgl;
400 iter_io->cache.i = iter_io->cache.end;
401 iter_io->cache.end += sg_dma_len(iter_io->cache.sg) >>
403 iter_io->cache.offs = sg_dma_address(iter_io->cache.sg) -
407 if (i < iter_io->cache.i) {
408 iter_io->cache.end = 0;
409 iter_io->cache.sg = NULL;
413 addr = io_mapping_map_local_wc(iter_io->iomap, iter_io->cache.offs +
414 (((resource_size_t)i - iter_io->cache.i)
416 dma_buf_map_set_vaddr_iomem(dmap, addr);
419 static void ttm_kmap_iter_iomap_unmap_local(struct ttm_kmap_iter *iter,
420 struct dma_buf_map *map)
422 io_mapping_unmap_local(map->vaddr_iomem);
425 static const struct ttm_kmap_iter_ops ttm_kmap_iter_io_ops = {
426 .map_local = ttm_kmap_iter_iomap_map_local,
427 .unmap_local = ttm_kmap_iter_iomap_unmap_local,
432 * ttm_kmap_iter_iomap_init - Initialize a struct ttm_kmap_iter_iomap
433 * @iter_io: The struct ttm_kmap_iter_iomap to initialize.
434 * @iomap: The struct io_mapping representing the underlying linear io_memory.
435 * @st: sg_table into @iomap, representing the memory of the struct
437 * @start: Offset that needs to be subtracted from @st to make
438 * sg_dma_address(st->sgl) - @start == 0 for @iomap start.
440 * Return: Pointer to the embedded struct ttm_kmap_iter.
442 struct ttm_kmap_iter *
443 ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
444 struct io_mapping *iomap,
446 resource_size_t start)
448 iter_io->base.ops = &ttm_kmap_iter_io_ops;
449 iter_io->iomap = iomap;
451 iter_io->start = start;
452 memset(&iter_io->cache, 0, sizeof(iter_io->cache));
454 return &iter_io->base;
456 EXPORT_SYMBOL(ttm_kmap_iter_iomap_init);
/**
 * DOC: Linear io iterator
 *
 * This code should die in the not too near future. Best would be if we could
 * make io-mapping use memremap for all io memory, and have memremap
 * implement a kmap_local functionality. We could then strip a huge amount of
 * code. These linear io iterators are implemented to mimic old functionality,
 * and they don't use kmap_local semantics at all internally. Rather ioremap or
 * friends, and at least on 32-bit they add global TLB flushes and points
 * of failure.
 */
470 static void ttm_kmap_iter_linear_io_map_local(struct ttm_kmap_iter *iter,
471 struct dma_buf_map *dmap,
474 struct ttm_kmap_iter_linear_io *iter_io =
475 container_of(iter, typeof(*iter_io), base);
477 *dmap = iter_io->dmap;
478 dma_buf_map_incr(dmap, i * PAGE_SIZE);
481 static const struct ttm_kmap_iter_ops ttm_kmap_iter_linear_io_ops = {
482 .map_local = ttm_kmap_iter_linear_io_map_local,
487 * ttm_kmap_iter_linear_io_init - Initialize an iterator for linear io memory
488 * @iter_io: The iterator to initialize
489 * @bdev: The TTM device
490 * @mem: The ttm resource representing the iomap.
492 * This function is for internal TTM use only. It sets up a memcpy kmap iterator
493 * pointing at a linear chunk of io memory.
495 * Return: A pointer to the embedded struct ttm_kmap_iter or error pointer on
498 struct ttm_kmap_iter *
499 ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
500 struct ttm_device *bdev,
501 struct ttm_resource *mem)
505 ret = ttm_mem_io_reserve(bdev, mem);
508 if (!mem->bus.is_iomem) {
514 dma_buf_map_set_vaddr(&iter_io->dmap, mem->bus.addr);
515 iter_io->needs_unmap = false;
517 size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
519 iter_io->needs_unmap = true;
520 memset(&iter_io->dmap, 0, sizeof(iter_io->dmap));
521 if (mem->bus.caching == ttm_write_combined)
522 dma_buf_map_set_vaddr_iomem(&iter_io->dmap,
523 ioremap_wc(mem->bus.offset,
525 else if (mem->bus.caching == ttm_cached)
526 dma_buf_map_set_vaddr(&iter_io->dmap,
527 memremap(mem->bus.offset, bus_size,
532 /* If uncached requested or if mapping cached or wc failed */
533 if (dma_buf_map_is_null(&iter_io->dmap))
534 dma_buf_map_set_vaddr_iomem(&iter_io->dmap,
535 ioremap(mem->bus.offset,
538 if (dma_buf_map_is_null(&iter_io->dmap)) {
544 iter_io->base.ops = &ttm_kmap_iter_linear_io_ops;
545 return &iter_io->base;
548 ttm_mem_io_free(bdev, mem);
554 * ttm_kmap_iter_linear_io_fini - Clean up an iterator for linear io memory
555 * @iter_io: The iterator to initialize
556 * @bdev: The TTM device
557 * @mem: The ttm resource representing the iomap.
559 * This function is for internal TTM use only. It cleans up a memcpy kmap
560 * iterator initialized by ttm_kmap_iter_linear_io_init.
563 ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
564 struct ttm_device *bdev,
565 struct ttm_resource *mem)
567 if (iter_io->needs_unmap && dma_buf_map_is_set(&iter_io->dmap)) {
568 if (iter_io->dmap.is_iomem)
569 iounmap(iter_io->dmap.vaddr_iomem);
571 memunmap(iter_io->dmap.vaddr);
574 ttm_mem_io_free(bdev, mem);