/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#define pr_fmt(fmt) "[TTM DEVICE] " fmt

#include <linux/mm.h>

#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_api.h>

#include "ttm_module.h"

/*
 * ttm_global_mutex - protecting the global state
 */
DEFINE_MUTEX(ttm_global_mutex);
unsigned ttm_glob_use_count;
struct ttm_global ttm_glob;
EXPORT_SYMBOL(ttm_glob);

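/*
 * ttm_global_release - drop one reference to the global TTM state.
 *
 * Once the last user is gone the shared state is torn down again: the page
 * pool manager is shut down, the dummy read page is freed and the structure
 * is cleared for the next ttm_global_init() cycle.
 */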
static void ttm_global_release(void)
{
        struct ttm_global *glob = &ttm_glob;

        mutex_lock(&ttm_global_mutex);
        if (--ttm_glob_use_count > 0)
                goto out;

        ttm_pool_mgr_fini();

        __free_page(glob->dummy_read_page);
        memset(glob, 0, sizeof(*glob));
out:
        mutex_unlock(&ttm_global_mutex);
}

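/*
 * ttm_global_init - set up the state shared by all TTM devices.
 *
 * Reference counted via ttm_glob_use_count; only the first caller actually
 * initializes the page pool manager (limited to roughly 50% of system
 * memory), allocates the dummy read page and creates the debugfs counter.
 */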
static int ttm_global_init(void)
{
        struct ttm_global *glob = &ttm_glob;
        unsigned long num_pages;
        struct sysinfo si;
        int ret = 0;

        mutex_lock(&ttm_global_mutex);
        if (++ttm_glob_use_count > 1)
                goto out;

        si_meminfo(&si);

        /* Limit the number of pages in the pool to about 50% of the total
         * system memory.
         */
        num_pages = ((u64)si.totalram * si.mem_unit) >> PAGE_SHIFT;
        ttm_pool_mgr_init(num_pages * 50 / 100);

        glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
        if (unlikely(glob->dummy_read_page == NULL)) {
                ret = -ENOMEM;
                goto out;
        }

        INIT_LIST_HEAD(&glob->device_list);
        atomic_set(&glob->bo_count, 0);

        debugfs_create_atomic_t("buffer_objects", 0444, ttm_debugfs_root,
                                &glob->bo_count);
out:
        mutex_unlock(&ttm_global_mutex);
        return ret;
}

/**
 * A buffer object shrink method that walks the devices on the global
 * device list and tries to swap out the first swappable buffer object
 * it finds.
 */
int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags)
{
        struct ttm_global *glob = &ttm_glob;
        struct ttm_device *bdev;
        int ret = 0;

        mutex_lock(&ttm_global_mutex);
        list_for_each_entry(bdev, &glob->device_list, device_list) {
                ret = ttm_device_swapout(bdev, ctx, gfp_flags);
                if (ret > 0) {
                        list_move_tail(&bdev->device_list, &glob->device_list);
                        break;
                }
        }
        mutex_unlock(&ttm_global_mutex);
        return ret;
}
EXPORT_SYMBOL(ttm_global_swapout);

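/**
 * ttm_device_swapout - try to swap out one buffer object of a device.
 *
 * @bdev: the device to scan
 * @ctx: operation context for the swapout
 * @gfp_flags: GFP flags used for allocating the swap backing store
 *
 * Walks the TT-backed resource managers of @bdev in LRU order and swaps out
 * the first buffer object that is neither an SG nor an already swapped-out
 * TT. Returns the number of pages released, 0 if nothing could be swapped
 * out, or a negative error code.
 */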
int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
                       gfp_t gfp_flags)
{
        struct ttm_resource_manager *man;
        struct ttm_buffer_object *bo;
        unsigned i, j;
        int ret;

        spin_lock(&bdev->lru_lock);
        for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
                man = ttm_manager_type(bdev, i);
                if (!man || !man->use_tt)
                        continue;

                for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
                        list_for_each_entry(bo, &man->lru[j], lru) {
                                uint32_t num_pages;

                                if (!bo->ttm ||
                                    bo->ttm->page_flags & TTM_PAGE_FLAG_SG ||
                                    bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)
                                        continue;

                                num_pages = bo->ttm->num_pages;
                                ret = ttm_bo_swapout(bo, ctx, gfp_flags);
                                /* ttm_bo_swapout has dropped the lru_lock */
                                if (!ret)
                                        return num_pages;
                                if (ret != -EBUSY)
                                        return ret;
                        }
                }
        }
        spin_unlock(&bdev->lru_lock);
        return 0;
}
EXPORT_SYMBOL(ttm_device_swapout);

static void ttm_init_sysman(struct ttm_device *bdev)
{
        struct ttm_resource_manager *man = &bdev->sysman;

        /*
         * Initialize the system memory buffer type.
         * Other types need to be driver / IOCTL initialized.
         */
        man->use_tt = true;

        ttm_resource_manager_init(man, 0);
        ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, man);
        ttm_resource_manager_set_used(man, true);
}

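/*
 * Delayed work handler: retry freeing buffer objects on the delayed destroy
 * list whose fences have not signaled yet, and reschedule itself (roughly
 * every 10ms) as long as entries remain.
 */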
static void ttm_device_delayed_workqueue(struct work_struct *work)
{
        struct ttm_device *bdev =
                container_of(work, struct ttm_device, wq.work);

        if (!ttm_bo_delayed_delete(bdev, false))
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
}

/**
 * ttm_device_init - initialize a struct ttm_device.
 *
 * @bdev: A pointer to a struct ttm_device to initialize.
 * @funcs: Function table for the device.
 * @dev: The core kernel device pointer for DMA mappings and allocations.
 * @mapping: The address space to use for the buffer objects of this device.
 * @vma_manager: A pointer to a vma manager.
 * @use_dma_alloc: If coherent DMA allocation API should be used.
 * @use_dma32: If we should use GFP_DMA32 for device memory allocations.
 *
 * Initializes a struct ttm_device and adds it to the global device list.
 *
 * Returns:
 * 0 on success, a negative error code on failure.
 */
int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
                    struct device *dev, struct address_space *mapping,
                    struct drm_vma_offset_manager *vma_manager,
                    bool use_dma_alloc, bool use_dma32)
{
        struct ttm_global *glob = &ttm_glob;
        int ret;

        if (WARN_ON(vma_manager == NULL))
                return -EINVAL;

        ret = ttm_global_init();
        if (ret)
                return ret;

        bdev->funcs = funcs;

        ttm_init_sysman(bdev);
        ttm_pool_init(&bdev->pool, dev, use_dma_alloc, use_dma32);

        bdev->vma_manager = vma_manager;
        INIT_DELAYED_WORK(&bdev->wq, ttm_device_delayed_workqueue);
        spin_lock_init(&bdev->lru_lock);
        INIT_LIST_HEAD(&bdev->ddestroy);
        bdev->dev_mapping = mapping;
        mutex_lock(&ttm_global_mutex);
        list_add_tail(&bdev->device_list, &glob->device_list);
        mutex_unlock(&ttm_global_mutex);

        return 0;
}
EXPORT_SYMBOL(ttm_device_init);

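/*
 * Illustrative sketch only (the "foo" names below are hypothetical and not
 * part of TTM): a driver typically embeds the ttm_device in its own device
 * structure and brings it up once at load time, roughly like this:
 *
 *      struct foo_device {
 *              struct drm_device drm;
 *              struct ttm_device bdev;
 *      };
 *
 *      ret = ttm_device_init(&fdev->bdev, &foo_bo_driver, fdev->drm.dev,
 *                            fdev->drm.anon_inode->i_mapping,
 *                            fdev->drm.vma_offset_manager,
 *                            foo_need_coherent_dma(fdev), false);
 *      if (ret)
 *              return ret;
 *
 * and tears it down with ttm_device_fini(&fdev->bdev) on unload, after all
 * buffer objects and driver-specific resource managers are gone.
 */

/**
 * ttm_device_fini - tear down a struct ttm_device.
 *
 * @bdev: Pointer to the struct ttm_device set up with ttm_device_init().
 *
 * Disables the system domain manager, removes the device from the global
 * device list, flushes the delayed destroy work and releases the device
 * page pool as well as one reference to the global TTM state.
 */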
void ttm_device_fini(struct ttm_device *bdev)
{
        struct ttm_resource_manager *man;
        unsigned i;

        man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
        ttm_resource_manager_set_used(man, false);
        ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);

        mutex_lock(&ttm_global_mutex);
        list_del(&bdev->device_list);
        mutex_unlock(&ttm_global_mutex);

        cancel_delayed_work_sync(&bdev->wq);

        if (ttm_bo_delayed_delete(bdev, true))
                pr_debug("Delayed destroy list was clean\n");

        spin_lock(&bdev->lru_lock);
        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
                if (list_empty(&man->lru[i]))
                        pr_debug("Swap list %d was clean\n", i);
        spin_unlock(&bdev->lru_lock);

        ttm_pool_fini(&bdev->pool);
        ttm_global_release();
}
EXPORT_SYMBOL(ttm_device_fini);