/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#ifndef __AMDGPU_OBJECT_H__
#define __AMDGPU_OBJECT_H__

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_res_cursor.h"
#ifdef CONFIG_MMU_NOTIFIER
#include <linux/mmu_notifier.h>
#endif
#define AMDGPU_BO_INVALID_OFFSET	LONG_MAX
#define AMDGPU_BO_MAX_PLACEMENTS	3

/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_CREATE_USERPTR_BO	(1ULL << 63)

#define to_amdgpu_bo_user(abo) container_of((abo), struct amdgpu_bo_user, bo)
#define to_amdgpu_bo_vm(abo) container_of((abo), struct amdgpu_bo_vm, bo)
struct amdgpu_bo_param {
	unsigned long			size;
	int				byte_align;
	u32				domain;
	u64				flags;
	enum ttm_bo_type		type;
	struct dma_resv			*resv;
	void				(*destroy)(struct ttm_buffer_object *bo);
	/* xcp partition number plus 1, 0 means any partition */
	int8_t				xcp_id_plus1;
};
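/*
 * Example (illustrative sketch, not part of the driver): a caller would
 * typically zero-initialize the parameter block, fill in size, alignment,
 * domain and type, and hand it to amdgpu_bo_create(). The variable names
 * below are hypothetical and error handling is abbreviated.
 *
 *	struct amdgpu_bo_param bp = { };
 *	struct amdgpu_bo *bo = NULL;
 *	int r;
 *
 *	bp.size = PAGE_SIZE;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
 *	bp.type = ttm_bo_type_kernel;
 *	bp.resv = NULL;
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 *	if (r)
 *		return r;
 */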
/* bo virtual addresses in a vm */
struct amdgpu_bo_va_mapping {
	struct amdgpu_bo_va		*bo_va;
	struct list_head		list;
	struct rb_node			rb;
	uint64_t			start;
	uint64_t			last;
	uint64_t			__subtree_last;
	uint64_t			offset;
	uint64_t			flags;
};
/* User space allocated BO in a VM */
struct amdgpu_bo_va {
	struct amdgpu_vm_bo_base	base;

	/* protected by bo being reserved */
	unsigned			ref_count;

	/* all other members protected by the VM PD being reserved */
	struct dma_fence		*last_pt_update;

	/* mappings for this bo_va */
	struct list_head		invalids;
	struct list_head		valids;

	/* If the mappings are cleared or filled */
	bool				cleared;
};
struct amdgpu_bo {
	/* Protected by tbo.reserved */
	u32				preferred_domains;
	u32				allowed_domains;
	struct ttm_place		placements[AMDGPU_BO_MAX_PLACEMENTS];
	struct ttm_placement		placement;
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
	u64				flags;
	/* per VM structure for page tables and with virtual addresses */
	struct amdgpu_vm_bo_base	*vm_bo;
	/* Constant after initialization */
	struct amdgpu_bo		*parent;

#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_interval_notifier	notifier;
#endif
	struct kgd_mem			*kfd_bo;

	/*
	 * For GPUs with spatial partitioning, xcp partition number, -1 means
	 * any partition. For other ASICs without spatial partition, always 0
	 * for memory accounting.
	 */
	int8_t				xcp_id;
};
struct amdgpu_bo_user {
	struct amdgpu_bo		bo;
	u64				tiling_flags;
	u64				metadata_flags;
	void				*metadata;
	u32				metadata_size;
};
struct amdgpu_bo_vm {
	struct amdgpu_bo		bo;
	struct amdgpu_bo		*shadow;
	struct list_head		shadow_list;
	struct amdgpu_vm_bo_base	entries[];
};
struct amdgpu_mem_stats {
	/* current VRAM usage, includes visible VRAM */
	uint64_t vram;
	/* current shared VRAM usage, includes visible VRAM */
	uint64_t vram_shared;
	/* current visible VRAM usage */
	uint64_t visible_vram;
	/* current GTT usage */
	uint64_t gtt;
	/* current shared GTT usage */
	uint64_t gtt_shared;
	/* current system memory usage */
	uint64_t cpu;
	/* current shared system memory usage */
	uint64_t cpu_shared;
	/* sum of evicted buffers, includes visible VRAM */
	uint64_t evicted_vram;
	/* sum of evicted buffers due to CPU access */
	uint64_t evicted_visible_vram;
	/* how much userspace asked for, includes vis.VRAM */
	uint64_t requested_vram;
	/* how much userspace asked for */
	uint64_t requested_visible_vram;
	/* how much userspace asked for */
	uint64_t requested_gtt;
};
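/*
 * Example (illustrative sketch): the stats block is meant to be
 * zero-initialized and then accumulated over a set of BOs with
 * amdgpu_bo_get_memory(), declared further below. The list head and member
 * names are hypothetical and depend on the caller's bookkeeping.
 *
 *	struct amdgpu_mem_stats stats;
 *
 *	memset(&stats, 0, sizeof(stats));
 *	list_for_each_entry(bo, &my_bo_list, some_list_member)
 *		amdgpu_bo_get_memory(bo, &stats);
 */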
static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
	return container_of(tbo, struct amdgpu_bo, tbo);
}
/**
 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type:	ttm memory type
 *
 * Returns corresponding domain of the ttm mem_type
 */
static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return AMDGPU_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return AMDGPU_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return AMDGPU_GEM_DOMAIN_CPU;
	case AMDGPU_PL_GDS:
		return AMDGPU_GEM_DOMAIN_GDS;
	case AMDGPU_PL_GWS:
		return AMDGPU_GEM_DOMAIN_GWS;
	case AMDGPU_PL_OA:
		return AMDGPU_GEM_DOMAIN_OA;
	case AMDGPU_PL_DOORBELL:
		return AMDGPU_GEM_DOMAIN_DOORBELL;
	default:
		break;
	}

	return 0;
}
/**
 * amdgpu_bo_reserve - reserve bo
 * @bo:		bo structure
 * @no_intr:	don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(adev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}
static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}
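/*
 * Example (illustrative sketch): most BO operations have to be performed
 * with the BO reserved. A typical interruptible pattern looks like the
 * following; the work done while reserved is hypothetical.
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
 *	amdgpu_bo_unreserve(bo);
 */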
static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
	return bo->tbo.base.size;
}

static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
	return bo->tbo.base.size / AMDGPU_GPU_PAGE_SIZE;
}

static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
	return (bo->tbo.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}
/**
 * amdgpu_bo_mmap_offset - return mmap offset of bo
 * @bo:	amdgpu object for which we query the offset
 *
 * Returns mmap offset of the object.
 */
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}
/**
 * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
 */
static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_res_cursor cursor;

	if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM)
		return false;

	amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
	while (cursor.remaining) {
		if (cursor.start < adev->gmc.visible_vram_size)
			return true;

		amdgpu_res_next(&cursor, cursor.size);
	}

	return false;
}
/**
 * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
 */
static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}

/**
 * amdgpu_bo_encrypted - test if the BO is encrypted
 * @bo: pointer to a buffer object
 *
 * Return true if the buffer object is encrypted, false otherwise.
 */
static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED;
}
/**
 * amdgpu_bo_shadowed - check if the BO is shadowed
 *
 * @bo: BO to be tested.
 *
 * Returns:
 * NULL if not shadowed or else return a BO pointer.
 */
static inline struct amdgpu_bo *amdgpu_bo_shadowed(struct amdgpu_bo *bo)
{
	if (bo->tbo.type == ttm_bo_type_kernel)
		return to_amdgpu_bo_vm(bo)->shadow;

	return NULL;
}
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);

int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr);
int amdgpu_bo_create_user(struct amdgpu_device *adev,
			  struct amdgpu_bo_param *bp,
			  struct amdgpu_bo_user **ubo_ptr);
int amdgpu_bo_create_vm(struct amdgpu_device *adev,
			struct amdgpu_bo_param *bp,
			struct amdgpu_bo_vm **ubo_ptr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr);
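/*
 * Example (illustrative sketch): amdgpu_bo_create_kernel() allocates, pins
 * and (when a CPU pointer is requested) maps a kernel-owned BO in one call;
 * amdgpu_bo_free_kernel() undoes all of it. The variables below are
 * hypothetical.
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_VRAM, &bo,
 *				    &gpu_addr, &cpu_ptr);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
 */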
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
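/*
 * Example (illustrative sketch): CPU mappings are created while the BO is
 * reserved; amdgpu_bo_kptr() returns the address of an already existing
 * mapping. The data/size used below are hypothetical.
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_kmap(bo, &cpu_ptr);
 *	if (!r) {
 *		memcpy(cpu_ptr, data, size);
 *		amdgpu_bo_kunmap(bo);
 *	}
 *	amdgpu_bo_unreserve(bo);
 */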
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset);
void amdgpu_bo_unpin(struct amdgpu_bo *bo);
int amdgpu_bo_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags);
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict);
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared);
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
			     enum amdgpu_sync_mode sync_mode, void *owner,
			     bool intr);
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
			  struct amdgpu_mem_stats *stats);
void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
			     struct dma_fence **fence);
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
					uint32_t domain);
/*
 * sub allocation
 */

static inline struct amdgpu_sa_manager *
to_amdgpu_sa_manager(struct drm_suballoc_manager *manager)
{
	return container_of(manager, struct amdgpu_sa_manager, base);
}
static inline uint64_t amdgpu_sa_bo_gpu_addr(struct drm_suballoc *sa_bo)
{
	return to_amdgpu_sa_manager(sa_bo->manager)->gpu_addr +
		drm_suballoc_soffset(sa_bo);
}

static inline void *amdgpu_sa_bo_cpu_addr(struct drm_suballoc *sa_bo)
{
	return to_amdgpu_sa_manager(sa_bo->manager)->cpu_ptr +
		drm_suballoc_soffset(sa_bo);
}
int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain);
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct drm_suballoc **sa_bo,
		     unsigned int size);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
		       struct drm_suballoc **sa_bo,
		       struct dma_fence *fence);
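/*
 * Example (illustrative sketch): the sub-allocator carves small, short-lived
 * allocations out of one backing BO. A user initializes a manager once, then
 * allocates and frees sub-buffers per submission; the fence passed to
 * amdgpu_sa_bo_free() defers the actual release until the GPU is done with
 * the sub-buffer. The manager, sizes and fence below are hypothetical.
 *
 *	struct amdgpu_sa_manager sa_manager;
 *	struct drm_suballoc *sa_bo = NULL;
 *	int r;
 *
 *	r = amdgpu_sa_bo_manager_init(adev, &sa_manager, 256 * 1024,
 *				      256, AMDGPU_GEM_DOMAIN_GTT);
 *	...
 *	r = amdgpu_sa_bo_new(&sa_manager, &sa_bo, 4096);
 *	if (!r)
 *		amdgpu_sa_bo_free(adev, &sa_bo, fence);
 */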
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m);
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m);
#endif
void amdgpu_debugfs_sa_init(struct amdgpu_device *adev);

bool amdgpu_bo_support_uswc(u64 bo_flags);

#endif