2 * Copyright 2009 Jerome Glisse.
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
28 * Jerome Glisse <glisse@freedesktop.org>
29 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
33 #include <linux/dma-mapping.h>
34 #include <linux/iommu.h>
35 #include <linux/hmm.h>
36 #include <linux/pagemap.h>
37 #include <linux/sched/task.h>
38 #include <linux/sched/mm.h>
39 #include <linux/seq_file.h>
40 #include <linux/slab.h>
41 #include <linux/swap.h>
42 #include <linux/swiotlb.h>
43 #include <linux/dma-buf.h>
44 #include <linux/sizes.h>
46 #include <drm/ttm/ttm_bo_api.h>
47 #include <drm/ttm/ttm_bo_driver.h>
48 #include <drm/ttm/ttm_placement.h>
49 #include <drm/ttm/ttm_module.h>
50 #include <drm/ttm/ttm_page_alloc.h>
52 #include <drm/drm_debugfs.h>
53 #include <drm/amdgpu_drm.h>
56 #include "amdgpu_object.h"
57 #include "amdgpu_trace.h"
58 #include "amdgpu_amdkfd.h"
59 #include "amdgpu_sdma.h"
60 #include "amdgpu_ras.h"
61 #include "amdgpu_atomfirmware.h"
62 #include "bif/bif_4_1_d.h"
64 #define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128
66 static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
70 return ttm_range_man_init(&adev->mman.bdev, type,
71 false, size >> PAGE_SHIFT);
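/*
 * Example (for illustration): amdgpu_ttm_init() below uses this helper to
 * create the on-chip pools, e.g.
 *
 *	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
 *
 * The pool size is given in bytes and converted to pages here.
 */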
75 * amdgpu_evict_flags - Compute placement flags
77 * @bo: The buffer object to evict
78 * @placement: Possible destination(s) for evicted BO
80 * Fill in placement data when ttm_bo_evict() is called
82 static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
83 struct ttm_placement *placement)
85 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
86 struct amdgpu_bo *abo;
87 static const struct ttm_place placements = {
90 .mem_type = TTM_PL_SYSTEM,
91 .flags = TTM_PL_MASK_CACHING
94 /* Don't handle scatter gather BOs */
95 if (bo->type == ttm_bo_type_sg) {
96 placement->num_placement = 0;
97 placement->num_busy_placement = 0;
101 /* Object isn't an AMDGPU object so ignore */
102 if (!amdgpu_bo_is_amdgpu_bo(bo)) {
103 placement->placement = &placements;
104 placement->busy_placement = &placements;
105 placement->num_placement = 1;
106 placement->num_busy_placement = 1;
110 abo = ttm_to_amdgpu_bo(bo);
111 switch (bo->mem.mem_type) {
115 placement->num_placement = 0;
116 placement->num_busy_placement = 0;
120 if (!adev->mman.buffer_funcs_enabled) {
121 /* Move to system memory */
122 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
123 } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
124 !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
125 amdgpu_bo_in_cpu_visible_vram(abo)) {
127 /* Try evicting to the CPU inaccessible part of VRAM
128 * first, but only set GTT as busy placement, so this
129 * BO will be evicted to GTT rather than causing other
130 * BOs to be evicted from VRAM
132 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
133 AMDGPU_GEM_DOMAIN_GTT);
134 abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
135 abo->placements[0].lpfn = 0;
136 abo->placement.busy_placement = &abo->placements[1];
137 abo->placement.num_busy_placement = 1;
139 /* Move to GTT memory */
140 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
145 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
148 *placement = abo->placement;
152 * amdgpu_verify_access - Verify access for a mmap call
154 * @bo: The buffer object to map
155 * @filp: The file pointer from the process performing the mmap
157 * This is called by ttm_bo_mmap() to verify whether a process
158 * has the right to mmap a BO into its process address space.
160 static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
162 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
165 * Don't verify access for KFD BOs. They don't have a GEM
166 * object associated with them.
171 if (amdgpu_ttm_tt_get_usermm(bo->ttm))
173 return drm_vma_node_verify_access(&abo->tbo.base.vma_node,
178 * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
180 * @bo: The bo to assign the memory to.
181 * @mm_node: Memory manager node for drm allocator.
182 * @mem: The region where the bo resides.
185 static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
186 struct drm_mm_node *mm_node,
187 struct ttm_resource *mem)
191 if (mm_node->start != AMDGPU_BO_INVALID_OFFSET) {
192 addr = mm_node->start << PAGE_SHIFT;
193 addr += amdgpu_ttm_domain_start(amdgpu_ttm_adev(bo->bdev),
200 * amdgpu_find_mm_node - Helper function to find the drm_mm_node corresponding to
201 * @offset. It also adjusts @offset to be relative to the returned drm_mm_node.
203 * @mem: The region where the bo resides.
204 * @offset: The offset that drm_mm_node is used for finding.
207 static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_resource *mem,
210 struct drm_mm_node *mm_node = mem->mm_node;
212 while (*offset >= (mm_node->size << PAGE_SHIFT)) {
213 *offset -= (mm_node->size << PAGE_SHIFT);
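/*
 * Worked example (for illustration): with nodes of 4 and 8 pages and an
 * incoming *offset of 6 pages' worth of bytes, the loop skips the 4-page
 * node (subtracting 4 << PAGE_SHIFT from *offset) and returns the 8-page
 * node with *offset reduced to 2 << PAGE_SHIFT bytes into that node.
 */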
220 * amdgpu_ttm_map_buffer - Map memory into the GART windows
221 * @bo: buffer object to map
222 * @mem: memory object to map
223 * @mm_node: drm_mm node object to map
224 * @num_pages: number of pages to map
225 * @offset: offset into @mm_node where to start
226 * @window: which GART window to use
227 * @ring: DMA ring to use for the copy
228 * @tmz: if we should setup a TMZ enabled mapping
229 * @addr: resulting address inside the MC address space
231 * Set up one of the GART windows to access a specific piece of memory or return
232 * the physical address for local memory.
234 static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
235 struct ttm_resource *mem,
236 struct drm_mm_node *mm_node,
237 unsigned num_pages, uint64_t offset,
238 unsigned window, struct amdgpu_ring *ring,
239 bool tmz, uint64_t *addr)
241 struct amdgpu_device *adev = ring->adev;
242 struct amdgpu_job *job;
243 unsigned num_dw, num_bytes;
244 struct dma_fence *fence;
245 uint64_t src_addr, dst_addr;
251 BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
252 AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
254 /* Map only what can't be accessed directly */
255 if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
256 *addr = amdgpu_mm_node_addr(bo, mm_node, mem) + offset;
260 *addr = adev->gmc.gart_start;
261 *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
262 AMDGPU_GPU_PAGE_SIZE;
263 *addr += offset & ~PAGE_MASK;
265 num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
266 num_bytes = num_pages * 8;
268 r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
269 AMDGPU_IB_POOL_DELAYED, &job);
273 src_addr = num_dw * 4;
274 src_addr += job->ibs[0].gpu_addr;
276 dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
277 dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
278 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
279 dst_addr, num_bytes, false);
281 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
282 WARN_ON(job->ibs[0].length_dw > num_dw);
284 flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
286 flags |= AMDGPU_PTE_TMZ;
288 cpu_addr = &job->ibs[0].ptr[num_dw];
290 if (mem->mem_type == TTM_PL_TT) {
291 struct ttm_dma_tt *dma;
292 dma_addr_t *dma_address;
294 dma = container_of(bo->ttm, struct ttm_dma_tt, ttm);
295 dma_address = &dma->dma_address[offset >> PAGE_SHIFT];
296 r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
301 dma_addr_t dma_address;
303 dma_address = (mm_node->start << PAGE_SHIFT) + offset;
304 dma_address += adev->vm_manager.vram_base_offset;
306 for (i = 0; i < num_pages; ++i) {
307 r = amdgpu_gart_map(adev, i << PAGE_SHIFT, 1,
308 &dma_address, flags, cpu_addr);
312 dma_address += PAGE_SIZE;
316 r = amdgpu_job_submit(job, &adev->mman.entity,
317 AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
321 dma_fence_put(fence);
326 amdgpu_job_free(job);
331 * amdgpu_copy_ttm_mem_to_mem - Helper function for copy
332 * @adev: amdgpu device
333 * @src: buffer/address where to read from
334 * @dst: buffer/address where to write to
335 * @size: number of bytes to copy
336 * @tmz: if a secure copy should be used
337 * @resv: resv object to sync to
338 * @f: Returns the last fence if multiple jobs are submitted.
340 * The function copies @size bytes from {src->mem + src->offset} to
341 * {dst->mem + dst->offset}. src->bo and dst->bo could be the same BO for a
342 * move, or different BOs for a BO-to-BO copy.
345 int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
346 const struct amdgpu_copy_mem *src,
347 const struct amdgpu_copy_mem *dst,
348 uint64_t size, bool tmz,
349 struct dma_resv *resv,
350 struct dma_fence **f)
352 const uint32_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
353 AMDGPU_GPU_PAGE_SIZE);
355 uint64_t src_node_size, dst_node_size, src_offset, dst_offset;
356 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
357 struct drm_mm_node *src_mm, *dst_mm;
358 struct dma_fence *fence = NULL;
361 if (!adev->mman.buffer_funcs_enabled) {
362 DRM_ERROR("Trying to move memory with ring turned off.\n");
366 src_offset = src->offset;
367 if (src->mem->mm_node) {
368 src_mm = amdgpu_find_mm_node(src->mem, &src_offset);
369 src_node_size = (src_mm->size << PAGE_SHIFT) - src_offset;
372 src_node_size = ULLONG_MAX;
375 dst_offset = dst->offset;
376 if (dst->mem->mm_node) {
377 dst_mm = amdgpu_find_mm_node(dst->mem, &dst_offset);
378 dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst_offset;
381 dst_node_size = ULLONG_MAX;
384 mutex_lock(&adev->mman.gtt_window_lock);
387 uint32_t src_page_offset = src_offset & ~PAGE_MASK;
388 uint32_t dst_page_offset = dst_offset & ~PAGE_MASK;
389 struct dma_fence *next;
393 /* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
394 * begins at an offset, then adjust the size accordingly
396 cur_size = max(src_page_offset, dst_page_offset);
397 cur_size = min(min3(src_node_size, dst_node_size, size),
398 (uint64_t)(GTT_MAX_BYTES - cur_size));
400 /* Map src to window 0 and dst to window 1. */
401 r = amdgpu_ttm_map_buffer(src->bo, src->mem, src_mm,
402 PFN_UP(cur_size + src_page_offset),
403 src_offset, 0, ring, tmz, &from);
407 r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, dst_mm,
408 PFN_UP(cur_size + dst_page_offset),
409 dst_offset, 1, ring, tmz, &to);
413 r = amdgpu_copy_buffer(ring, from, to, cur_size,
414 resv, &next, false, true, tmz);
418 dma_fence_put(fence);
425 src_node_size -= cur_size;
426 if (!src_node_size) {
428 src_node_size = src_mm->size << PAGE_SHIFT;
431 src_offset += cur_size;
434 dst_node_size -= cur_size;
435 if (!dst_node_size) {
437 dst_node_size = dst_mm->size << PAGE_SHIFT;
440 dst_offset += cur_size;
444 mutex_unlock(&adev->mman.gtt_window_lock);
446 *f = dma_fence_get(fence);
447 dma_fence_put(fence);
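/*
 * Minimal usage sketch (for illustration only; it mirrors what
 * amdgpu_move_blit() below does): wrap source and destination in
 * amdgpu_copy_mem and copy the whole BO. The helper name and error handling
 * are hypothetical, not part of the driver.
 */
static int amdgpu_ttm_copy_whole_bo_example(struct amdgpu_device *adev,
					    struct ttm_buffer_object *bo,
					    struct ttm_resource *new_mem)
{
	struct amdgpu_copy_mem src = { .bo = bo, .mem = &bo->mem, .offset = 0 };
	struct amdgpu_copy_mem dst = { .bo = bo, .mem = new_mem, .offset = 0 };
	struct dma_fence *fence = NULL;
	int r;

	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
				       new_mem->num_pages << PAGE_SHIFT,
				       false, bo->base.resv, &fence);
	if (r)
		return r;

	dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return 0;
}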
452 * amdgpu_move_blit - Copy an entire buffer to another buffer
454 * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
455 * help move buffers to and from VRAM.
457 static int amdgpu_move_blit(struct ttm_buffer_object *bo,
459 struct ttm_resource *new_mem,
460 struct ttm_resource *old_mem)
462 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
463 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
464 struct amdgpu_copy_mem src, dst;
465 struct dma_fence *fence = NULL;
475 r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
476 new_mem->num_pages << PAGE_SHIFT,
477 amdgpu_bo_encrypted(abo),
478 bo->base.resv, &fence);
482 /* clear the space being freed */
483 if (old_mem->mem_type == TTM_PL_VRAM &&
484 (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
485 struct dma_fence *wipe_fence = NULL;
487 r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON,
491 } else if (wipe_fence) {
492 dma_fence_put(fence);
497 /* Always block for VM page tables before committing the new location */
498 if (bo->type == ttm_bo_type_kernel)
499 r = ttm_bo_move_accel_cleanup(bo, fence, true, new_mem);
501 r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
502 dma_fence_put(fence);
507 dma_fence_wait(fence, false);
508 dma_fence_put(fence);
513 * amdgpu_move_vram_ram - Copy VRAM buffer to RAM buffer
515 * Called by amdgpu_bo_move().
517 static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
518 struct ttm_operation_ctx *ctx,
519 struct ttm_resource *new_mem)
521 struct ttm_resource *old_mem = &bo->mem;
522 struct ttm_resource tmp_mem;
523 struct ttm_place placements;
524 struct ttm_placement placement;
527 /* create space/pages for new_mem in GTT space */
529 tmp_mem.mm_node = NULL;
530 placement.num_placement = 1;
531 placement.placement = &placements;
532 placement.num_busy_placement = 1;
533 placement.busy_placement = &placements;
536 placements.mem_type = TTM_PL_TT;
537 placements.flags = TTM_PL_MASK_CACHING;
538 r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
540 pr_err("Failed to find GTT space for blit from VRAM\n");
544 /* set caching flags */
545 r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
550 /* Bind the memory to the GTT space */
551 r = ttm_tt_bind(bo->bdev, bo->ttm, &tmp_mem, ctx);
556 /* blit VRAM to GTT */
557 r = amdgpu_move_blit(bo, evict, &tmp_mem, old_mem);
562 /* move BO (in tmp_mem) to new_mem */
563 r = ttm_bo_move_ttm(bo, ctx, new_mem);
565 ttm_resource_free(bo, &tmp_mem);
570 * amdgpu_move_ram_vram - Copy buffer from RAM to VRAM
572 * Called by amdgpu_bo_move().
574 static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
575 struct ttm_operation_ctx *ctx,
576 struct ttm_resource *new_mem)
578 struct ttm_resource *old_mem = &bo->mem;
579 struct ttm_resource tmp_mem;
580 struct ttm_placement placement;
581 struct ttm_place placements;
584 /* make space in GTT for old_mem buffer */
586 tmp_mem.mm_node = NULL;
587 placement.num_placement = 1;
588 placement.placement = &placements;
589 placement.num_busy_placement = 1;
590 placement.busy_placement = &placements;
593 placements.mem_type = TTM_PL_TT;
594 placements.flags = TTM_PL_MASK_CACHING;
595 r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
597 pr_err("Failed to find GTT space for blit to VRAM\n");
601 /* move/bind old memory to GTT space */
602 r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
608 r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
613 ttm_resource_free(bo, &tmp_mem);
618 * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
620 * Called by amdgpu_bo_move()
622 static bool amdgpu_mem_visible(struct amdgpu_device *adev,
623 struct ttm_resource *mem)
625 struct drm_mm_node *nodes = mem->mm_node;
627 if (mem->mem_type == TTM_PL_SYSTEM ||
628 mem->mem_type == TTM_PL_TT)
630 if (mem->mem_type != TTM_PL_VRAM)
633 /* ttm_resource_ioremap only supports contiguous memory */
634 if (nodes->size != mem->num_pages)
637 return ((nodes->start + nodes->size) << PAGE_SHIFT)
638 <= adev->gmc.visible_vram_size;
642 * amdgpu_bo_move - Move a buffer object to a new memory location
644 * Called by ttm_bo_handle_move_mem()
646 static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
647 struct ttm_operation_ctx *ctx,
648 struct ttm_resource *new_mem)
650 struct amdgpu_device *adev;
651 struct amdgpu_bo *abo;
652 struct ttm_resource *old_mem = &bo->mem;
655 /* Can't move a pinned BO */
656 abo = ttm_to_amdgpu_bo(bo);
657 if (WARN_ON_ONCE(abo->pin_count > 0))
660 adev = amdgpu_ttm_adev(bo->bdev);
662 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
663 ttm_bo_move_null(bo, new_mem);
666 if ((old_mem->mem_type == TTM_PL_TT &&
667 new_mem->mem_type == TTM_PL_SYSTEM) ||
668 (old_mem->mem_type == TTM_PL_SYSTEM &&
669 new_mem->mem_type == TTM_PL_TT)) {
671 ttm_bo_move_null(bo, new_mem);
674 if (old_mem->mem_type == AMDGPU_PL_GDS ||
675 old_mem->mem_type == AMDGPU_PL_GWS ||
676 old_mem->mem_type == AMDGPU_PL_OA ||
677 new_mem->mem_type == AMDGPU_PL_GDS ||
678 new_mem->mem_type == AMDGPU_PL_GWS ||
679 new_mem->mem_type == AMDGPU_PL_OA) {
680 /* Nothing to save here */
681 ttm_bo_move_null(bo, new_mem);
685 if (!adev->mman.buffer_funcs_enabled) {
690 if (old_mem->mem_type == TTM_PL_VRAM &&
691 new_mem->mem_type == TTM_PL_SYSTEM) {
692 r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem);
693 } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
694 new_mem->mem_type == TTM_PL_VRAM) {
695 r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
697 r = amdgpu_move_blit(bo, evict,
703 /* Check that all memory is CPU accessible */
704 if (!amdgpu_mem_visible(adev, old_mem) ||
705 !amdgpu_mem_visible(adev, new_mem)) {
706 pr_err("Move buffer fallback to memcpy unavailable\n");
710 r = ttm_bo_move_memcpy(bo, ctx, new_mem);
715 if (bo->type == ttm_bo_type_device &&
716 new_mem->mem_type == TTM_PL_VRAM &&
717 old_mem->mem_type != TTM_PL_VRAM) {
718 /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
719 * accesses the BO after it's moved.
721 abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
724 /* update statistics */
725 atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
730 * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
732 * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
734 static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
736 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
737 struct drm_mm_node *mm_node = mem->mm_node;
738 size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
740 switch (mem->mem_type) {
747 mem->bus.offset = mem->start << PAGE_SHIFT;
748 /* check if it's visible */
749 if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
751 /* Only physically contiguous buffers apply. In a contiguous
752 * buffer, size of the first mm_node would match the number of
753 * pages in ttm_resource.
755 if (adev->mman.aper_base_kaddr &&
756 (mm_node->size == mem->num_pages))
757 mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
760 mem->bus.offset += adev->gmc.aper_base;
761 mem->bus.is_iomem = true;
769 static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
770 unsigned long page_offset)
772 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
773 uint64_t offset = (page_offset << PAGE_SHIFT);
774 struct drm_mm_node *mm;
776 mm = amdgpu_find_mm_node(&bo->mem, &offset);
777 offset += adev->gmc.aper_base;
778 return mm->start + (offset >> PAGE_SHIFT);
782 * amdgpu_ttm_domain_start - Returns GPU start address
783 * @adev: amdgpu device object
784 * @type: type of the memory
787 * GPU start address of a memory domain
790 uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
794 return adev->gmc.gart_start;
796 return adev->gmc.vram_start;
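/*
 * Example (for illustration): combined with amdgpu_mm_node_addr() above, a
 * GTT node starting at page 0x100 yields
 *	(0x100 << PAGE_SHIFT) + amdgpu_ttm_domain_start(adev, TTM_PL_TT)
 * i.e. 1 MiB (with 4 KiB pages) past gart_start in the MC address space.
 */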
803 * TTM backend functions.
805 struct amdgpu_ttm_tt {
806 struct ttm_dma_tt ttm;
807 struct drm_gem_object *gobj;
810 struct task_struct *usertask;
812 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
813 struct hmm_range *range;
817 #ifdef CONFIG_DRM_AMDGPU_USERPTR
819 * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
820 * memory and start HMM tracking of CPU page table updates
822 * The calling function must call amdgpu_ttm_tt_get_user_pages_done() once and only
823 * once afterwards to stop HMM tracking
825 int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
827 struct ttm_tt *ttm = bo->tbo.ttm;
828 struct amdgpu_ttm_tt *gtt = (void *)ttm;
829 unsigned long start = gtt->userptr;
830 struct vm_area_struct *vma;
831 struct hmm_range *range;
832 unsigned long timeout;
833 struct mm_struct *mm;
837 mm = bo->notifier.mm;
839 DRM_DEBUG_DRIVER("BO is not registered?\n");
843 /* Another get_user_pages is running at the same time?? */
844 if (WARN_ON(gtt->range))
847 if (!mmget_not_zero(mm)) /* Happens during process shutdown */
850 range = kzalloc(sizeof(*range), GFP_KERNEL);
851 if (unlikely(!range)) {
855 range->notifier = &bo->notifier;
856 range->start = bo->notifier.interval_tree.start;
857 range->end = bo->notifier.interval_tree.last + 1;
858 range->default_flags = HMM_PFN_REQ_FAULT;
859 if (!amdgpu_ttm_tt_is_readonly(ttm))
860 range->default_flags |= HMM_PFN_REQ_WRITE;
862 range->hmm_pfns = kvmalloc_array(ttm->num_pages,
863 sizeof(*range->hmm_pfns), GFP_KERNEL);
864 if (unlikely(!range->hmm_pfns)) {
866 goto out_free_ranges;
870 vma = find_vma(mm, start);
871 if (unlikely(!vma || start < vma->vm_start)) {
875 if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
880 mmap_read_unlock(mm);
881 timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
884 range->notifier_seq = mmu_interval_read_begin(&bo->notifier);
887 r = hmm_range_fault(range);
888 mmap_read_unlock(mm);
891 * FIXME: This timeout should encompass the retry from
892 * mmu_interval_read_retry() as well.
894 if (r == -EBUSY && !time_after(jiffies, timeout))
900 * Due to default_flags, all pages are HMM_PFN_VALID or
901 * hmm_range_fault() fails. FIXME: The pages cannot be touched outside
902 * the notifier_lock, and mmu_interval_read_retry() must be done first.
904 for (i = 0; i < ttm->num_pages; i++)
905 pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);
913 mmap_read_unlock(mm);
915 kvfree(range->hmm_pfns);
924 * amdgpu_ttm_tt_get_user_pages_done - stop HMM tracking of CPU page table changes
925 * Check if the pages backing this ttm range have been invalidated
927 * Returns: true if pages are still valid
929 bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
931 struct amdgpu_ttm_tt *gtt = (void *)ttm;
934 if (!gtt || !gtt->userptr)
937 DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%lx\n",
938 gtt->userptr, ttm->num_pages);
940 WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
941 "No user pages to check\n");
945 * FIXME: Must always hold notifier_lock for this, and must
946 * not ignore the return code.
948 r = mmu_interval_read_retry(gtt->range->notifier,
949 gtt->range->notifier_seq);
950 kvfree(gtt->range->hmm_pfns);
960 * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
962 * Called by amdgpu_cs_list_validate(). This creates the page list
963 * that backs user memory and will ultimately be mapped into the device
966 void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
970 for (i = 0; i < ttm->num_pages; ++i)
971 ttm->pages[i] = pages ? pages[i] : NULL;
975 * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
977 * Called by amdgpu_ttm_backend_bind()
979 static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev,
982 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
983 struct amdgpu_ttm_tt *gtt = (void *)ttm;
986 int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
987 enum dma_data_direction direction = write ?
988 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
990 /* Allocate an SG array and squash pages into it */
991 r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
992 ttm->num_pages << PAGE_SHIFT,
997 /* Map SG to device */
998 r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
1002 /* convert SG to linear array of pages and dma addresses */
1003 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
1004 gtt->ttm.dma_address, ttm->num_pages);
1014 * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
1016 static void amdgpu_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev,
1019 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1020 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1022 int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
1023 enum dma_data_direction direction = write ?
1024 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
1026 /* double check that we don't free the table twice */
1030 /* unmap the pages mapped to the device */
1031 dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
1032 sg_free_table(ttm->sg);
1034 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
1038 for (i = 0; i < ttm->num_pages; i++) {
1039 if (ttm->pages[i] !=
1040 hmm_pfn_to_page(gtt->range->hmm_pfns[i]))
1044 WARN((i == ttm->num_pages), "Missing get_user_page_done\n");
1049 static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
1050 struct ttm_buffer_object *tbo,
1053 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
1054 struct ttm_tt *ttm = tbo->ttm;
1055 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1058 if (amdgpu_bo_encrypted(abo))
1059 flags |= AMDGPU_PTE_TMZ;
1061 if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
1062 uint64_t page_idx = 1;
1064 r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
1065 ttm->pages, gtt->ttm.dma_address, flags);
1067 goto gart_bind_fail;
1069 /* The memory type of the first page defaults to UC. Now
1070 * modify the memory type to NC from the second page of
1073 flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
1074 flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
1076 r = amdgpu_gart_bind(adev,
1077 gtt->offset + (page_idx << PAGE_SHIFT),
1078 ttm->num_pages - page_idx,
1079 &ttm->pages[page_idx],
1080 &(gtt->ttm.dma_address[page_idx]), flags);
1082 r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
1083 ttm->pages, gtt->ttm.dma_address, flags);
1088 DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
1089 ttm->num_pages, gtt->offset);
1095 * amdgpu_ttm_backend_bind - Bind GTT memory
1097 * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
1098 * This handles binding GTT memory to the device address space.
1100 static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
1102 struct ttm_resource *bo_mem)
1104 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1105 struct amdgpu_ttm_tt *gtt = (void*)ttm;
1110 r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
1112 DRM_ERROR("failed to pin userptr\n");
1116 if (!ttm->num_pages) {
1117 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
1118 ttm->num_pages, bo_mem, ttm);
1121 if (bo_mem->mem_type == AMDGPU_PL_GDS ||
1122 bo_mem->mem_type == AMDGPU_PL_GWS ||
1123 bo_mem->mem_type == AMDGPU_PL_OA)
1126 if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
1127 gtt->offset = AMDGPU_BO_INVALID_OFFSET;
1131 /* compute PTE flags relevant to this BO memory */
1132 flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
1134 /* bind pages into GART page tables */
1135 gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
1136 r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
1137 ttm->pages, gtt->ttm.dma_address, flags);
1140 DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
1141 ttm->num_pages, gtt->offset);
1146 * amdgpu_ttm_alloc_gart - Allocate GART memory for buffer object
1148 int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
1150 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
1151 struct ttm_operation_ctx ctx = { false, false };
1152 struct amdgpu_ttm_tt *gtt = (void*)bo->ttm;
1153 struct ttm_resource tmp;
1154 struct ttm_placement placement;
1155 struct ttm_place placements;
1156 uint64_t addr, flags;
1159 if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
1162 addr = amdgpu_gmc_agp_addr(bo);
1163 if (addr != AMDGPU_BO_INVALID_OFFSET) {
1164 bo->mem.start = addr >> PAGE_SHIFT;
1167 /* allocate GART space */
1170 placement.num_placement = 1;
1171 placement.placement = &placements;
1172 placement.num_busy_placement = 1;
1173 placement.busy_placement = &placements;
1174 placements.fpfn = 0;
1175 placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
1176 placements.mem_type = TTM_PL_TT;
1177 placements.flags = bo->mem.placement;
1179 r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
1183 /* compute PTE flags for this buffer object */
1184 flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
1187 gtt->offset = (u64)tmp.start << PAGE_SHIFT;
1188 r = amdgpu_ttm_gart_bind(adev, bo, flags);
1190 ttm_resource_free(bo, &tmp);
1194 ttm_resource_free(bo, &bo->mem);
1202 * amdgpu_ttm_recover_gart - Rebind GTT pages
1204 * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
1205 * rebind GTT pages during a GPU reset.
1207 int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
1209 struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
1216 flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem);
1217 r = amdgpu_ttm_gart_bind(adev, tbo, flags);
1223 * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
1225 * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
1228 static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
1231 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1232 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1235 /* if the pages have userptr pinning then clear that first */
1237 amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
1239 if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
1242 /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
1243 r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
1245 DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
1246 gtt->ttm.ttm.num_pages, gtt->offset);
1249 static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev,
1252 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1255 put_task_struct(gtt->usertask);
1257 ttm_dma_tt_fini(&gtt->ttm);
1262 * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
1264 * @bo: The buffer object to create a GTT ttm_tt object around
1266 * Called by ttm_tt_create().
1268 static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
1269 uint32_t page_flags)
1271 struct amdgpu_ttm_tt *gtt;
1273 gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
1277 gtt->gobj = &bo->base;
1279 /* allocate space for the uninitialized page entries */
1280 if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
1284 return &gtt->ttm.ttm;
1288 * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
1290 * Map the pages of a ttm_tt object to an address space visible
1291 * to the underlying device.
1293 static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
1295 struct ttm_operation_ctx *ctx)
1297 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1298 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1300 /* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
1301 if (gtt && gtt->userptr) {
1302 ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
1306 ttm->page_flags |= TTM_PAGE_FLAG_SG;
1307 ttm_tt_set_populated(ttm);
1311 if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
1313 struct dma_buf_attachment *attach;
1314 struct sg_table *sgt;
1316 attach = gtt->gobj->import_attach;
1317 sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
1319 return PTR_ERR(sgt);
1324 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
1325 gtt->ttm.dma_address,
1327 ttm_tt_set_populated(ttm);
1331 #ifdef CONFIG_SWIOTLB
1332 if (adev->need_swiotlb && swiotlb_nr_tbl()) {
1333 return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
1337 /* fall back to generic helper to populate the page array
1338 * and map them to the device */
1339 return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
1343 * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
1345 * Unmaps pages of a ttm_tt object from the device address space and
1346 * unpopulates the page array backing it.
1348 static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
1350 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1351 struct amdgpu_device *adev;
1353 if (gtt && gtt->userptr) {
1354 amdgpu_ttm_tt_set_user_pages(ttm, NULL);
1356 ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
1360 if (ttm->sg && gtt->gobj->import_attach) {
1361 struct dma_buf_attachment *attach;
1363 attach = gtt->gobj->import_attach;
1364 dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
1369 if (ttm->page_flags & TTM_PAGE_FLAG_SG)
1372 adev = amdgpu_ttm_adev(bdev);
1374 #ifdef CONFIG_SWIOTLB
1375 if (adev->need_swiotlb && swiotlb_nr_tbl()) {
1376 ttm_dma_unpopulate(&gtt->ttm, adev->dev);
1381 /* fall back to generic helper to unmap and unpopulate array */
1382 ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
1386 * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
1389 * @bo: The ttm_buffer_object to bind this userptr to
1390 * @addr: The address in the current tasks VM space to use
1391 * @flags: Requirements of userptr object.
1393 * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages
1396 int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
1397 uint64_t addr, uint32_t flags)
1399 struct amdgpu_ttm_tt *gtt;
1402 /* TODO: We want a separate TTM object type for userptrs */
1403 bo->ttm = amdgpu_ttm_tt_create(bo, 0);
1404 if (bo->ttm == NULL)
1408 gtt = (void*)bo->ttm;
1409 gtt->userptr = addr;
1410 gtt->userflags = flags;
1413 put_task_struct(gtt->usertask);
1414 gtt->usertask = current->group_leader;
1415 get_task_struct(gtt->usertask);
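/*
 * Minimal usage sketch (for illustration only; the real path is
 * amdgpu_gem_userptr_ioctl()): bind a read-only, anonymous-memory-only user
 * address range to a BO. The helper name is hypothetical.
 */
static int amdgpu_ttm_bind_userptr_example(struct amdgpu_bo *abo,
					   uint64_t user_addr)
{
	uint32_t flags = AMDGPU_GEM_USERPTR_READONLY |
			 AMDGPU_GEM_USERPTR_ANONONLY;

	return amdgpu_ttm_tt_set_userptr(&abo->tbo, user_addr, flags);
}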
1421 * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
1423 struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
1425 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1430 if (gtt->usertask == NULL)
1433 return gtt->usertask->mm;
1437 * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lies inside an
1438 * address range for the current task.
1441 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
1444 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1447 if (gtt == NULL || !gtt->userptr)
1450 /* Return false if no part of the ttm_tt object lies within
1453 size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
1454 if (gtt->userptr > end || gtt->userptr + size <= start)
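/*
 * Worked example (for illustration): a userptr BO at 0x10000 backed by four
 * 4 KiB pages covers [0x10000, 0x14000). An invalidation of
 * [0x13000, 0x15000) overlaps it, so the check above reports true; an
 * invalidation of [0x15000, 0x16000) does not.
 */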
1461 * amdgpu_ttm_tt_is_userptr - Are the pages backed by a userptr?
1463 bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
1465 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1467 if (gtt == NULL || !gtt->userptr)
1474 * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
1476 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
1478 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1483 return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
1487 * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
1489 * @ttm: The ttm_tt object to compute the flags for
1490 * @mem: The memory registry backing this ttm_tt object
1492 * Figure out the flags to use for a VM PDE (Page Directory Entry).
1494 uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
1498 if (mem && mem->mem_type != TTM_PL_SYSTEM)
1499 flags |= AMDGPU_PTE_VALID;
1501 if (mem && mem->mem_type == TTM_PL_TT) {
1502 flags |= AMDGPU_PTE_SYSTEM;
1504 if (ttm->caching_state == tt_cached)
1505 flags |= AMDGPU_PTE_SNOOPED;
1512 * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
1514 * @ttm: The ttm_tt object to compute the flags for
1515 * @mem: The memory registry backing this ttm_tt object
1517 * Figure out the flags to use for a VM PTE (Page Table Entry).
1519 uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
1520 struct ttm_resource *mem)
1522 uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);
1524 flags |= adev->gart.gart_pte_flags;
1525 flags |= AMDGPU_PTE_READABLE;
1527 if (!amdgpu_ttm_tt_is_readonly(ttm))
1528 flags |= AMDGPU_PTE_WRITEABLE;
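/*
 * Example (for illustration): a writable, cached GTT mapping ends up with
 * AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED from the PDE
 * helper above, plus adev->gart.gart_pte_flags, AMDGPU_PTE_READABLE and
 * AMDGPU_PTE_WRITEABLE added here.
 */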
1534 * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
1537 * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
1538 * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
1539 * it can find space for a new object and by ttm_bo_force_list_clean() which is
1540 * used to clean out a memory space.
1542 static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
1543 const struct ttm_place *place)
1545 unsigned long num_pages = bo->mem.num_pages;
1546 struct drm_mm_node *node = bo->mem.mm_node;
1547 struct dma_resv_list *flist;
1548 struct dma_fence *f;
1551 if (bo->type == ttm_bo_type_kernel &&
1552 !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
1555 /* If bo is a KFD BO, check if the bo belongs to the current process.
1556 * If true, then return false as any KFD process needs all its BOs to
1557 * be resident to run successfully
1559 flist = dma_resv_get_list(bo->base.resv);
1561 for (i = 0; i < flist->shared_count; ++i) {
1562 f = rcu_dereference_protected(flist->shared[i],
1563 dma_resv_held(bo->base.resv));
1564 if (amdkfd_fence_check_mm(f, current->mm))
1569 switch (bo->mem.mem_type) {
1571 if (amdgpu_bo_is_amdgpu_bo(bo) &&
1572 amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
1577 /* Check each drm MM node individually */
1579 if (place->fpfn < (node->start + node->size) &&
1580 !(place->lpfn && place->lpfn <= node->start))
1583 num_pages -= node->size;
1592 return ttm_bo_eviction_valuable(bo, place);
1596 * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
1598 * @bo: The buffer object to read/write
1599 * @offset: Offset into buffer object
1600 * @buf: Secondary buffer to write/read from
1601 * @len: Length in bytes of access
1602 * @write: true if writing
1604 * This is used to access VRAM that backs a buffer object via MMIO
1605 * access for debugging purposes.
1607 static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
1608 unsigned long offset,
1609 void *buf, int len, int write)
1611 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1612 struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
1613 struct drm_mm_node *nodes;
1617 unsigned long flags;
1619 if (bo->mem.mem_type != TTM_PL_VRAM)
1623 nodes = amdgpu_find_mm_node(&abo->tbo.mem, &pos);
1624 pos += (nodes->start << PAGE_SHIFT);
1626 while (len && pos < adev->gmc.mc_vram_size) {
1627 uint64_t aligned_pos = pos & ~(uint64_t)3;
1628 uint64_t bytes = 4 - (pos & 3);
1629 uint32_t shift = (pos & 3) * 8;
1630 uint32_t mask = 0xffffffff << shift;
1633 mask &= 0xffffffff >> (bytes - len) * 8;
1637 if (mask != 0xffffffff) {
1638 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
1639 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
1640 WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
1641 if (!write || mask != 0xffffffff)
1642 value = RREG32_NO_KIQ(mmMM_DATA);
1645 value |= (*(uint32_t *)buf << shift) & mask;
1646 WREG32_NO_KIQ(mmMM_DATA, value);
1648 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
1650 value = (value & mask) >> shift;
1651 memcpy(buf, &value, bytes);
1654 bytes = (nodes->start + nodes->size) << PAGE_SHIFT;
1655 bytes = min(bytes - pos, (uint64_t)len & ~0x3ull);
1657 amdgpu_device_vram_access(adev, pos, (uint32_t *)buf,
1662 buf = (uint8_t *)buf + bytes;
1665 if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
1667 pos = (nodes->start << PAGE_SHIFT);
1674 static struct ttm_bo_driver amdgpu_bo_driver = {
1675 .ttm_tt_create = &amdgpu_ttm_tt_create,
1676 .ttm_tt_populate = &amdgpu_ttm_tt_populate,
1677 .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
1678 .ttm_tt_bind = &amdgpu_ttm_backend_bind,
1679 .ttm_tt_unbind = &amdgpu_ttm_backend_unbind,
1680 .ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
1681 .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
1682 .evict_flags = &amdgpu_evict_flags,
1683 .move = &amdgpu_bo_move,
1684 .verify_access = &amdgpu_verify_access,
1685 .move_notify = &amdgpu_bo_move_notify,
1686 .release_notify = &amdgpu_bo_release_notify,
1687 .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
1688 .io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
1689 .io_mem_pfn = amdgpu_ttm_io_mem_pfn,
1690 .access_memory = &amdgpu_ttm_access_memory,
1691 .del_from_lru_notify = &amdgpu_vm_del_from_lru_notify
1695 * Firmware Reservation functions
1698 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
1700 * @adev: amdgpu_device pointer
1702 * free fw reserved vram if it has been reserved.
1704 static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
1706 amdgpu_bo_free_kernel(&adev->mman.fw_vram_usage_reserved_bo,
1707 NULL, &adev->mman.fw_vram_usage_va);
1711 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
1713 * @adev: amdgpu_device pointer
1715 * create bo vram reservation from fw.
1717 static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
1719 uint64_t vram_size = adev->gmc.visible_vram_size;
1721 adev->mman.fw_vram_usage_va = NULL;
1722 adev->mman.fw_vram_usage_reserved_bo = NULL;
1724 if (adev->mman.fw_vram_usage_size == 0 ||
1725 adev->mman.fw_vram_usage_size > vram_size)
1728 return amdgpu_bo_create_kernel_at(adev,
1729 adev->mman.fw_vram_usage_start_offset,
1730 adev->mman.fw_vram_usage_size,
1731 AMDGPU_GEM_DOMAIN_VRAM,
1732 &adev->mman.fw_vram_usage_reserved_bo,
1733 &adev->mman.fw_vram_usage_va);
1737 * Memory training reservation functions
1741 * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
1743 * @adev: amdgpu_device pointer
1745 * free memory training reserved vram if it has been reserved.
1747 static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
1749 struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1751 ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
1752 amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
1758 static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
1760 struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1762 memset(ctx, 0, sizeof(*ctx));
1764 ctx->c2p_train_data_offset =
1765 ALIGN((adev->gmc.mc_vram_size - adev->mman.discovery_tmr_size - SZ_1M), SZ_1M);
1766 ctx->p2c_train_data_offset =
1767 (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
1768 ctx->train_data_size =
1769 GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
1771 DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
1772 ctx->train_data_size,
1773 ctx->p2c_train_data_offset,
1774 ctx->c2p_train_data_offset);
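/*
 * Worked example (for illustration): with an 8 GiB mc_vram_size and a 4 MiB
 * discovery_tmr_size, c2p_train_data_offset becomes
 * ALIGN(8 GiB - 4 MiB - 1 MiB, 1 MiB) = 8 GiB - 5 MiB, i.e. the training
 * data sits just below the reserved TMR at the top of VRAM.
 */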
1778 * reserve TMR memory at the top of VRAM which holds
1779 * IP Discovery data and is protected by PSP.
1781 static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
1784 struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1785 bool mem_train_support = false;
1787 if (!amdgpu_sriov_vf(adev)) {
1788 ret = amdgpu_mem_train_support(adev);
1790 mem_train_support = true;
1794 DRM_DEBUG("memory training does not support!\n");
1798 * Query the reserved TMR size through atom firmware info for Sienna_Cichlid and onwards for all
1799 * the use cases (IP discovery/G6 memory training/profiling/diagnostic data, etc.)
1801 * Otherwise, fall back to the legacy approach of checking and reserving a TMR block for IP
1802 * discovery data and G6 memory training data respectively
1804 adev->mman.discovery_tmr_size =
1805 amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
1806 if (!adev->mman.discovery_tmr_size)
1807 adev->mman.discovery_tmr_size = DISCOVERY_TMR_OFFSET;
1809 if (mem_train_support) {
1810 /* reserve vram for mem train according to TMR location */
1811 amdgpu_ttm_training_data_block_init(adev);
1812 ret = amdgpu_bo_create_kernel_at(adev,
1813 ctx->c2p_train_data_offset,
1814 ctx->train_data_size,
1815 AMDGPU_GEM_DOMAIN_VRAM,
1819 DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
1820 amdgpu_ttm_training_reserve_vram_fini(adev);
1823 ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
1826 ret = amdgpu_bo_create_kernel_at(adev,
1827 adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
1828 adev->mman.discovery_tmr_size,
1829 AMDGPU_GEM_DOMAIN_VRAM,
1830 &adev->mman.discovery_memory,
1833 DRM_ERROR("alloc tmr failed(%d)!\n", ret);
1834 amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
1842 * amdgpu_ttm_init - Init the memory management (ttm) as well as various
1843 * gtt/vram related fields.
1845 * This initializes all of the memory space pools that the TTM layer
1846 * will need such as the GTT space (system memory mapped to the device),
1847 * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
1848 * can be mapped per VMID.
1850 int amdgpu_ttm_init(struct amdgpu_device *adev)
1856 mutex_init(&adev->mman.gtt_window_lock);
1858 /* No other user of the address space, so set it to 0 */
1859 r = ttm_bo_device_init(&adev->mman.bdev,
1861 adev_to_drm(adev)->anon_inode->i_mapping,
1862 adev_to_drm(adev)->vma_offset_manager,
1863 dma_addressing_limited(adev->dev));
1865 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
1868 adev->mman.initialized = true;
1870 /* We opt to avoid OOM on system page allocations */
1871 adev->mman.bdev.no_retry = true;
1873 /* Initialize VRAM pool with all of VRAM divided into pages */
1874 r = amdgpu_vram_mgr_init(adev);
1876 DRM_ERROR("Failed initializing VRAM heap.\n");
1880 /* Reduce size of CPU-visible VRAM if requested */
1881 vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
1882 if (amdgpu_vis_vram_limit > 0 &&
1883 vis_vram_limit <= adev->gmc.visible_vram_size)
1884 adev->gmc.visible_vram_size = vis_vram_limit;
1886 /* Change the size here instead of the init above so only lpfn is affected */
1887 amdgpu_ttm_set_buffer_funcs_status(adev, false);
1889 adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
1890 adev->gmc.visible_vram_size);
1894 * The reserved VRAM for firmware must be pinned to the specified
1895 * place in VRAM, so reserve it early.
1897 r = amdgpu_ttm_fw_reserve_vram_init(adev);
1903 * only NAVI10 and onwards ASICs support IP discovery.
1904 * If IP discovery is enabled, a block of memory should be
1905 * reserved for IP discovery.
1907 if (adev->mman.discovery_bin) {
1908 r = amdgpu_ttm_reserve_tmr(adev);
1913 /* allocate memory as required for VGA
1914 * This is used for VGA emulation and pre-OS scanout buffers to
1915 * avoid display artifacts while transitioning between pre-OS
1917 r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size,
1918 AMDGPU_GEM_DOMAIN_VRAM,
1919 &adev->mman.stolen_vga_memory,
1923 r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
1924 adev->mman.stolen_extended_size,
1925 AMDGPU_GEM_DOMAIN_VRAM,
1926 &adev->mman.stolen_extended_memory,
1931 DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
1932 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
1934 /* Compute GTT size, either based on 3/4 of the RAM size
1935 * or whatever the user passed on module init */
1936 if (amdgpu_gtt_size == -1) {
1940 gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
1941 adev->gmc.mc_vram_size),
1942 ((uint64_t)si.totalram * si.mem_unit * 3/4));
1945 gtt_size = (uint64_t)amdgpu_gtt_size << 20;
1947 /* Initialize GTT memory pool */
1948 r = amdgpu_gtt_mgr_init(adev, gtt_size);
1950 DRM_ERROR("Failed initializing GTT heap.\n");
1953 DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
1954 (unsigned)(gtt_size / (1024 * 1024)));
1956 /* Initialize various on-chip memory pools */
1957 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
1959 DRM_ERROR("Failed initializing GDS heap.\n");
1963 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
1965 DRM_ERROR("Failed initializing gws heap.\n");
1969 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
1971 DRM_ERROR("Failed initializing oa heap.\n");
1979 * amdgpu_ttm_late_init - Handle any late initialization for amdgpu_ttm
1981 void amdgpu_ttm_late_init(struct amdgpu_device *adev)
1983 /* return the VGA stolen memory (if any) back to VRAM */
1984 if (!adev->mman.keep_stolen_vga_memory)
1985 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
1986 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
1990 * amdgpu_ttm_fini - De-initialize the TTM memory pools
1992 void amdgpu_ttm_fini(struct amdgpu_device *adev)
1994 if (!adev->mman.initialized)
1997 amdgpu_ttm_training_reserve_vram_fini(adev);
1998 /* return the stolen vga memory back to VRAM */
1999 if (adev->mman.keep_stolen_vga_memory)
2000 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
2001 /* return the IP Discovery TMR memory back to VRAM */
2002 amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
2003 amdgpu_ttm_fw_reserve_vram_fini(adev);
2005 if (adev->mman.aper_base_kaddr)
2006 iounmap(adev->mman.aper_base_kaddr);
2007 adev->mman.aper_base_kaddr = NULL;
2009 amdgpu_vram_mgr_fini(adev);
2010 amdgpu_gtt_mgr_fini(adev);
2011 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
2012 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
2013 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
2014 ttm_bo_device_release(&adev->mman.bdev);
2015 adev->mman.initialized = false;
2016 DRM_INFO("amdgpu: ttm finalized\n");
2020 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
2022 * @adev: amdgpu_device pointer
2023 * @enable: true when we can use buffer functions.
2025 * Enable/disable use of buffer functions during suspend/resume. This should
2026 * only be called at bootup or when userspace isn't running.
2028 void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
2030 struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
2034 if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
2035 adev->mman.buffer_funcs_enabled == enable)
2039 struct amdgpu_ring *ring;
2040 struct drm_gpu_scheduler *sched;
2042 ring = adev->mman.buffer_funcs_ring;
2043 sched = &ring->sched;
2044 r = drm_sched_entity_init(&adev->mman.entity,
2045 DRM_SCHED_PRIORITY_KERNEL, &sched,
2048 DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
2053 drm_sched_entity_destroy(&adev->mman.entity);
2054 dma_fence_put(man->move);
2058 /* this just adjusts TTM size idea, which sets lpfn to the correct value */
2060 size = adev->gmc.real_vram_size;
2062 size = adev->gmc.visible_vram_size;
2063 man->size = size >> PAGE_SHIFT;
2064 adev->mman.buffer_funcs_enabled = enable;
2067 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
2069 struct drm_file *file_priv = filp->private_data;
2070 struct amdgpu_device *adev = drm_to_adev(file_priv->minor->dev);
2075 return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
2078 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
2079 uint64_t dst_offset, uint32_t byte_count,
2080 struct dma_resv *resv,
2081 struct dma_fence **fence, bool direct_submit,
2082 bool vm_needs_flush, bool tmz)
2084 enum amdgpu_ib_pool_type pool = direct_submit ? AMDGPU_IB_POOL_DIRECT :
2085 AMDGPU_IB_POOL_DELAYED;
2086 struct amdgpu_device *adev = ring->adev;
2087 struct amdgpu_job *job;
2090 unsigned num_loops, num_dw;
2094 if (direct_submit && !ring->sched.ready) {
2095 DRM_ERROR("Trying to move memory with ring turned off.\n");
2099 max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
2100 num_loops = DIV_ROUND_UP(byte_count, max_bytes);
2101 num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
2103 r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, &job);
2107 if (vm_needs_flush) {
2108 job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
2109 job->vm_needs_flush = true;
2112 r = amdgpu_sync_resv(adev, &job->sync, resv,
2114 AMDGPU_FENCE_OWNER_UNDEFINED);
2116 DRM_ERROR("sync failed (%d).\n", r);
2121 for (i = 0; i < num_loops; i++) {
2122 uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
2124 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
2125 dst_offset, cur_size_in_bytes, tmz);
2127 src_offset += cur_size_in_bytes;
2128 dst_offset += cur_size_in_bytes;
2129 byte_count -= cur_size_in_bytes;
2132 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2133 WARN_ON(job->ibs[0].length_dw > num_dw);
2135 r = amdgpu_job_submit_direct(job, ring, fence);
2137 r = amdgpu_job_submit(job, &adev->mman.entity,
2138 AMDGPU_FENCE_OWNER_UNDEFINED, fence);
2145 amdgpu_job_free(job);
2146 DRM_ERROR("Error scheduling IBs (%d)\n", r);
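/*
 * Minimal usage sketch (for illustration only): a blocking, direct-submit
 * copy of one page between two GPU addresses, without reservation syncing,
 * VM flush or TMZ. The helper name is hypothetical.
 */
static int amdgpu_ttm_copy_page_example(struct amdgpu_ring *ring,
					uint64_t src, uint64_t dst)
{
	struct dma_fence *fence = NULL;
	int r;

	r = amdgpu_copy_buffer(ring, src, dst, PAGE_SIZE, NULL, &fence,
			       true, false, false);
	if (r)
		return r;

	dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return 0;
}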
2150 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
2152 struct dma_resv *resv,
2153 struct dma_fence **fence)
2155 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
2156 uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
2157 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2159 struct drm_mm_node *mm_node;
2160 unsigned long num_pages;
2161 unsigned int num_loops, num_dw;
2163 struct amdgpu_job *job;
2166 if (!adev->mman.buffer_funcs_enabled) {
2167 DRM_ERROR("Trying to clear memory with ring turned off.\n");
2171 if (bo->tbo.mem.mem_type == TTM_PL_TT) {
2172 r = amdgpu_ttm_alloc_gart(&bo->tbo);
2177 num_pages = bo->tbo.num_pages;
2178 mm_node = bo->tbo.mem.mm_node;
2181 uint64_t byte_count = mm_node->size << PAGE_SHIFT;
2183 num_loops += DIV_ROUND_UP_ULL(byte_count, max_bytes);
2184 num_pages -= mm_node->size;
2187 num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
2189 /* for IB padding */
2192 r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED,
2198 r = amdgpu_sync_resv(adev, &job->sync, resv,
2200 AMDGPU_FENCE_OWNER_UNDEFINED);
2202 DRM_ERROR("sync failed (%d).\n", r);
2207 num_pages = bo->tbo.num_pages;
2208 mm_node = bo->tbo.mem.mm_node;
2211 uint64_t byte_count = mm_node->size << PAGE_SHIFT;
2214 dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
2215 while (byte_count) {
2216 uint32_t cur_size_in_bytes = min_t(uint64_t, byte_count,
2219 amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
2220 dst_addr, cur_size_in_bytes);
2222 dst_addr += cur_size_in_bytes;
2223 byte_count -= cur_size_in_bytes;
2226 num_pages -= mm_node->size;
2230 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2231 WARN_ON(job->ibs[0].length_dw > num_dw);
2232 r = amdgpu_job_submit(job, &adev->mman.entity,
2233 AMDGPU_FENCE_OWNER_UNDEFINED, fence);
2240 amdgpu_job_free(job);
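/*
 * Minimal usage sketch (for illustration only, assuming the elided second
 * parameter of amdgpu_fill_buffer() is the 32-bit fill pattern, as the
 * AMDGPU_POISON wipe in amdgpu_move_blit() suggests): clear a BO and wait
 * for the fill to complete. The helper name is hypothetical.
 */
static int amdgpu_ttm_clear_bo_example(struct amdgpu_bo *abo)
{
	struct dma_fence *fence = NULL;
	int r;

	r = amdgpu_fill_buffer(abo, 0, abo->tbo.base.resv, &fence);
	if (r)
		return r;

	dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return 0;
}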
2244 #if defined(CONFIG_DEBUG_FS)
2246 static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
2248 struct drm_info_node *node = (struct drm_info_node *)m->private;
2249 unsigned ttm_pl = (uintptr_t)node->info_ent->data;
2250 struct drm_device *dev = node->minor->dev;
2251 struct amdgpu_device *adev = drm_to_adev(dev);
2252 struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, ttm_pl);
2253 struct drm_printer p = drm_seq_file_printer(m);
2255 man->func->debug(man, &p);
2259 static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
2260 {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_VRAM},
2261 {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_TT},
2262 {"amdgpu_gds_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GDS},
2263 {"amdgpu_gws_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GWS},
2264 {"amdgpu_oa_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_OA},
2265 {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
2266 #ifdef CONFIG_SWIOTLB
2267 {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
2272 * amdgpu_ttm_vram_read - Linear read access to VRAM
2274 * Accesses VRAM via MMIO for debugging purposes.
2276 static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
2277 size_t size, loff_t *pos)
2279 struct amdgpu_device *adev = file_inode(f)->i_private;
2282 if (size & 0x3 || *pos & 0x3)
2285 if (*pos >= adev->gmc.mc_vram_size)
2288 size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
2290 size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
2291 uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];
2293 amdgpu_device_vram_access(adev, *pos, value, bytes, false);
2294 if (copy_to_user(buf, value, bytes))
2307 * amdgpu_ttm_vram_write - Linear write access to VRAM
2309 * Accesses VRAM via MMIO for debugging purposes.
2311 static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
2312 size_t size, loff_t *pos)
2314 struct amdgpu_device *adev = file_inode(f)->i_private;
2318 if (size & 0x3 || *pos & 0x3)
2321 if (*pos >= adev->gmc.mc_vram_size)
2325 unsigned long flags;
2328 if (*pos >= adev->gmc.mc_vram_size)
2331 r = get_user(value, (uint32_t *)buf);
2335 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
2336 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
2337 WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
2338 WREG32_NO_KIQ(mmMM_DATA, value);
2339 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
2350 static const struct file_operations amdgpu_ttm_vram_fops = {
2351 .owner = THIS_MODULE,
2352 .read = amdgpu_ttm_vram_read,
2353 .write = amdgpu_ttm_vram_write,
2354 .llseek = default_llseek,
2357 #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
2360 * amdgpu_ttm_gtt_read - Linear read access to GTT memory
2362 static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
2363 size_t size, loff_t *pos)
2365 struct amdgpu_device *adev = file_inode(f)->i_private;
2370 loff_t p = *pos / PAGE_SIZE;
2371 unsigned off = *pos & ~PAGE_MASK;
2372 size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
2376 if (p >= adev->gart.num_cpu_pages)
2379 page = adev->gart.pages[p];
2384 r = copy_to_user(buf, ptr, cur_size);
2385 kunmap(adev->gart.pages[p]);
2387 r = clear_user(buf, cur_size);
2401 static const struct file_operations amdgpu_ttm_gtt_fops = {
2402 .owner = THIS_MODULE,
2403 .read = amdgpu_ttm_gtt_read,
2404 .llseek = default_llseek
2410 * amdgpu_iomem_read - Virtual read access to GPU mapped memory
2412 * This function is used to read memory that has been mapped to the
2413 * GPU and the known addresses are not physical addresses but instead
2414 * bus addresses (e.g., what you'd put in an IB or ring buffer).
2416 static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
2417 size_t size, loff_t *pos)
2419 struct amdgpu_device *adev = file_inode(f)->i_private;
2420 struct iommu_domain *dom;
2424 /* retrieve the IOMMU domain if any for this device */
2425 dom = iommu_get_domain_for_dev(adev->dev);
2428 phys_addr_t addr = *pos & PAGE_MASK;
2429 loff_t off = *pos & ~PAGE_MASK;
2430 size_t bytes = PAGE_SIZE - off;
2435 bytes = bytes < size ? bytes : size;
2437 /* Translate the bus address to a physical address. If
2438 * the domain is NULL it means there is no IOMMU active
2439 * and the address translation is the identity
2441 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2443 pfn = addr >> PAGE_SHIFT;
2444 if (!pfn_valid(pfn))
2447 p = pfn_to_page(pfn);
2448 if (p->mapping != adev->mman.bdev.dev_mapping)
2452 r = copy_to_user(buf, ptr + off, bytes);
2466 * amdgpu_iomem_write - Virtual write access to GPU mapped memory
2468 * This function is used to write memory that has been mapped to the
2469 * GPU and the known addresses are not physical addresses but instead
2470 * bus addresses (e.g., what you'd put in an IB or ring buffer).
2472 static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
2473 size_t size, loff_t *pos)
2475 struct amdgpu_device *adev = file_inode(f)->i_private;
2476 struct iommu_domain *dom;
2480 dom = iommu_get_domain_for_dev(adev->dev);
2483 phys_addr_t addr = *pos & PAGE_MASK;
2484 loff_t off = *pos & ~PAGE_MASK;
2485 size_t bytes = PAGE_SIZE - off;
2490 bytes = bytes < size ? bytes : size;
2492 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2494 pfn = addr >> PAGE_SHIFT;
2495 if (!pfn_valid(pfn))
2498 p = pfn_to_page(pfn);
2499 if (p->mapping != adev->mman.bdev.dev_mapping)
2503 r = copy_from_user(ptr + off, buf, bytes);
2516 static const struct file_operations amdgpu_ttm_iomem_fops = {
2517 .owner = THIS_MODULE,
2518 .read = amdgpu_iomem_read,
2519 .write = amdgpu_iomem_write,
2520 .llseek = default_llseek
2523 static const struct {
2525 const struct file_operations *fops;
2527 } ttm_debugfs_entries[] = {
2528 { "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
2529 #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
2530 { "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
2532 { "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
2537 int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
2539 #if defined(CONFIG_DEBUG_FS)
2542 struct drm_minor *minor = adev_to_drm(adev)->primary;
2543 struct dentry *ent, *root = minor->debugfs_root;
2545 for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
2546 ent = debugfs_create_file(
2547 ttm_debugfs_entries[count].name,
2548 S_IFREG | S_IRUGO, root,
2550 ttm_debugfs_entries[count].fops);
2552 return PTR_ERR(ent);
2553 if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
2554 i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
2555 else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
2556 i_size_write(ent->d_inode, adev->gmc.gart_size);
2557 adev->mman.debugfs_entries[count] = ent;
2560 count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
2562 #ifdef CONFIG_SWIOTLB
2563 if (!(adev->need_swiotlb && swiotlb_nr_tbl()))
2567 return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);