drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
1 /*
2  * Copyright 2009 Jerome Glisse.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the
7  * "Software"), to deal in the Software without restriction, including
8  * without limitation the rights to use, copy, modify, merge, publish,
9  * distribute, sub license, and/or sell copies of the Software, and to
10  * permit persons to whom the Software is furnished to do so, subject to
11  * the following conditions:
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19  * USE OR OTHER DEALINGS IN THE SOFTWARE.
20  *
21  * The above copyright notice and this permission notice (including the
22  * next paragraph) shall be included in all copies or substantial portions
23  * of the Software.
24  *
25  */
26 /*
27  * Authors:
28  *    Jerome Glisse <glisse@freedesktop.org>
29  *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
30  *    Dave Airlie
31  */
32
33 #include <linux/dma-mapping.h>
34 #include <linux/iommu.h>
35 #include <linux/hmm.h>
36 #include <linux/pagemap.h>
37 #include <linux/sched/task.h>
38 #include <linux/sched/mm.h>
39 #include <linux/seq_file.h>
40 #include <linux/slab.h>
41 #include <linux/swap.h>
42 #include <linux/swiotlb.h>
43 #include <linux/dma-buf.h>
44 #include <linux/sizes.h>
45
46 #include <drm/ttm/ttm_bo_api.h>
47 #include <drm/ttm/ttm_bo_driver.h>
48 #include <drm/ttm/ttm_placement.h>
49 #include <drm/ttm/ttm_module.h>
50 #include <drm/ttm/ttm_page_alloc.h>
51
52 #include <drm/drm_debugfs.h>
53 #include <drm/amdgpu_drm.h>
54
55 #include "amdgpu.h"
56 #include "amdgpu_object.h"
57 #include "amdgpu_trace.h"
58 #include "amdgpu_amdkfd.h"
59 #include "amdgpu_sdma.h"
60 #include "amdgpu_ras.h"
61 #include "amdgpu_atomfirmware.h"
62 #include "bif/bif_4_1_d.h"
63
64 #define AMDGPU_TTM_VRAM_MAX_DW_READ     (size_t)128
65
66
67 /**
68  * amdgpu_init_mem_type - Initialize a memory manager for a specific type of
69  * memory request.
70  *
71  * @bdev: The TTM BO device object (contains a reference to amdgpu_device)
72  * @type: The type of memory requested
73  * @man: The memory type manager for each domain
74  *
75  * This is called by ttm_bo_init_mm() when a buffer object is being
76  * initialized.
77  */
78 static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
79                                 struct ttm_mem_type_manager *man)
80 {
81         struct amdgpu_device *adev;
82
83         adev = amdgpu_ttm_adev(bdev);
84
85         switch (type) {
86         case TTM_PL_SYSTEM:
87                 /* System memory */
88                 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
89                 man->available_caching = TTM_PL_MASK_CACHING;
90                 man->default_caching = TTM_PL_FLAG_CACHED;
91                 break;
92         case TTM_PL_TT:
93                 /* GTT memory  */
94                 man->func = &amdgpu_gtt_mgr_func;
95                 man->available_caching = TTM_PL_MASK_CACHING;
96                 man->default_caching = TTM_PL_FLAG_CACHED;
97                 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
98                 break;
99         case TTM_PL_VRAM:
100                 /* "On-card" video ram */
101                 man->func = &amdgpu_vram_mgr_func;
102                 man->flags = TTM_MEMTYPE_FLAG_FIXED |
103                              TTM_MEMTYPE_FLAG_MAPPABLE;
104                 man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
105                 man->default_caching = TTM_PL_FLAG_WC;
106                 break;
107         case AMDGPU_PL_GDS:
108         case AMDGPU_PL_GWS:
109         case AMDGPU_PL_OA:
110                 /* On-chip GDS, GWS and OA memory */
111                 man->func = &ttm_bo_manager_func;
112                 man->flags = TTM_MEMTYPE_FLAG_FIXED;
113                 man->available_caching = TTM_PL_FLAG_UNCACHED;
114                 man->default_caching = TTM_PL_FLAG_UNCACHED;
115                 break;
116         default:
117                 DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
118                 return -EINVAL;
119         }
120         return 0;
121 }
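/*
 * An illustrative sketch, not part of the original file: the memory type
 * managers configured above come into play when the driver registers each
 * domain with TTM during init; ttm_bo_init_mm() then calls back into
 * amdgpu_init_mem_type() through the ttm_bo_driver. The sizes below are
 * placeholders and error handling is abbreviated.
 */
static int amdgpu_ttm_init_mm_sketch(struct amdgpu_device *adev)
{
	int r;

	/* VRAM domain, sized in pages */
	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
			   adev->gmc.real_vram_size >> PAGE_SHIFT);
	if (r)
		return r;

	/* GTT domain, sized in pages */
	return ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT,
			      adev->gmc.gart_size >> PAGE_SHIFT);
}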
122
123 /**
124  * amdgpu_evict_flags - Compute placement flags
125  *
126  * @bo: The buffer object to evict
127  * @placement: Possible destination(s) for evicted BO
128  *
129  * Fill in placement data when ttm_bo_evict() is called
130  */
131 static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
132                                 struct ttm_placement *placement)
133 {
134         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
135         struct amdgpu_bo *abo;
136         static const struct ttm_place placements = {
137                 .fpfn = 0,
138                 .lpfn = 0,
139                 .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
140         };
141
142         /* Don't handle scatter gather BOs */
143         if (bo->type == ttm_bo_type_sg) {
144                 placement->num_placement = 0;
145                 placement->num_busy_placement = 0;
146                 return;
147         }
148
149         /* Object isn't an AMDGPU object so ignore */
150         if (!amdgpu_bo_is_amdgpu_bo(bo)) {
151                 placement->placement = &placements;
152                 placement->busy_placement = &placements;
153                 placement->num_placement = 1;
154                 placement->num_busy_placement = 1;
155                 return;
156         }
157
158         abo = ttm_to_amdgpu_bo(bo);
159         switch (bo->mem.mem_type) {
160         case AMDGPU_PL_GDS:
161         case AMDGPU_PL_GWS:
162         case AMDGPU_PL_OA:
163                 placement->num_placement = 0;
164                 placement->num_busy_placement = 0;
165                 return;
166
167         case TTM_PL_VRAM:
168                 if (!adev->mman.buffer_funcs_enabled) {
169                         /* Move to system memory */
170                         amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
171                 } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
172                            !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
173                            amdgpu_bo_in_cpu_visible_vram(abo)) {
174
175                         /* Try evicting to the CPU inaccessible part of VRAM
176                          * first, but only set GTT as busy placement, so this
177                          * BO will be evicted to GTT rather than causing other
178                          * BOs to be evicted from VRAM
179                          */
180                         amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
181                                                          AMDGPU_GEM_DOMAIN_GTT);
182                         abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
183                         abo->placements[0].lpfn = 0;
184                         abo->placement.busy_placement = &abo->placements[1];
185                         abo->placement.num_busy_placement = 1;
186                 } else {
187                         /* Move to GTT memory */
188                         amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
189                 }
190                 break;
191         case TTM_PL_TT:
192         default:
193                 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
194                 break;
195         }
196         *placement = abo->placement;
197 }
198
199 /**
200  * amdgpu_verify_access - Verify access for a mmap call
201  *
202  * @bo: The buffer object to map
203  * @filp: The file pointer from the process performing the mmap
204  *
205  * This is called by ttm_bo_mmap() to verify whether a process
206  * has the right to mmap a BO to their process space.
207  */
208 static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
209 {
210         struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
211
212         /*
213          * Don't verify access for KFD BOs. They don't have a GEM
214          * object associated with them.
215          */
216         if (abo->kfd_bo)
217                 return 0;
218
219         if (amdgpu_ttm_tt_get_usermm(bo->ttm))
220                 return -EPERM;
221         return drm_vma_node_verify_access(&abo->tbo.base.vma_node,
222                                           filp->private_data);
223 }
224
225 /**
226  * amdgpu_move_null - Register memory for a buffer object
227  *
228  * @bo: The bo to assign the memory to
229  * @new_mem: The memory to be assigned.
230  *
231  * Assign the memory from new_mem to the memory of the buffer object bo.
232  */
233 static void amdgpu_move_null(struct ttm_buffer_object *bo,
234                              struct ttm_mem_reg *new_mem)
235 {
236         struct ttm_mem_reg *old_mem = &bo->mem;
237
238         BUG_ON(old_mem->mm_node != NULL);
239         *old_mem = *new_mem;
240         new_mem->mm_node = NULL;
241 }
242
243 /**
244  * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
245  *
246  * @bo: The buffer object the address is computed for.
247  * @mm_node: Memory manager node of the drm allocator backing @mem.
248  * @mem: The region where the bo resides.
249  *
250  */
251 static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
252                                     struct drm_mm_node *mm_node,
253                                     struct ttm_mem_reg *mem)
254 {
255         uint64_t addr = 0;
256
257         if (mm_node->start != AMDGPU_BO_INVALID_OFFSET) {
258                 addr = mm_node->start << PAGE_SHIFT;
259                 addr += amdgpu_ttm_domain_start(amdgpu_ttm_adev(bo->bdev),
260                                                 mem->mem_type);
261         }
262         return addr;
263 }
264
265 /**
266  * amdgpu_find_mm_node - Helper that finds the drm_mm_node corresponding to
267  * @offset and modifies @offset to be relative to the returned node.
268  *
269  * @mem: The region where the bo resides.
270  * @offset: The offset to look up; updated to be relative to the returned node.
271  *
272  */
273 static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
274                                                uint64_t *offset)
275 {
276         struct drm_mm_node *mm_node = mem->mm_node;
277
278         while (*offset >= (mm_node->size << PAGE_SHIFT)) {
279                 *offset -= (mm_node->size << PAGE_SHIFT);
280                 ++mm_node;
281         }
282         return mm_node;
283 }
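/*
 * An illustrative sketch, not part of the original file: resolving a byte
 * offset inside a (possibly scattered) allocation to a GPU address. With
 * two nodes of 4 pages each and an offset of 5 pages, amdgpu_find_mm_node()
 * returns the second node and rewrites the offset to 1 page.
 */
static uint64_t amdgpu_resolve_offset_sketch(struct ttm_mem_reg *mem,
					     uint64_t offset)
{
	struct drm_mm_node *node = amdgpu_find_mm_node(mem, &offset);

	/* offset is now relative to the start of *node */
	return (node->start << PAGE_SHIFT) + offset;
}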
284
285 /**
286  * amdgpu_ttm_map_buffer - Map memory into the GART windows
287  * @bo: buffer object to map
288  * @mem: memory object to map
289  * @mm_node: drm_mm node object to map
290  * @num_pages: number of pages to map
291  * @offset: offset into @mm_node where to start
292  * @window: which GART window to use
293  * @ring: DMA ring to use for the copy
294  * @tmz: if we should set up a TMZ-enabled mapping
295  * @addr: resulting address inside the MC address space
296  *
297  * Setup one of the GART windows to access a specific piece of memory or return
298  * the physical address for local memory.
299  */
300 static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
301                                  struct ttm_mem_reg *mem,
302                                  struct drm_mm_node *mm_node,
303                                  unsigned num_pages, uint64_t offset,
304                                  unsigned window, struct amdgpu_ring *ring,
305                                  bool tmz, uint64_t *addr)
306 {
307         struct amdgpu_device *adev = ring->adev;
308         struct amdgpu_job *job;
309         unsigned num_dw, num_bytes;
310         struct dma_fence *fence;
311         uint64_t src_addr, dst_addr;
312         void *cpu_addr;
313         uint64_t flags;
314         unsigned int i;
315         int r;
316
317         BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
318                AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
319
320         /* Map only what can't be accessed directly */
321         if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
322                 *addr = amdgpu_mm_node_addr(bo, mm_node, mem) + offset;
323                 return 0;
324         }
325
326         *addr = adev->gmc.gart_start;
327         *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
328                 AMDGPU_GPU_PAGE_SIZE;
329         *addr += offset & ~PAGE_MASK;
330
331         num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
332         num_bytes = num_pages * 8;
333
334         r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
335                                      AMDGPU_IB_POOL_DELAYED, &job);
336         if (r)
337                 return r;
338
339         src_addr = num_dw * 4;
340         src_addr += job->ibs[0].gpu_addr;
341
342         dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
343         dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
344         amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
345                                 dst_addr, num_bytes, false);
346
347         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
348         WARN_ON(job->ibs[0].length_dw > num_dw);
349
350         flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
351         if (tmz)
352                 flags |= AMDGPU_PTE_TMZ;
353
354         cpu_addr = &job->ibs[0].ptr[num_dw];
355
356         if (mem->mem_type == TTM_PL_TT) {
357                 struct ttm_dma_tt *dma;
358                 dma_addr_t *dma_address;
359
360                 dma = container_of(bo->ttm, struct ttm_dma_tt, ttm);
361                 dma_address = &dma->dma_address[offset >> PAGE_SHIFT];
362                 r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
363                                     cpu_addr);
364                 if (r)
365                         goto error_free;
366         } else {
367                 dma_addr_t dma_address;
368
369                 dma_address = (mm_node->start << PAGE_SHIFT) + offset;
370                 dma_address += adev->vm_manager.vram_base_offset;
371
372                 for (i = 0; i < num_pages; ++i) {
373                         r = amdgpu_gart_map(adev, i << PAGE_SHIFT, 1,
374                                             &dma_address, flags, cpu_addr);
375                         if (r)
376                                 goto error_free;
377
378                         dma_address += PAGE_SIZE;
379                 }
380         }
381
382         r = amdgpu_job_submit(job, &adev->mman.entity,
383                               AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
384         if (r)
385                 goto error_free;
386
387         dma_fence_put(fence);
388
389         return r;
390
391 error_free:
392         amdgpu_job_free(job);
393         return r;
394 }
395
396 /**
397  * amdgpu_ttm_copy_mem_to_mem - Helper function for copies between memory regions
398  * @adev: amdgpu device
399  * @src: buffer/address where to read from
400  * @dst: buffer/address where to write to
401  * @size: number of bytes to copy
402  * @tmz: if a secure copy should be used
403  * @resv: resv object to sync to
404  * @f: Returns the last fence if multiple jobs are submitted.
405  *
406  * The function copies @size bytes from {src->mem + src->offset} to
407  * {dst->mem + dst->offset}. src->bo and dst->bo could be the same BO for a
408  * move, or different BOs for a BO-to-BO copy.
409  *
410  */
411 int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
412                                const struct amdgpu_copy_mem *src,
413                                const struct amdgpu_copy_mem *dst,
414                                uint64_t size, bool tmz,
415                                struct dma_resv *resv,
416                                struct dma_fence **f)
417 {
418         const uint32_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
419                                         AMDGPU_GPU_PAGE_SIZE);
420
421         uint64_t src_node_size, dst_node_size, src_offset, dst_offset;
422         struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
423         struct drm_mm_node *src_mm, *dst_mm;
424         struct dma_fence *fence = NULL;
425         int r = 0;
426
427         if (!adev->mman.buffer_funcs_enabled) {
428                 DRM_ERROR("Trying to move memory with ring turned off.\n");
429                 return -EINVAL;
430         }
431
432         src_offset = src->offset;
433         if (src->mem->mm_node) {
434                 src_mm = amdgpu_find_mm_node(src->mem, &src_offset);
435                 src_node_size = (src_mm->size << PAGE_SHIFT) - src_offset;
436         } else {
437                 src_mm = NULL;
438                 src_node_size = ULLONG_MAX;
439         }
440
441         dst_offset = dst->offset;
442         if (dst->mem->mm_node) {
443                 dst_mm = amdgpu_find_mm_node(dst->mem, &dst_offset);
444                 dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst_offset;
445         } else {
446                 dst_mm = NULL;
447                 dst_node_size = ULLONG_MAX;
448         }
449
450         mutex_lock(&adev->mman.gtt_window_lock);
451
452         while (size) {
453                 uint32_t src_page_offset = src_offset & ~PAGE_MASK;
454                 uint32_t dst_page_offset = dst_offset & ~PAGE_MASK;
455                 struct dma_fence *next;
456                 uint32_t cur_size;
457                 uint64_t from, to;
458
459                 /* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
460                  * begins at an offset, then adjust the size accordingly
461                  */
462                 cur_size = max(src_page_offset, dst_page_offset);
463                 cur_size = min(min3(src_node_size, dst_node_size, size),
464                                (uint64_t)(GTT_MAX_BYTES - cur_size));
465
466                 /* Map src to window 0 and dst to window 1. */
467                 r = amdgpu_ttm_map_buffer(src->bo, src->mem, src_mm,
468                                           PFN_UP(cur_size + src_page_offset),
469                                           src_offset, 0, ring, tmz, &from);
470                 if (r)
471                         goto error;
472
473                 r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, dst_mm,
474                                           PFN_UP(cur_size + dst_page_offset),
475                                           dst_offset, 1, ring, tmz, &to);
476                 if (r)
477                         goto error;
478
479                 r = amdgpu_copy_buffer(ring, from, to, cur_size,
480                                        resv, &next, false, true, tmz);
481                 if (r)
482                         goto error;
483
484                 dma_fence_put(fence);
485                 fence = next;
486
487                 size -= cur_size;
488                 if (!size)
489                         break;
490
491                 src_node_size -= cur_size;
492                 if (!src_node_size) {
493                         ++src_mm;
494                         src_node_size = src_mm->size << PAGE_SHIFT;
495                         src_offset = 0;
496                 } else {
497                         src_offset += cur_size;
498                 }
499
500                 dst_node_size -= cur_size;
501                 if (!dst_node_size) {
502                         ++dst_mm;
503                         dst_node_size = dst_mm->size << PAGE_SHIFT;
504                         dst_offset = 0;
505                 } else {
506                         dst_offset += cur_size;
507                 }
508         }
509 error:
510         mutex_unlock(&adev->mman.gtt_window_lock);
511         if (f)
512                 *f = dma_fence_get(fence);
513         dma_fence_put(fence);
514         return r;
515 }
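/*
 * An illustrative sketch, not part of the original file: a sub-range copy
 * between two different BOs using amdgpu_ttm_copy_mem_to_mem().
 * amdgpu_move_blit() below is the in-tree caller; this variant only shows
 * non-zero offsets and distinct source/destination objects.
 */
static int amdgpu_copy_range_sketch(struct amdgpu_device *adev,
				    struct amdgpu_bo *src_bo, uint64_t src_off,
				    struct amdgpu_bo *dst_bo, uint64_t dst_off,
				    uint64_t size, struct dma_fence **fence)
{
	struct amdgpu_copy_mem src = {
		.bo = &src_bo->tbo,
		.mem = &src_bo->tbo.mem,
		.offset = src_off,
	};
	struct amdgpu_copy_mem dst = {
		.bo = &dst_bo->tbo,
		.mem = &dst_bo->tbo.mem,
		.offset = dst_off,
	};

	/* Sync against existing users of the destination via its resv object */
	return amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst, size, false,
					  dst_bo->tbo.base.resv, fence);
}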
516
517 /**
518  * amdgpu_move_blit - Copy an entire buffer to another buffer
519  *
520  * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
521  * help move buffers to and from VRAM.
522  */
523 static int amdgpu_move_blit(struct ttm_buffer_object *bo,
524                             bool evict, bool no_wait_gpu,
525                             struct ttm_mem_reg *new_mem,
526                             struct ttm_mem_reg *old_mem)
527 {
528         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
529         struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
530         struct amdgpu_copy_mem src, dst;
531         struct dma_fence *fence = NULL;
532         int r;
533
534         src.bo = bo;
535         dst.bo = bo;
536         src.mem = old_mem;
537         dst.mem = new_mem;
538         src.offset = 0;
539         dst.offset = 0;
540
541         r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
542                                        new_mem->num_pages << PAGE_SHIFT,
543                                        amdgpu_bo_encrypted(abo),
544                                        bo->base.resv, &fence);
545         if (r)
546                 goto error;
547
548         /* clear the space being freed */
549         if (old_mem->mem_type == TTM_PL_VRAM &&
550             (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
551                 struct dma_fence *wipe_fence = NULL;
552
553                 r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON,
554                                        NULL, &wipe_fence);
555                 if (r) {
556                         goto error;
557                 } else if (wipe_fence) {
558                         dma_fence_put(fence);
559                         fence = wipe_fence;
560                 }
561         }
562
563         /* Always block for VM page tables before committing the new location */
564         if (bo->type == ttm_bo_type_kernel)
565                 r = ttm_bo_move_accel_cleanup(bo, fence, true, new_mem);
566         else
567                 r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
568         dma_fence_put(fence);
569         return r;
570
571 error:
572         if (fence)
573                 dma_fence_wait(fence, false);
574         dma_fence_put(fence);
575         return r;
576 }
577
578 /**
579  * amdgpu_move_vram_ram - Copy VRAM buffer to RAM buffer
580  *
581  * Called by amdgpu_bo_move().
582  */
583 static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
584                                 struct ttm_operation_ctx *ctx,
585                                 struct ttm_mem_reg *new_mem)
586 {
587         struct ttm_mem_reg *old_mem = &bo->mem;
588         struct ttm_mem_reg tmp_mem;
589         struct ttm_place placements;
590         struct ttm_placement placement;
591         int r;
592
593         /* create space/pages for new_mem in GTT space */
594         tmp_mem = *new_mem;
595         tmp_mem.mm_node = NULL;
596         placement.num_placement = 1;
597         placement.placement = &placements;
598         placement.num_busy_placement = 1;
599         placement.busy_placement = &placements;
600         placements.fpfn = 0;
601         placements.lpfn = 0;
602         placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
603         r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
604         if (unlikely(r)) {
605                 pr_err("Failed to find GTT space for blit from VRAM\n");
606                 return r;
607         }
608
609         /* set caching flags */
610         r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
611         if (unlikely(r)) {
612                 goto out_cleanup;
613         }
614
615         /* Bind the memory to the GTT space */
616         r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
617         if (unlikely(r)) {
618                 goto out_cleanup;
619         }
620
621         /* blit VRAM to GTT */
622         r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, &tmp_mem, old_mem);
623         if (unlikely(r)) {
624                 goto out_cleanup;
625         }
626
627         /* move BO (in tmp_mem) to new_mem */
628         r = ttm_bo_move_ttm(bo, ctx, new_mem);
629 out_cleanup:
630         ttm_bo_mem_put(bo, &tmp_mem);
631         return r;
632 }
633
634 /**
635  * amdgpu_move_ram_vram - Copy buffer from RAM to VRAM
636  *
637  * Called by amdgpu_bo_move().
638  */
639 static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
640                                 struct ttm_operation_ctx *ctx,
641                                 struct ttm_mem_reg *new_mem)
642 {
643         struct ttm_mem_reg *old_mem = &bo->mem;
644         struct ttm_mem_reg tmp_mem;
645         struct ttm_placement placement;
646         struct ttm_place placements;
647         int r;
648
649         /* make space in GTT for old_mem buffer */
650         tmp_mem = *new_mem;
651         tmp_mem.mm_node = NULL;
652         placement.num_placement = 1;
653         placement.placement = &placements;
654         placement.num_busy_placement = 1;
655         placement.busy_placement = &placements;
656         placements.fpfn = 0;
657         placements.lpfn = 0;
658         placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
659         r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
660         if (unlikely(r)) {
661                 pr_err("Failed to find GTT space for blit to VRAM\n");
662                 return r;
663         }
664
665         /* move/bind old memory to GTT space */
666         r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
667         if (unlikely(r)) {
668                 goto out_cleanup;
669         }
670
671         /* copy to VRAM */
672         r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, new_mem, old_mem);
673         if (unlikely(r)) {
674                 goto out_cleanup;
675         }
676 out_cleanup:
677         ttm_bo_mem_put(bo, &tmp_mem);
678         return r;
679 }
680
681 /**
682  * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
683  *
684  * Called by amdgpu_bo_move()
685  */
686 static bool amdgpu_mem_visible(struct amdgpu_device *adev,
687                                struct ttm_mem_reg *mem)
688 {
689         struct drm_mm_node *nodes = mem->mm_node;
690
691         if (mem->mem_type == TTM_PL_SYSTEM ||
692             mem->mem_type == TTM_PL_TT)
693                 return true;
694         if (mem->mem_type != TTM_PL_VRAM)
695                 return false;
696
697         /* ttm_mem_reg_ioremap only supports contiguous memory */
698         if (nodes->size != mem->num_pages)
699                 return false;
700
701         return ((nodes->start + nodes->size) << PAGE_SHIFT)
702                 <= adev->gmc.visible_vram_size;
703 }
704
705 /**
706  * amdgpu_bo_move - Move a buffer object to a new memory location
707  *
708  * Called by ttm_bo_handle_move_mem()
709  */
710 static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
711                           struct ttm_operation_ctx *ctx,
712                           struct ttm_mem_reg *new_mem)
713 {
714         struct amdgpu_device *adev;
715         struct amdgpu_bo *abo;
716         struct ttm_mem_reg *old_mem = &bo->mem;
717         int r;
718
719         /* Can't move a pinned BO */
720         abo = ttm_to_amdgpu_bo(bo);
721         if (WARN_ON_ONCE(abo->pin_count > 0))
722                 return -EINVAL;
723
724         adev = amdgpu_ttm_adev(bo->bdev);
725
726         if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
727                 amdgpu_move_null(bo, new_mem);
728                 return 0;
729         }
730         if ((old_mem->mem_type == TTM_PL_TT &&
731              new_mem->mem_type == TTM_PL_SYSTEM) ||
732             (old_mem->mem_type == TTM_PL_SYSTEM &&
733              new_mem->mem_type == TTM_PL_TT)) {
734                 /* bind is enough */
735                 amdgpu_move_null(bo, new_mem);
736                 return 0;
737         }
738         if (old_mem->mem_type == AMDGPU_PL_GDS ||
739             old_mem->mem_type == AMDGPU_PL_GWS ||
740             old_mem->mem_type == AMDGPU_PL_OA ||
741             new_mem->mem_type == AMDGPU_PL_GDS ||
742             new_mem->mem_type == AMDGPU_PL_GWS ||
743             new_mem->mem_type == AMDGPU_PL_OA) {
744                 /* Nothing to save here */
745                 amdgpu_move_null(bo, new_mem);
746                 return 0;
747         }
748
749         if (!adev->mman.buffer_funcs_enabled) {
750                 r = -ENODEV;
751                 goto memcpy;
752         }
753
754         if (old_mem->mem_type == TTM_PL_VRAM &&
755             new_mem->mem_type == TTM_PL_SYSTEM) {
756                 r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem);
757         } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
758                    new_mem->mem_type == TTM_PL_VRAM) {
759                 r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
760         } else {
761                 r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu,
762                                      new_mem, old_mem);
763         }
764
765         if (r) {
766 memcpy:
767                 /* Check that all memory is CPU accessible */
768                 if (!amdgpu_mem_visible(adev, old_mem) ||
769                     !amdgpu_mem_visible(adev, new_mem)) {
770                         pr_err("Move buffer fallback to memcpy unavailable\n");
771                         return r;
772                 }
773
774                 r = ttm_bo_move_memcpy(bo, ctx, new_mem);
775                 if (r)
776                         return r;
777         }
778
779         if (bo->type == ttm_bo_type_device &&
780             new_mem->mem_type == TTM_PL_VRAM &&
781             old_mem->mem_type != TTM_PL_VRAM) {
782                 /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
783                  * accesses the BO after it's moved.
784                  */
785                 abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
786         }
787
788         /* update statistics */
789         atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
790         return 0;
791 }
792
793 /**
794  * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
795  *
796  * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
797  */
798 static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
799 {
800         struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
801         struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
802         struct drm_mm_node *mm_node = mem->mm_node;
803
804         mem->bus.addr = NULL;
805         mem->bus.offset = 0;
806         mem->bus.size = mem->num_pages << PAGE_SHIFT;
807         mem->bus.base = 0;
808         mem->bus.is_iomem = false;
809         if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
810                 return -EINVAL;
811         switch (mem->mem_type) {
812         case TTM_PL_SYSTEM:
813                 /* system memory */
814                 return 0;
815         case TTM_PL_TT:
816                 break;
817         case TTM_PL_VRAM:
818                 mem->bus.offset = mem->start << PAGE_SHIFT;
819                 /* check if it's visible */
820                 if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size)
821                         return -EINVAL;
822                 /* Only physically contiguous buffers apply. In a contiguous
823                  * buffer, size of the first mm_node would match the number of
824                  * pages in ttm_mem_reg.
825                  */
826                 if (adev->mman.aper_base_kaddr &&
827                     (mm_node->size == mem->num_pages))
828                         mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
829                                         mem->bus.offset;
830
831                 mem->bus.base = adev->gmc.aper_base;
832                 mem->bus.is_iomem = true;
833                 break;
834         default:
835                 return -EINVAL;
836         }
837         return 0;
838 }
839
840 static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
841                                            unsigned long page_offset)
842 {
843         uint64_t offset = (page_offset << PAGE_SHIFT);
844         struct drm_mm_node *mm;
845
846         mm = amdgpu_find_mm_node(&bo->mem, &offset);
847         return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
848                 (offset >> PAGE_SHIFT);
849 }
850
851 /**
852  * amdgpu_ttm_domain_start - Returns GPU start address
853  * @adev: amdgpu device object
854  * @type: type of the memory
855  *
856  * Returns:
857  * GPU start address of a memory domain
858  */
859
860 uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
861 {
862         switch (type) {
863         case TTM_PL_TT:
864                 return adev->gmc.gart_start;
865         case TTM_PL_VRAM:
866                 return adev->gmc.vram_start;
867         }
868
869         return 0;
870 }
871
872 /*
873  * TTM backend functions.
874  */
875 struct amdgpu_ttm_tt {
876         struct ttm_dma_tt       ttm;
877         struct drm_gem_object   *gobj;
878         u64                     offset;
879         uint64_t                userptr;
880         struct task_struct      *usertask;
881         uint32_t                userflags;
882 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
883         struct hmm_range        *range;
884 #endif
885 };
886
887 #ifdef CONFIG_DRM_AMDGPU_USERPTR
888 /**
889  * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
890  * memory and start HMM tracking of CPU page table updates
891  *
892  * The calling function must call amdgpu_ttm_tt_get_user_pages_done() once and
893  * only once afterwards to stop HMM tracking
894  */
895 int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
896 {
897         struct ttm_tt *ttm = bo->tbo.ttm;
898         struct amdgpu_ttm_tt *gtt = (void *)ttm;
899         unsigned long start = gtt->userptr;
900         struct vm_area_struct *vma;
901         struct hmm_range *range;
902         unsigned long timeout;
903         struct mm_struct *mm;
904         unsigned long i;
905         int r = 0;
906
907         mm = bo->notifier.mm;
908         if (unlikely(!mm)) {
909                 DRM_DEBUG_DRIVER("BO is not registered?\n");
910                 return -EFAULT;
911         }
912
913         /* Another get_user_pages is running at the same time?? */
914         if (WARN_ON(gtt->range))
915                 return -EFAULT;
916
917         if (!mmget_not_zero(mm)) /* Happens during process shutdown */
918                 return -ESRCH;
919
920         range = kzalloc(sizeof(*range), GFP_KERNEL);
921         if (unlikely(!range)) {
922                 r = -ENOMEM;
923                 goto out;
924         }
925         range->notifier = &bo->notifier;
926         range->start = bo->notifier.interval_tree.start;
927         range->end = bo->notifier.interval_tree.last + 1;
928         range->default_flags = HMM_PFN_REQ_FAULT;
929         if (!amdgpu_ttm_tt_is_readonly(ttm))
930                 range->default_flags |= HMM_PFN_REQ_WRITE;
931
932         range->hmm_pfns = kvmalloc_array(ttm->num_pages,
933                                          sizeof(*range->hmm_pfns), GFP_KERNEL);
934         if (unlikely(!range->hmm_pfns)) {
935                 r = -ENOMEM;
936                 goto out_free_ranges;
937         }
938
939         mmap_read_lock(mm);
940         vma = find_vma(mm, start);
941         if (unlikely(!vma || start < vma->vm_start)) {
942                 r = -EFAULT;
943                 goto out_unlock;
944         }
945         if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
946                 vma->vm_file)) {
947                 r = -EPERM;
948                 goto out_unlock;
949         }
950         mmap_read_unlock(mm);
951         timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
952
953 retry:
954         range->notifier_seq = mmu_interval_read_begin(&bo->notifier);
955
956         mmap_read_lock(mm);
957         r = hmm_range_fault(range);
958         mmap_read_unlock(mm);
959         if (unlikely(r)) {
960                 /*
961                  * FIXME: This timeout should encompass the retry from
962                  * mmu_interval_read_retry() as well.
963                  */
964                 if (r == -EBUSY && !time_after(jiffies, timeout))
965                         goto retry;
966                 goto out_free_pfns;
967         }
968
969         /*
970          * Due to default_flags, all pages are HMM_PFN_VALID or
971          * hmm_range_fault() fails. FIXME: The pages cannot be touched outside
972          * the notifier_lock, and mmu_interval_read_retry() must be done first.
973          */
974         for (i = 0; i < ttm->num_pages; i++)
975                 pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);
976
977         gtt->range = range;
978         mmput(mm);
979
980         return 0;
981
982 out_unlock:
983         mmap_read_unlock(mm);
984 out_free_pfns:
985         kvfree(range->hmm_pfns);
986 out_free_ranges:
987         kfree(range);
988 out:
989         mmput(mm);
990         return r;
991 }
992
993 /**
994  * amdgpu_ttm_tt_get_user_pages_done - stop HMM tracking of CPU page table changes
995  * Check if the pages backing this ttm range have been invalidated
996  *
997  * Returns: true if pages are still valid
998  */
999 bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
1000 {
1001         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1002         bool r = false;
1003
1004         if (!gtt || !gtt->userptr)
1005                 return false;
1006
1007         DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%lx\n",
1008                 gtt->userptr, ttm->num_pages);
1009
1010         WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
1011                 "No user pages to check\n");
1012
1013         if (gtt->range) {
1014                 /*
1015                  * FIXME: Must always hold notifier_lock for this, and must
1016                  * not ignore the return code.
1017                  */
1018                 r = mmu_interval_read_retry(gtt->range->notifier,
1019                                          gtt->range->notifier_seq);
1020                 kvfree(gtt->range->hmm_pfns);
1021                 kfree(gtt->range);
1022                 gtt->range = NULL;
1023         }
1024
1025         return !r;
1026 }
1027 #endif
1028
1029 /**
1030  * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
1031  *
1032  * Called by amdgpu_cs_list_validate(). This creates the page list
1033  * that backs user memory and will ultimately be mapped into the device
1034  * address space.
1035  */
1036 void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
1037 {
1038         unsigned long i;
1039
1040         for (i = 0; i < ttm->num_pages; ++i)
1041                 ttm->pages[i] = pages ? pages[i] : NULL;
1042 }
1043
1044 /**
1045  * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
1046  *
1047  * Called by amdgpu_ttm_backend_bind()
1048  */
1049 static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
1050 {
1051         struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
1052         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1053         int r;
1054
1055         int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
1056         enum dma_data_direction direction = write ?
1057                 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
1058
1059         /* Allocate an SG array and squash pages into it */
1060         r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
1061                                       ttm->num_pages << PAGE_SHIFT,
1062                                       GFP_KERNEL);
1063         if (r)
1064                 goto release_sg;
1065
1066         /* Map SG to device */
1067         r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
1068         if (r)
1069                 goto release_sg;
1070
1071         /* convert SG to linear array of pages and dma addresses */
1072         drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
1073                                          gtt->ttm.dma_address, ttm->num_pages);
1074
1075         return 0;
1076
1077 release_sg:
1078         kfree(ttm->sg);
1079         return r;
1080 }
1081
1082 /**
1083  * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
1084  */
1085 static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
1086 {
1087         struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
1088         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1089
1090         int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
1091         enum dma_data_direction direction = write ?
1092                 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
1093
1094         /* double check that we don't free the table twice */
1095         if (!ttm->sg->sgl)
1096                 return;
1097
1098         /* unmap the pages mapped to the device */
1099         dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
1100         sg_free_table(ttm->sg);
1101
1102 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
1103         if (gtt->range) {
1104                 unsigned long i;
1105
1106                 for (i = 0; i < ttm->num_pages; i++) {
1107                         if (ttm->pages[i] !=
1108                             hmm_pfn_to_page(gtt->range->hmm_pfns[i]))
1109                                 break;
1110                 }
1111
1112                 WARN((i == ttm->num_pages), "Missing get_user_page_done\n");
1113         }
1114 #endif
1115 }
1116
1117 static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
1118                                 struct ttm_buffer_object *tbo,
1119                                 uint64_t flags)
1120 {
1121         struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
1122         struct ttm_tt *ttm = tbo->ttm;
1123         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1124         int r;
1125
1126         if (amdgpu_bo_encrypted(abo))
1127                 flags |= AMDGPU_PTE_TMZ;
1128
1129         if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
1130                 uint64_t page_idx = 1;
1131
1132                 r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
1133                                 ttm->pages, gtt->ttm.dma_address, flags);
1134                 if (r)
1135                         goto gart_bind_fail;
1136
1137                 /* The memory type of the first page defaults to UC. Now
1138                  * modify the memory type to NC from the second page of
1139                  * the BO onward.
1140                  */
1141                 flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
1142                 flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
1143
1144                 r = amdgpu_gart_bind(adev,
1145                                 gtt->offset + (page_idx << PAGE_SHIFT),
1146                                 ttm->num_pages - page_idx,
1147                                 &ttm->pages[page_idx],
1148                                 &(gtt->ttm.dma_address[page_idx]), flags);
1149         } else {
1150                 r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
1151                                      ttm->pages, gtt->ttm.dma_address, flags);
1152         }
1153
1154 gart_bind_fail:
1155         if (r)
1156                 DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
1157                           ttm->num_pages, gtt->offset);
1158
1159         return r;
1160 }
1161
1162 /**
1163  * amdgpu_ttm_backend_bind - Bind GTT memory
1164  *
1165  * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
1166  * This handles binding GTT memory to the device address space.
1167  */
1168 static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
1169                                    struct ttm_mem_reg *bo_mem)
1170 {
1171         struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
1172         struct amdgpu_ttm_tt *gtt = (void*)ttm;
1173         uint64_t flags;
1174         int r = 0;
1175
1176         if (gtt->userptr) {
1177                 r = amdgpu_ttm_tt_pin_userptr(ttm);
1178                 if (r) {
1179                         DRM_ERROR("failed to pin userptr\n");
1180                         return r;
1181                 }
1182         }
1183         if (!ttm->num_pages) {
1184                 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
1185                      ttm->num_pages, bo_mem, ttm);
1186         }
1187
1188         if (bo_mem->mem_type == AMDGPU_PL_GDS ||
1189             bo_mem->mem_type == AMDGPU_PL_GWS ||
1190             bo_mem->mem_type == AMDGPU_PL_OA)
1191                 return -EINVAL;
1192
1193         if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
1194                 gtt->offset = AMDGPU_BO_INVALID_OFFSET;
1195                 return 0;
1196         }
1197
1198         /* compute PTE flags relevant to this BO memory */
1199         flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
1200
1201         /* bind pages into GART page tables */
1202         gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
1203         r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
1204                 ttm->pages, gtt->ttm.dma_address, flags);
1205
1206         if (r)
1207                 DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
1208                           ttm->num_pages, gtt->offset);
1209         return r;
1210 }
1211
1212 /**
1213  * amdgpu_ttm_alloc_gart - Allocate GART memory for buffer object
1214  */
1215 int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
1216 {
1217         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
1218         struct ttm_operation_ctx ctx = { false, false };
1219         struct amdgpu_ttm_tt *gtt = (void*)bo->ttm;
1220         struct ttm_mem_reg tmp;
1221         struct ttm_placement placement;
1222         struct ttm_place placements;
1223         uint64_t addr, flags;
1224         int r;
1225
1226         if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
1227                 return 0;
1228
1229         addr = amdgpu_gmc_agp_addr(bo);
1230         if (addr != AMDGPU_BO_INVALID_OFFSET) {
1231                 bo->mem.start = addr >> PAGE_SHIFT;
1232         } else {
1233
1234                 /* allocate GART space */
1235                 tmp = bo->mem;
1236                 tmp.mm_node = NULL;
1237                 placement.num_placement = 1;
1238                 placement.placement = &placements;
1239                 placement.num_busy_placement = 1;
1240                 placement.busy_placement = &placements;
1241                 placements.fpfn = 0;
1242                 placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
1243                 placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
1244                         TTM_PL_FLAG_TT;
1245
1246                 r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
1247                 if (unlikely(r))
1248                         return r;
1249
1250                 /* compute PTE flags for this buffer object */
1251                 flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
1252
1253                 /* Bind pages */
1254                 gtt->offset = (u64)tmp.start << PAGE_SHIFT;
1255                 r = amdgpu_ttm_gart_bind(adev, bo, flags);
1256                 if (unlikely(r)) {
1257                         ttm_bo_mem_put(bo, &tmp);
1258                         return r;
1259                 }
1260
1261                 ttm_bo_mem_put(bo, &bo->mem);
1262                 bo->mem = tmp;
1263         }
1264
1265         return 0;
1266 }
1267
1268 /**
1269  * amdgpu_ttm_recover_gart - Rebind GTT pages
1270  *
1271  * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
1272  * rebind GTT pages during a GPU reset.
1273  */
1274 int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
1275 {
1276         struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
1277         uint64_t flags;
1278         int r;
1279
1280         if (!tbo->ttm)
1281                 return 0;
1282
1283         flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem);
1284         r = amdgpu_ttm_gart_bind(adev, tbo, flags);
1285
1286         return r;
1287 }
1288
1289 /**
1290  * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
1291  *
1292  * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
1293  * ttm_tt_destroy().
1294  */
1295 static void amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
1296 {
1297         struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
1298         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1299         int r;
1300
1301         /* if the pages have userptr pinning then clear that first */
1302         if (gtt->userptr)
1303                 amdgpu_ttm_tt_unpin_userptr(ttm);
1304
1305         if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
1306                 return;
1307
1308         /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
1309         r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
1310         if (r)
1311                 DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
1312                           gtt->ttm.ttm.num_pages, gtt->offset);
1313 }
1314
1315 static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
1316 {
1317         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1318
1319         if (gtt->usertask)
1320                 put_task_struct(gtt->usertask);
1321
1322         ttm_dma_tt_fini(&gtt->ttm);
1323         kfree(gtt);
1324 }
1325
1326 static struct ttm_backend_func amdgpu_backend_func = {
1327         .bind = &amdgpu_ttm_backend_bind,
1328         .unbind = &amdgpu_ttm_backend_unbind,
1329         .destroy = &amdgpu_ttm_backend_destroy,
1330 };
1331
1332 /**
1333  * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
1334  *
1335  * @bo: The buffer object to create a GTT ttm_tt object around
1336  *
1337  * Called by ttm_tt_create().
1338  */
1339 static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
1340                                            uint32_t page_flags)
1341 {
1342         struct amdgpu_ttm_tt *gtt;
1343
1344         gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
1345         if (gtt == NULL) {
1346                 return NULL;
1347         }
1348         gtt->ttm.ttm.func = &amdgpu_backend_func;
1349         gtt->gobj = &bo->base;
1350
1351         /* allocate space for the uninitialized page entries */
1352         if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
1353                 kfree(gtt);
1354                 return NULL;
1355         }
1356         return &gtt->ttm.ttm;
1357 }
1358
1359 /**
1360  * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
1361  *
1362  * Map the pages of a ttm_tt object to an address space visible
1363  * to the underlying device.
1364  */
1365 static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
1366                         struct ttm_operation_ctx *ctx)
1367 {
1368         struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
1369         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1370
1371         /* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
1372         if (gtt && gtt->userptr) {
1373                 ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
1374                 if (!ttm->sg)
1375                         return -ENOMEM;
1376
1377                 ttm->page_flags |= TTM_PAGE_FLAG_SG;
1378                 ttm->state = tt_unbound;
1379                 return 0;
1380         }
1381
1382         if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
1383                 if (!ttm->sg) {
1384                         struct dma_buf_attachment *attach;
1385                         struct sg_table *sgt;
1386
1387                         attach = gtt->gobj->import_attach;
1388                         sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
1389                         if (IS_ERR(sgt))
1390                                 return PTR_ERR(sgt);
1391
1392                         ttm->sg = sgt;
1393                 }
1394
1395                 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
1396                                                  gtt->ttm.dma_address,
1397                                                  ttm->num_pages);
1398                 ttm->state = tt_unbound;
1399                 return 0;
1400         }
1401
1402 #ifdef CONFIG_SWIOTLB
1403         if (adev->need_swiotlb && swiotlb_nr_tbl()) {
1404                 return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
1405         }
1406 #endif
1407
1408         /* fall back to generic helper to populate the page array
1409          * and map them to the device */
1410         return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
1411 }
1412
1413 /**
1414  * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
1415  *
1416  * Unmaps pages of a ttm_tt object from the device address space and
1417  * unpopulates the page array backing it.
1418  */
1419 static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
1420 {
1421         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1422         struct amdgpu_device *adev;
1423
1424         if (gtt && gtt->userptr) {
1425                 amdgpu_ttm_tt_set_user_pages(ttm, NULL);
1426                 kfree(ttm->sg);
1427                 ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
1428                 return;
1429         }
1430
1431         if (ttm->sg && gtt->gobj->import_attach) {
1432                 struct dma_buf_attachment *attach;
1433
1434                 attach = gtt->gobj->import_attach;
1435                 dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
1436                 ttm->sg = NULL;
1437                 return;
1438         }
1439
1440         if (ttm->page_flags & TTM_PAGE_FLAG_SG)
1441                 return;
1442
1443         adev = amdgpu_ttm_adev(ttm->bdev);
1444
1445 #ifdef CONFIG_SWIOTLB
1446         if (adev->need_swiotlb && swiotlb_nr_tbl()) {
1447                 ttm_dma_unpopulate(&gtt->ttm, adev->dev);
1448                 return;
1449         }
1450 #endif
1451
1452         /* fall back to generic helper to unmap and unpopulate array */
1453         ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
1454 }
1455
1456 /**
1457  * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
1458  * task
1459  *
1460  * @ttm: The ttm_tt object to bind this userptr object to
1461  * @addr:  The address in the current tasks VM space to use
1462  * @flags: Requirements of userptr object.
1463  *
1464  * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages
1465  * to the current task
1466  */
1467 int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
1468                               uint32_t flags)
1469 {
1470         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1471
1472         if (gtt == NULL)
1473                 return -EINVAL;
1474
1475         gtt->userptr = addr;
1476         gtt->userflags = flags;
1477
1478         if (gtt->usertask)
1479                 put_task_struct(gtt->usertask);
1480         gtt->usertask = current->group_leader;
1481         get_task_struct(gtt->usertask);
1482
1483         return 0;
1484 }
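/*
 * An illustrative sketch, not part of the original file: the rough userptr
 * life cycle driven by the GEM userptr ioctl and command submission.
 * Locking, validation and error unwinding are elided; the page array is
 * assumed to be allocated by the caller.
 */
static int amdgpu_userptr_flow_sketch(struct amdgpu_bo *bo, uint64_t addr,
				      struct page **pages)
{
	struct ttm_tt *ttm = bo->tbo.ttm;
	int r;

	/* 1) Mark the ttm_tt as backed by user memory of the current task */
	r = amdgpu_ttm_tt_set_userptr(ttm, addr, 0);
	if (r)
		return r;

	/* 2) Fault the pages in and start HMM tracking */
	r = amdgpu_ttm_tt_get_user_pages(bo, pages);
	if (r)
		return r;

	/* 3) Hand the page array to the ttm_tt before it is bound */
	amdgpu_ttm_tt_set_user_pages(ttm, pages);

	/* 4) After binding/validation, confirm nothing was invalidated */
	return amdgpu_ttm_tt_get_user_pages_done(ttm) ? 0 : -EAGAIN;
}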
1485
1486 /**
1487  * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
1488  */
1489 struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
1490 {
1491         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1492
1493         if (gtt == NULL)
1494                 return NULL;
1495
1496         if (gtt->usertask == NULL)
1497                 return NULL;
1498
1499         return gtt->usertask->mm;
1500 }
1501
1502 /**
1503  * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lies inside an
1504  * address range for the current task.
1505  *
1506  */
1507 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
1508                                   unsigned long end)
1509 {
1510         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1511         unsigned long size;
1512
1513         if (gtt == NULL || !gtt->userptr)
1514                 return false;
1515
1516         /* Return false if no part of the ttm_tt object lies within
1517          * the range
1518          */
1519         size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
1520         if (gtt->userptr > end || gtt->userptr + size <= start)
1521                 return false;
1522
1523         return true;
1524 }
1525
1526 /**
1527  * amdgpu_ttm_tt_is_userptr - Are the pages backed by a userptr?
1528  */
1529 bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
1530 {
1531         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1532
1533         if (gtt == NULL || !gtt->userptr)
1534                 return false;
1535
1536         return true;
1537 }
1538
1539 /**
1540  * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
1541  */
1542 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
1543 {
1544         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1545
1546         if (gtt == NULL)
1547                 return false;
1548
1549         return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
1550 }
1551
1552 /**
1553  * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
1554  *
1555  * @ttm: The ttm_tt object to compute the flags for
1556  * @mem: The memory registry backing this ttm_tt object
1557  *
1558  * Figure out the flags to use for a VM PDE (Page Directory Entry).
1559  */
1560 uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
1561 {
1562         uint64_t flags = 0;
1563
1564         if (mem && mem->mem_type != TTM_PL_SYSTEM)
1565                 flags |= AMDGPU_PTE_VALID;
1566
1567         if (mem && mem->mem_type == TTM_PL_TT) {
1568                 flags |= AMDGPU_PTE_SYSTEM;
1569
1570                 if (ttm->caching_state == tt_cached)
1571                         flags |= AMDGPU_PTE_SNOOPED;
1572         }
1573
1574         return flags;
1575 }
1576
1577 /**
1578  * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
1579  *
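      * @adev: amdgpu_device pointer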
1580  * @ttm: The ttm_tt object to compute the flags for
1581  * @mem: The memory registry backing this ttm_tt object
1582  *
1583  * Figure out the flags to use for a VM PTE (Page Table Entry).
1584  */
1585 uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
1586                                  struct ttm_mem_reg *mem)
1587 {
1588         uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);
1589
1590         flags |= adev->gart.gart_pte_flags;
1591         flags |= AMDGPU_PTE_READABLE;
1592
1593         if (!amdgpu_ttm_tt_is_readonly(ttm))
1594                 flags |= AMDGPU_PTE_WRITEABLE;
1595
1596         return flags;
1597 }
1598
1599 /**
1600  * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
1601  * object.
1602  *
1603  * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
1604  * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
1605  * it can find space for a new object and by ttm_bo_force_list_clean() which is
1606  * used to clean out a memory space.
1607  */
1608 static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
1609                                             const struct ttm_place *place)
1610 {
1611         unsigned long num_pages = bo->mem.num_pages;
1612         struct drm_mm_node *node = bo->mem.mm_node;
1613         struct dma_resv_list *flist;
1614         struct dma_fence *f;
1615         int i;
1616
1617         if (bo->type == ttm_bo_type_kernel &&
1618             !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
1619                 return false;
1620
1621         /* If bo is a KFD BO, check if the bo belongs to the current process.
1622          * If true, then return false as any KFD process needs all its BOs to
1623          * be resident to run successfully
1624          */
1625         flist = dma_resv_get_list(bo->base.resv);
1626         if (flist) {
1627                 for (i = 0; i < flist->shared_count; ++i) {
1628                         f = rcu_dereference_protected(flist->shared[i],
1629                                 dma_resv_held(bo->base.resv));
1630                         if (amdkfd_fence_check_mm(f, current->mm))
1631                                 return false;
1632                 }
1633         }
1634
1635         switch (bo->mem.mem_type) {
1636         case TTM_PL_TT:
1637                 if (amdgpu_bo_is_amdgpu_bo(bo) &&
1638                     amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
1639                         return false;
1640                 return true;
1641
1642         case TTM_PL_VRAM:
1643                 /* Check each drm MM node individually */
1644                 while (num_pages) {
1645                         if (place->fpfn < (node->start + node->size) &&
1646                             !(place->lpfn && place->lpfn <= node->start))
1647                                 return true;
1648
1649                         num_pages -= node->size;
1650                         ++node;
1651                 }
1652                 return false;
1653
1654         default:
1655                 break;
1656         }
1657
1658         return ttm_bo_eviction_valuable(bo, place);
1659 }
1660
1661 /**
1662  * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
1663  *
1664  * @bo:  The buffer object to read/write
1665  * @offset:  Offset into buffer object
1666  * @buf:  Secondary buffer to write/read from
1667  * @len: Length in bytes of access
1668  * @write:  true if writing
1669  *
1670  * This is used to access VRAM that backs a buffer object via MMIO
1671  * access for debugging purposes.
1672  */
1673 static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
1674                                     unsigned long offset,
1675                                     void *buf, int len, int write)
1676 {
1677         struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1678         struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
1679         struct drm_mm_node *nodes;
1680         uint32_t value = 0;
1681         int ret = 0;
1682         uint64_t pos;
1683         unsigned long flags;
1684
1685         if (bo->mem.mem_type != TTM_PL_VRAM)
1686                 return -EIO;
1687
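             /* Translate the BO-relative offset into an absolute VRAM
              * address by finding the drm_mm node that backs it.
              */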
1688         pos = offset;
1689         nodes = amdgpu_find_mm_node(&abo->tbo.mem, &pos);
1690         pos += (nodes->start << PAGE_SHIFT);
1691
1692         while (len && pos < adev->gmc.mc_vram_size) {
1693                 uint64_t aligned_pos = pos & ~(uint64_t)3;
1694                 uint64_t bytes = 4 - (pos & 3);
1695                 uint32_t shift = (pos & 3) * 8;
1696                 uint32_t mask = 0xffffffff << shift;
1697
1698                 if (len < bytes) {
1699                         mask &= 0xffffffff >> (bytes - len) * 8;
1700                         bytes = len;
1701                 }
1702
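                     /* Partial dwords are handled with a read-modify-write
                      * through the MM_INDEX/MM_DATA register pair; fully
                      * aligned dwords use the bulk VRAM access helper below.
                      */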
1703                 if (mask != 0xffffffff) {
1704                         spin_lock_irqsave(&adev->mmio_idx_lock, flags);
1705                         WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
1706                         WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
1707                         if (!write || mask != 0xffffffff)
1708                                 value = RREG32_NO_KIQ(mmMM_DATA);
1709                         if (write) {
1710                                 value &= ~mask;
1711                                 value |= (*(uint32_t *)buf << shift) & mask;
1712                                 WREG32_NO_KIQ(mmMM_DATA, value);
1713                         }
1714                         spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
1715                         if (!write) {
1716                                 value = (value & mask) >> shift;
1717                                 memcpy(buf, &value, bytes);
1718                         }
1719                 } else {
1720                         bytes = (nodes->start + nodes->size) << PAGE_SHIFT;
1721                         bytes = min(bytes - pos, (uint64_t)len & ~0x3ull);
1722
1723                         amdgpu_device_vram_access(adev, pos, (uint32_t *)buf,
1724                                                   bytes, write);
1725                 }
1726
1727                 ret += bytes;
1728                 buf = (uint8_t *)buf + bytes;
1729                 pos += bytes;
1730                 len -= bytes;
1731                 if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
1732                         ++nodes;
1733                         pos = (nodes->start << PAGE_SHIFT);
1734                 }
1735         }
1736
1737         return ret;
1738 }
1739
1740 static struct ttm_bo_driver amdgpu_bo_driver = {
1741         .ttm_tt_create = &amdgpu_ttm_tt_create,
1742         .ttm_tt_populate = &amdgpu_ttm_tt_populate,
1743         .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
1744         .init_mem_type = &amdgpu_init_mem_type,
1745         .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
1746         .evict_flags = &amdgpu_evict_flags,
1747         .move = &amdgpu_bo_move,
1748         .verify_access = &amdgpu_verify_access,
1749         .move_notify = &amdgpu_bo_move_notify,
1750         .release_notify = &amdgpu_bo_release_notify,
1751         .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
1752         .io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
1753         .io_mem_pfn = amdgpu_ttm_io_mem_pfn,
1754         .access_memory = &amdgpu_ttm_access_memory,
1755         .del_from_lru_notify = &amdgpu_vm_del_from_lru_notify
1756 };
1757
1758 /*
1759  * Firmware Reservation functions
1760  */
1761 /**
1762  * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
1763  *
1764  * @adev: amdgpu_device pointer
1765  *
1766  * free fw reserved vram if it has been reserved.
1767  */
1768 static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
1769 {
1770         amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
1771                 NULL, &adev->fw_vram_usage.va);
1772 }
1773
1774 /**
1775  * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
1776  *
1777  * @adev: amdgpu_device pointer
1778  *
1779  * create bo vram reservation from fw.
1780  */
1781 static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
1782 {
1783         uint64_t vram_size = adev->gmc.visible_vram_size;
1784
1785         adev->fw_vram_usage.va = NULL;
1786         adev->fw_vram_usage.reserved_bo = NULL;
1787
1788         if (adev->fw_vram_usage.size == 0 ||
1789             adev->fw_vram_usage.size > vram_size)
1790                 return 0;
1791
1792         return amdgpu_bo_create_kernel_at(adev,
1793                                           adev->fw_vram_usage.start_offset,
1794                                           adev->fw_vram_usage.size,
1795                                           AMDGPU_GEM_DOMAIN_VRAM,
1796                                           &adev->fw_vram_usage.reserved_bo,
1797                                           &adev->fw_vram_usage.va);
1798 }
1799
1800 /*
1801  * Memory training reservation functions
1802  */
1803
1804 /**
1805  * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
1806  *
1807  * @adev: amdgpu_device pointer
1808  *
1809  * free memory training reserved vram if it has been reserved.
1810  */
1811 static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
1812 {
1813         struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1814
1815         ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
1816         amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
1817         ctx->c2p_bo = NULL;
1818
1819         return 0;
1820 }
1821
1822 static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
1823 {
1824         struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1825
1826         memset(ctx, 0, sizeof(*ctx));
1827
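             /* Both training blocks live at the top of VRAM: the C2P data
              * sits 1MB-aligned just below the discovery TMR, the P2C data
              * at a fixed offset from the end of VRAM.
              */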
1828         ctx->c2p_train_data_offset =
1829                 ALIGN((adev->gmc.mc_vram_size - adev->discovery_tmr_size - SZ_1M), SZ_1M);
1830         ctx->p2c_train_data_offset =
1831                 (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
1832         ctx->train_data_size =
1833                 GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
1834
1835         DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
1836                         ctx->train_data_size,
1837                         ctx->p2c_train_data_offset,
1838                         ctx->c2p_train_data_offset);
1839 }
1840
1841 /*
1842  * reserve TMR memory at the top of VRAM which holds
1843  * IP Discovery data and is protected by PSP.
1844  */
1845 static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
1846 {
1847         int ret;
1848         struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1849         bool mem_train_support = false;
1850
1851         if (!amdgpu_sriov_vf(adev)) {
1852                 ret = amdgpu_mem_train_support(adev);
1853                 if (ret == 1)
1854                         mem_train_support = true;
1855                 else if (ret == -1)
1856                         return -EINVAL;
1857                 else
1858                         DRM_DEBUG("memory training is not supported!\n");
1859         }
1860
1861         /*
1862          * Query reserved tmr size through atom firmwareinfo for Sienna_Cichlid and onwards for all
1863          * the use cases (IP discovery/G6 memory training/profiling/diagnostic data, etc.)
1864          *
1865          * Otherwise, fall back to the legacy approach of checking for and reserving
1866          * TMR blocks for IP discovery data and G6 memory training data respectively.
1867          */
1868         adev->discovery_tmr_size =
1869                 amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
1870         if (!adev->discovery_tmr_size)
1871                 adev->discovery_tmr_size = DISCOVERY_TMR_OFFSET;
1872
1873         if (mem_train_support) {
1874                 /* reserve vram for mem train according to TMR location */
1875                 amdgpu_ttm_training_data_block_init(adev);
1876                 ret = amdgpu_bo_create_kernel_at(adev,
1877                                          ctx->c2p_train_data_offset,
1878                                          ctx->train_data_size,
1879                                          AMDGPU_GEM_DOMAIN_VRAM,
1880                                          &ctx->c2p_bo,
1881                                          NULL);
1882                 if (ret) {
1883                         DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
1884                         amdgpu_ttm_training_reserve_vram_fini(adev);
1885                         return ret;
1886                 }
1887                 ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
1888         }
1889
1890         ret = amdgpu_bo_create_kernel_at(adev,
1891                                 adev->gmc.real_vram_size - adev->discovery_tmr_size,
1892                                 adev->discovery_tmr_size,
1893                                 AMDGPU_GEM_DOMAIN_VRAM,
1894                                 &adev->discovery_memory,
1895                                 NULL);
1896         if (ret) {
1897                 DRM_ERROR("alloc tmr failed(%d)!\n", ret);
1898                 amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL);
1899                 return ret;
1900         }
1901
1902         return 0;
1903 }
1904
1905 /**
1906  * amdgpu_ttm_init - Init the memory management (ttm) as well as various
1907  * gtt/vram related fields.
1908  *
1909  * This initializes all of the memory space pools that the TTM layer
1910  * will need such as the GTT space (system memory mapped to the device),
1911  * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
1912  * can be mapped per VMID.
1913  */
1914 int amdgpu_ttm_init(struct amdgpu_device *adev)
1915 {
1916         uint64_t gtt_size;
1917         int r;
1918         u64 vis_vram_limit;
1919         void *stolen_vga_buf;
1920
1921         mutex_init(&adev->mman.gtt_window_lock);
1922
1923         /* No other users of the address space, so set it to 0 */
1924         r = ttm_bo_device_init(&adev->mman.bdev,
1925                                &amdgpu_bo_driver,
1926                                adev->ddev->anon_inode->i_mapping,
1927                                adev->ddev->vma_offset_manager,
1928                                dma_addressing_limited(adev->dev));
1929         if (r) {
1930                 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
1931                 return r;
1932         }
1933         adev->mman.initialized = true;
1934
1935         /* We opt to avoid OOM on system page allocations */
1936         adev->mman.bdev.no_retry = true;
1937
1938         /* Initialize VRAM pool with all of VRAM divided into pages */
1939         r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
1940                                 adev->gmc.real_vram_size >> PAGE_SHIFT);
1941         if (r) {
1942                 DRM_ERROR("Failed initializing VRAM heap.\n");
1943                 return r;
1944         }
1945
1946         /* Reduce size of CPU-visible VRAM if requested */
1947         vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
1948         if (amdgpu_vis_vram_limit > 0 &&
1949             vis_vram_limit <= adev->gmc.visible_vram_size)
1950                 adev->gmc.visible_vram_size = vis_vram_limit;
1951
1952         /* Change the size here instead of the init above so only lpfn is affected */
1953         amdgpu_ttm_set_buffer_funcs_status(adev, false);
1954 #ifdef CONFIG_64BIT
1955         adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
1956                                                 adev->gmc.visible_vram_size);
1957 #endif
1958
1959         /*
1960          * The reserved vram for firmware must be pinned to the specified
1961          * place on the VRAM, so reserve it early.
1962          */
1963         r = amdgpu_ttm_fw_reserve_vram_init(adev);
1964         if (r)
1965                 return r;
1967
1968         /*
1969          * Only NAVI10 and onward ASICs support IP discovery.
1970          * If IP discovery is enabled, a block of memory should be
1971          * reserved for IP discovery data.
1972          */
1973         if (adev->discovery_bin) {
1974                 r = amdgpu_ttm_reserve_tmr(adev);
1975                 if (r)
1976                         return r;
1977         }
1978
1979         /* allocate memory as required for VGA
1980          * This is used for VGA emulation and pre-OS scanout buffers to
1981          * avoid display artifacts while transitioning between pre-OS
1982          * and driver.  */
1983         r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
1984                                     AMDGPU_GEM_DOMAIN_VRAM,
1985                                     &adev->stolen_vga_memory,
1986                                     NULL, &stolen_vga_buf);
1987         if (r)
1988                 return r;
1989
1990         DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
1991                  (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
1992
1993         /* Compute GTT size: by default the larger of the default GTT size and VRAM
1994          * size, capped at 3/4 of system RAM, or whatever the user passed on module init */
1995         if (amdgpu_gtt_size == -1) {
1996                 struct sysinfo si;
1997
1998                 si_meminfo(&si);
1999                 gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
2000                                adev->gmc.mc_vram_size),
2001                                ((uint64_t)si.totalram * si.mem_unit * 3/4));
2002         } else
2004                 gtt_size = (uint64_t)amdgpu_gtt_size << 20;
2005
2006         /* Initialize GTT memory pool */
2007         r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
2008         if (r) {
2009                 DRM_ERROR("Failed initializing GTT heap.\n");
2010                 return r;
2011         }
2012         DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
2013                  (unsigned)(gtt_size / (1024 * 1024)));
2014
2015         /* Initialize various on-chip memory pools */
2016         r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
2017                            adev->gds.gds_size);
2018         if (r) {
2019                 DRM_ERROR("Failed initializing GDS heap.\n");
2020                 return r;
2021         }
2022
2023         r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
2024                            adev->gds.gws_size);
2025         if (r) {
2026                 DRM_ERROR("Failed initializing gws heap.\n");
2027                 return r;
2028         }
2029
2030         r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
2031                            adev->gds.oa_size);
2032         if (r) {
2033                 DRM_ERROR("Failed initializing oa heap.\n");
2034                 return r;
2035         }
2036
2037         return 0;
2038 }
2039
2040 /**
2041  * amdgpu_ttm_late_init - Handle any late initialization for amdgpu_ttm
2042  */
2043 void amdgpu_ttm_late_init(struct amdgpu_device *adev)
2044 {
2045         void *stolen_vga_buf;
2046         /* return the VGA stolen memory (if any) back to VRAM */
2047         amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);
2048 }
2049
2050 /**
2051  * amdgpu_ttm_fini - De-initialize the TTM memory pools
2052  */
2053 void amdgpu_ttm_fini(struct amdgpu_device *adev)
2054 {
2055         if (!adev->mman.initialized)
2056                 return;
2057
2058         amdgpu_ttm_training_reserve_vram_fini(adev);
2059         /* return the IP Discovery TMR memory back to VRAM */
2060         amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL);
2061         amdgpu_ttm_fw_reserve_vram_fini(adev);
2062
2063         if (adev->mman.aper_base_kaddr)
2064                 iounmap(adev->mman.aper_base_kaddr);
2065         adev->mman.aper_base_kaddr = NULL;
2066
2067         ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
2068         ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
2069         ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
2070         ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
2071         ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
2072         ttm_bo_device_release(&adev->mman.bdev);
2073         adev->mman.initialized = false;
2074         DRM_INFO("amdgpu: ttm finalized\n");
2075 }
2076
2077 /**
2078  * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
2079  *
2080  * @adev: amdgpu_device pointer
2081  * @enable: true when we can use buffer functions.
2082  *
2083  * Enable/disable use of buffer functions during suspend/resume. This should
2084  * only be called at bootup or when userspace isn't running.
2085  */
2086 void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
2087 {
2088         struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
2089         uint64_t size;
2090         int r;
2091
2092         if (!adev->mman.initialized || adev->in_gpu_reset ||
2093             adev->mman.buffer_funcs_enabled == enable)
2094                 return;
2095
2096         if (enable) {
2097                 struct amdgpu_ring *ring;
2098                 struct drm_gpu_scheduler *sched;
2099
2100                 ring = adev->mman.buffer_funcs_ring;
2101                 sched = &ring->sched;
2102                 r = drm_sched_entity_init(&adev->mman.entity,
2103                                           DRM_SCHED_PRIORITY_KERNEL, &sched,
2104                                           1, NULL);
2105                 if (r) {
2106                         DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
2107                                   r);
2108                         return;
2109                 }
2110         } else {
2111                 drm_sched_entity_destroy(&adev->mman.entity);
2112                 dma_fence_put(man->move);
2113                 man->move = NULL;
2114         }
2115
2116         /* this just adjusts TTM's idea of the VRAM size, which sets lpfn to the correct value */
2117         if (enable)
2118                 size = adev->gmc.real_vram_size;
2119         else
2120                 size = adev->gmc.visible_vram_size;
2121         man->size = size >> PAGE_SHIFT;
2122         adev->mman.buffer_funcs_enabled = enable;
2123 }
2124
2125 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
2126 {
2127         struct drm_file *file_priv = filp->private_data;
2128         struct amdgpu_device *adev = file_priv->minor->dev->dev_private;
2129
2130         if (adev == NULL)
2131                 return -EINVAL;
2132
2133         return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
2134 }
2135
2136 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
2137                        uint64_t dst_offset, uint32_t byte_count,
2138                        struct dma_resv *resv,
2139                        struct dma_fence **fence, bool direct_submit,
2140                        bool vm_needs_flush, bool tmz)
2141 {
2142         enum amdgpu_ib_pool_type pool = direct_submit ? AMDGPU_IB_POOL_DIRECT :
2143                 AMDGPU_IB_POOL_DELAYED;
2144         struct amdgpu_device *adev = ring->adev;
2145         struct amdgpu_job *job;
2146
2147         uint32_t max_bytes;
2148         unsigned num_loops, num_dw;
2149         unsigned i;
2150         int r;
2151
2152         if (direct_submit && !ring->sched.ready) {
2153                 DRM_ERROR("Trying to move memory with ring turned off.\n");
2154                 return -EINVAL;
2155         }
2156
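             /* Split the copy into chunks the copy engine can handle in a
              * single command and size the IB accordingly, rounded up to an
              * 8-dword boundary for padding.
              */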
2157         max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
2158         num_loops = DIV_ROUND_UP(byte_count, max_bytes);
2159         num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
2160
2161         r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, &job);
2162         if (r)
2163                 return r;
2164
2165         if (vm_needs_flush) {
2166                 job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
2167                 job->vm_needs_flush = true;
2168         }
2169         if (resv) {
2170                 r = amdgpu_sync_resv(adev, &job->sync, resv,
2171                                      AMDGPU_SYNC_ALWAYS,
2172                                      AMDGPU_FENCE_OWNER_UNDEFINED);
2173                 if (r) {
2174                         DRM_ERROR("sync failed (%d).\n", r);
2175                         goto error_free;
2176                 }
2177         }
2178
2179         for (i = 0; i < num_loops; i++) {
2180                 uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
2181
2182                 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
2183                                         dst_offset, cur_size_in_bytes, tmz);
2184
2185                 src_offset += cur_size_in_bytes;
2186                 dst_offset += cur_size_in_bytes;
2187                 byte_count -= cur_size_in_bytes;
2188         }
2189
2190         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2191         WARN_ON(job->ibs[0].length_dw > num_dw);
2192         if (direct_submit)
2193                 r = amdgpu_job_submit_direct(job, ring, fence);
2194         else
2195                 r = amdgpu_job_submit(job, &adev->mman.entity,
2196                                       AMDGPU_FENCE_OWNER_UNDEFINED, fence);
2197         if (r)
2198                 goto error_free;
2199
2200         return r;
2201
2202 error_free:
2203         amdgpu_job_free(job);
2204         DRM_ERROR("Error scheduling IBs (%d)\n", r);
2205         return r;
2206 }
2207
2208 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
2209                        uint32_t src_data,
2210                        struct dma_resv *resv,
2211                        struct dma_fence **fence)
2212 {
2213         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
2214         uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
2215         struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2216
2217         struct drm_mm_node *mm_node;
2218         unsigned long num_pages;
2219         unsigned int num_loops, num_dw;
2220
2221         struct amdgpu_job *job;
2222         int r;
2223
2224         if (!adev->mman.buffer_funcs_enabled) {
2225                 DRM_ERROR("Trying to clear memory with ring turned off.\n");
2226                 return -EINVAL;
2227         }
2228
2229         if (bo->tbo.mem.mem_type == TTM_PL_TT) {
2230                 r = amdgpu_ttm_alloc_gart(&bo->tbo);
2231                 if (r)
2232                         return r;
2233         }
2234
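             /* First pass over the drm_mm nodes: count how many fill
              * commands the IB must hold.
              */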
2235         num_pages = bo->tbo.num_pages;
2236         mm_node = bo->tbo.mem.mm_node;
2237         num_loops = 0;
2238         while (num_pages) {
2239                 uint64_t byte_count = mm_node->size << PAGE_SHIFT;
2240
2241                 num_loops += DIV_ROUND_UP_ULL(byte_count, max_bytes);
2242                 num_pages -= mm_node->size;
2243                 ++mm_node;
2244         }
2245         num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
2246
2247         /* for IB padding */
2248         num_dw += 64;
2249
2250         r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED,
2251                                      &job);
2252         if (r)
2253                 return r;
2254
2255         if (resv) {
2256                 r = amdgpu_sync_resv(adev, &job->sync, resv,
2257                                      AMDGPU_SYNC_ALWAYS,
2258                                      AMDGPU_FENCE_OWNER_UNDEFINED);
2259                 if (r) {
2260                         DRM_ERROR("sync failed (%d).\n", r);
2261                         goto error_free;
2262                 }
2263         }
2264
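             /* Second pass: emit one fill command per max_bytes chunk of
              * each drm_mm node backing the BO.
              */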
2265         num_pages = bo->tbo.num_pages;
2266         mm_node = bo->tbo.mem.mm_node;
2267
2268         while (num_pages) {
2269                 uint64_t byte_count = mm_node->size << PAGE_SHIFT;
2270                 uint64_t dst_addr;
2271
2272                 dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
2273                 while (byte_count) {
2274                         uint32_t cur_size_in_bytes = min_t(uint64_t, byte_count,
2275                                                            max_bytes);
2276
2277                         amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
2278                                                 dst_addr, cur_size_in_bytes);
2279
2280                         dst_addr += cur_size_in_bytes;
2281                         byte_count -= cur_size_in_bytes;
2282                 }
2283
2284                 num_pages -= mm_node->size;
2285                 ++mm_node;
2286         }
2287
2288         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2289         WARN_ON(job->ibs[0].length_dw > num_dw);
2290         r = amdgpu_job_submit(job, &adev->mman.entity,
2291                               AMDGPU_FENCE_OWNER_UNDEFINED, fence);
2292         if (r)
2293                 goto error_free;
2294
2295         return 0;
2296
2297 error_free:
2298         amdgpu_job_free(job);
2299         return r;
2300 }
2301
2302 #if defined(CONFIG_DEBUG_FS)
2303
2304 static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
2305 {
2306         struct drm_info_node *node = (struct drm_info_node *)m->private;
2307         unsigned ttm_pl = (uintptr_t)node->info_ent->data;
2308         struct drm_device *dev = node->minor->dev;
2309         struct amdgpu_device *adev = dev->dev_private;
2310         struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl];
2311         struct drm_printer p = drm_seq_file_printer(m);
2312
2313         man->func->debug(man, &p);
2314         return 0;
2315 }
2316
2317 static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
2318         {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_VRAM},
2319         {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_TT},
2320         {"amdgpu_gds_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GDS},
2321         {"amdgpu_gws_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GWS},
2322         {"amdgpu_oa_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_OA},
2323         {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
2324 #ifdef CONFIG_SWIOTLB
2325         {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
2326 #endif
2327 };
2328
2329 /**
2330  * amdgpu_ttm_vram_read - Linear read access to VRAM
2331  *
2332  * Accesses VRAM via MMIO for debugging purposes.
2333  */
2334 static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
2335                                     size_t size, loff_t *pos)
2336 {
2337         struct amdgpu_device *adev = file_inode(f)->i_private;
2338         ssize_t result = 0;
2339
2340         if (size & 0x3 || *pos & 0x3)
2341                 return -EINVAL;
2342
2343         if (*pos >= adev->gmc.mc_vram_size)
2344                 return -ENXIO;
2345
2346         size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
2347         while (size) {
2348                 size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
2349                 uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];
2350
2351                 amdgpu_device_vram_access(adev, *pos, value, bytes, false);
2352                 if (copy_to_user(buf, value, bytes))
2353                         return -EFAULT;
2354
2355                 result += bytes;
2356                 buf += bytes;
2357                 *pos += bytes;
2358                 size -= bytes;
2359         }
2360
2361         return result;
2362 }
2363
2364 /**
2365  * amdgpu_ttm_vram_write - Linear write access to VRAM
2366  *
2367  * Accesses VRAM via MMIO for debugging purposes.
2368  */
2369 static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
2370                                     size_t size, loff_t *pos)
2371 {
2372         struct amdgpu_device *adev = file_inode(f)->i_private;
2373         ssize_t result = 0;
2374         int r;
2375
2376         if (size & 0x3 || *pos & 0x3)
2377                 return -EINVAL;
2378
2379         if (*pos >= adev->gmc.mc_vram_size)
2380                 return -ENXIO;
2381
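             /* Write one dword at a time through the MM_INDEX/MM_DATA window. */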
2382         while (size) {
2383                 unsigned long flags;
2384                 uint32_t value;
2385
2386                 if (*pos >= adev->gmc.mc_vram_size)
2387                         return result;
2388
2389                 r = get_user(value, (uint32_t *)buf);
2390                 if (r)
2391                         return r;
2392
2393                 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
2394                 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
2395                 WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
2396                 WREG32_NO_KIQ(mmMM_DATA, value);
2397                 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
2398
2399                 result += 4;
2400                 buf += 4;
2401                 *pos += 4;
2402                 size -= 4;
2403         }
2404
2405         return result;
2406 }
2407
2408 static const struct file_operations amdgpu_ttm_vram_fops = {
2409         .owner = THIS_MODULE,
2410         .read = amdgpu_ttm_vram_read,
2411         .write = amdgpu_ttm_vram_write,
2412         .llseek = default_llseek,
2413 };
2414
2415 #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
2416
2417 /**
2418  * amdgpu_ttm_gtt_read - Linear read access to GTT memory
2419  */
2420 static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
2421                                    size_t size, loff_t *pos)
2422 {
2423         struct amdgpu_device *adev = file_inode(f)->i_private;
2424         ssize_t result = 0;
2425         int r;
2426
2427         while (size) {
2428                 loff_t p = *pos / PAGE_SIZE;
2429                 unsigned off = *pos & ~PAGE_MASK;
2430                 size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
2431                 struct page *page;
2432                 void *ptr;
2433
2434                 if (p >= adev->gart.num_cpu_pages)
2435                         return result;
2436
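                     /* GART pages can be sparsely populated; holes read back
                      * as zeros via clear_user() below.
                      */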
2437                 page = adev->gart.pages[p];
2438                 if (page) {
2439                         ptr = kmap(page);
2440                         ptr += off;
2441
2442                         r = copy_to_user(buf, ptr, cur_size);
2443                         kunmap(adev->gart.pages[p]);
2444                 } else
2445                         r = clear_user(buf, cur_size);
2446
2447                 if (r)
2448                         return -EFAULT;
2449
2450                 result += cur_size;
2451                 buf += cur_size;
2452                 *pos += cur_size;
2453                 size -= cur_size;
2454         }
2455
2456         return result;
2457 }
2458
2459 static const struct file_operations amdgpu_ttm_gtt_fops = {
2460         .owner = THIS_MODULE,
2461         .read = amdgpu_ttm_gtt_read,
2462         .llseek = default_llseek
2463 };
2464
2465 #endif
2466
2467 /**
2468  * amdgpu_iomem_read - Virtual read access to GPU mapped memory
2469  *
2470  * This function is used to read memory that has been mapped to the
2471  * GPU and the known addresses are not physical addresses but instead
2472  * bus addresses (e.g., what you'd put in an IB or ring buffer).
2473  */
2474 static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
2475                                  size_t size, loff_t *pos)
2476 {
2477         struct amdgpu_device *adev = file_inode(f)->i_private;
2478         struct iommu_domain *dom;
2479         ssize_t result = 0;
2480         int r;
2481
2482         /* retrieve the IOMMU domain if any for this device */
2483         dom = iommu_get_domain_for_dev(adev->dev);
2484
2485         while (size) {
2486                 phys_addr_t addr = *pos & PAGE_MASK;
2487                 loff_t off = *pos & ~PAGE_MASK;
2488                 size_t bytes = PAGE_SIZE - off;
2489                 unsigned long pfn;
2490                 struct page *p;
2491                 void *ptr;
2492
2493                 bytes = bytes < size ? bytes : size;
2494
2495                 /* Translate the bus address to a physical address.  If
2496                  * the domain is NULL it means there is no IOMMU active
2497                  * and the address translation is the identity
2498                  */
2499                 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2500
2501                 pfn = addr >> PAGE_SHIFT;
2502                 if (!pfn_valid(pfn))
2503                         return -EPERM;
2504
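                     /* Only pages that TTM mapped for this device may be accessed. */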
2505                 p = pfn_to_page(pfn);
2506                 if (p->mapping != adev->mman.bdev.dev_mapping)
2507                         return -EPERM;
2508
2509                 ptr = kmap(p);
2510                 r = copy_to_user(buf, ptr + off, bytes);
2511                 kunmap(p);
2512                 if (r)
2513                         return -EFAULT;
2514
2515                 size -= bytes;
2516                 *pos += bytes;
2517                 result += bytes;
2518         }
2519
2520         return result;
2521 }
2522
2523 /**
2524  * amdgpu_iomem_write - Virtual write access to GPU mapped memory
2525  *
2526  * This function is used to write memory that has been mapped to the
2527  * GPU and the known addresses are not physical addresses but instead
2528  * bus addresses (e.g., what you'd put in an IB or ring buffer).
2529  */
2530 static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
2531                                  size_t size, loff_t *pos)
2532 {
2533         struct amdgpu_device *adev = file_inode(f)->i_private;
2534         struct iommu_domain *dom;
2535         ssize_t result = 0;
2536         int r;
2537
2538         dom = iommu_get_domain_for_dev(adev->dev);
2539
2540         while (size) {
2541                 phys_addr_t addr = *pos & PAGE_MASK;
2542                 loff_t off = *pos & ~PAGE_MASK;
2543                 size_t bytes = PAGE_SIZE - off;
2544                 unsigned long pfn;
2545                 struct page *p;
2546                 void *ptr;
2547
2548                 bytes = bytes < size ? bytes : size;
2549
2550                 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2551
2552                 pfn = addr >> PAGE_SHIFT;
2553                 if (!pfn_valid(pfn))
2554                         return -EPERM;
2555
2556                 p = pfn_to_page(pfn);
2557                 if (p->mapping != adev->mman.bdev.dev_mapping)
2558                         return -EPERM;
2559
2560                 ptr = kmap(p);
2561                 r = copy_from_user(ptr + off, buf, bytes);
2562                 kunmap(p);
2563                 if (r)
2564                         return -EFAULT;
2565
2566                 size -= bytes;
2567                 *pos += bytes;
2568                 result += bytes;
2569         }
2570
2571         return result;
2572 }
2573
2574 static const struct file_operations amdgpu_ttm_iomem_fops = {
2575         .owner = THIS_MODULE,
2576         .read = amdgpu_iomem_read,
2577         .write = amdgpu_iomem_write,
2578         .llseek = default_llseek
2579 };
2580
2581 static const struct {
2582         char *name;
2583         const struct file_operations *fops;
2584         int domain;
2585 } ttm_debugfs_entries[] = {
2586         { "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
2587 #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
2588         { "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
2589 #endif
2590         { "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
2591 };
2592
2593 #endif
2594
2595 int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
2596 {
2597 #if defined(CONFIG_DEBUG_FS)
2598         unsigned count;
2599
2600         struct drm_minor *minor = adev->ddev->primary;
2601         struct dentry *ent, *root = minor->debugfs_root;
2602
2603         for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
2604                 ent = debugfs_create_file(
2605                                 ttm_debugfs_entries[count].name,
2606                                 S_IFREG | S_IRUGO, root,
2607                                 adev,
2608                                 ttm_debugfs_entries[count].fops);
2609                 if (IS_ERR(ent))
2610                         return PTR_ERR(ent);
2611                 if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
2612                         i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
2613                 else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
2614                         i_size_write(ent->d_inode, adev->gmc.gart_size);
2615                 adev->mman.debugfs_entries[count] = ent;
2616         }
2617
2618         count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
2619
2620 #ifdef CONFIG_SWIOTLB
2621         if (!(adev->need_swiotlb && swiotlb_nr_tbl()))
2622                 --count;
2623 #endif
2624
2625         return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
2626 #else
2627         return 0;
2628 #endif
2629 }