[linux-2.6-microblaze.git] drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
1 /*
2  * Copyright 2009 Jerome Glisse.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the
7  * "Software"), to deal in the Software without restriction, including
8  * without limitation the rights to use, copy, modify, merge, publish,
9  * distribute, sub license, and/or sell copies of the Software, and to
10  * permit persons to whom the Software is furnished to do so, subject to
11  * the following conditions:
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19  * USE OR OTHER DEALINGS IN THE SOFTWARE.
20  *
21  * The above copyright notice and this permission notice (including the
22  * next paragraph) shall be included in all copies or substantial portions
23  * of the Software.
24  *
25  */
26 /*
27  * Authors:
28  *    Jerome Glisse <glisse@freedesktop.org>
29  *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
30  *    Dave Airlie
31  */
32
33 #include <linux/dma-mapping.h>
34 #include <linux/iommu.h>
35 #include <linux/pagemap.h>
36 #include <linux/sched/task.h>
37 #include <linux/sched/mm.h>
38 #include <linux/seq_file.h>
39 #include <linux/slab.h>
40 #include <linux/swap.h>
41 #include <linux/swiotlb.h>
42 #include <linux/dma-buf.h>
43 #include <linux/sizes.h>
44
45 #include <drm/ttm/ttm_bo_api.h>
46 #include <drm/ttm/ttm_bo_driver.h>
47 #include <drm/ttm/ttm_placement.h>
48 #include <drm/ttm/ttm_range_manager.h>
49
50 #include <drm/amdgpu_drm.h>
51
52 #include "amdgpu.h"
53 #include "amdgpu_object.h"
54 #include "amdgpu_trace.h"
55 #include "amdgpu_amdkfd.h"
56 #include "amdgpu_sdma.h"
57 #include "amdgpu_ras.h"
58 #include "amdgpu_atomfirmware.h"
59 #include "amdgpu_res_cursor.h"
60 #include "bif/bif_4_1_d.h"
61
62 #define AMDGPU_TTM_VRAM_MAX_DW_READ     (size_t)128
63
64 static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
65                                    struct ttm_tt *ttm,
66                                    struct ttm_resource *bo_mem);
67 static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
68                                       struct ttm_tt *ttm);
69
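/*
 * amdgpu_ttm_init_on_chip - set up a simple TTM range manager, sized in
 * pages, for one of the on-chip memory domains (typically GDS, GWS or OA).
 */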
70 static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
71                                     unsigned int type,
72                                     uint64_t size_in_page)
73 {
74         return ttm_range_man_init(&adev->mman.bdev, type,
75                                   false, size_in_page);
76 }
77
78 /**
79  * amdgpu_evict_flags - Compute placement flags
80  *
81  * @bo: The buffer object to evict
82  * @placement: Possible destination(s) for evicted BO
83  *
84  * Fill in placement data when ttm_bo_evict() is called
85  */
86 static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
87                                 struct ttm_placement *placement)
88 {
89         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
90         struct amdgpu_bo *abo;
91         static const struct ttm_place placements = {
92                 .fpfn = 0,
93                 .lpfn = 0,
94                 .mem_type = TTM_PL_SYSTEM,
95                 .flags = 0
96         };
97
98         /* Don't handle scatter gather BOs */
99         if (bo->type == ttm_bo_type_sg) {
100                 placement->num_placement = 0;
101                 placement->num_busy_placement = 0;
102                 return;
103         }
104
105         /* Object isn't an AMDGPU object so ignore */
106         if (!amdgpu_bo_is_amdgpu_bo(bo)) {
107                 placement->placement = &placements;
108                 placement->busy_placement = &placements;
109                 placement->num_placement = 1;
110                 placement->num_busy_placement = 1;
111                 return;
112         }
113
114         abo = ttm_to_amdgpu_bo(bo);
115         if (abo->flags & AMDGPU_AMDKFD_CREATE_SVM_BO) {
116                 struct dma_fence *fence;
117                 struct dma_resv *resv = &bo->base._resv;
118
119                 rcu_read_lock();
120                 fence = rcu_dereference(resv->fence_excl);
121                 if (fence && !fence->ops->signaled)
122                         dma_fence_enable_sw_signaling(fence);
123
124                 placement->num_placement = 0;
125                 placement->num_busy_placement = 0;
126                 rcu_read_unlock();
127                 return;
128         }
129
130         switch (bo->resource->mem_type) {
131         case AMDGPU_PL_GDS:
132         case AMDGPU_PL_GWS:
133         case AMDGPU_PL_OA:
134                 placement->num_placement = 0;
135                 placement->num_busy_placement = 0;
136                 return;
137
138         case TTM_PL_VRAM:
139                 if (!adev->mman.buffer_funcs_enabled) {
140                         /* Move to system memory */
141                         amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
142                 } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
143                            !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
144                            amdgpu_bo_in_cpu_visible_vram(abo)) {
145
146                         /* Try evicting to the CPU inaccessible part of VRAM
147                          * first, but only set GTT as busy placement, so this
148                          * BO will be evicted to GTT rather than causing other
149                          * BOs to be evicted from VRAM
150                          */
151                         amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
152                                                          AMDGPU_GEM_DOMAIN_GTT);
153                         abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
154                         abo->placements[0].lpfn = 0;
155                         abo->placement.busy_placement = &abo->placements[1];
156                         abo->placement.num_busy_placement = 1;
157                 } else {
158                         /* Move to GTT memory */
159                         amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
160                 }
161                 break;
162         case TTM_PL_TT:
163         case AMDGPU_PL_PREEMPT:
164         default:
165                 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
166                 break;
167         }
168         *placement = abo->placement;
169 }
170
171 /**
172  * amdgpu_ttm_map_buffer - Map memory into the GART windows
173  * @bo: buffer object to map
174  * @mem: memory object to map
175  * @mm_cur: range to map
176  * @num_pages: number of pages to map
177  * @window: which GART window to use
178  * @ring: DMA ring to use for the copy
179  * @tmz: if we should setup a TMZ enabled mapping
180  * @addr: resulting address inside the MC address space
181  *
182  * Setup one of the GART windows to access a specific piece of memory or return
183  * the physical address for local memory.
184  */
185 static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
186                                  struct ttm_resource *mem,
187                                  struct amdgpu_res_cursor *mm_cur,
188                                  unsigned num_pages, unsigned window,
189                                  struct amdgpu_ring *ring, bool tmz,
190                                  uint64_t *addr)
191 {
192         struct amdgpu_device *adev = ring->adev;
193         struct amdgpu_job *job;
194         unsigned num_dw, num_bytes;
195         struct dma_fence *fence;
196         uint64_t src_addr, dst_addr;
197         void *cpu_addr;
198         uint64_t flags;
199         unsigned int i;
200         int r;
201
202         BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
203                AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
204         BUG_ON(mem->mem_type == AMDGPU_PL_PREEMPT);
205
206         /* Map only what can't be accessed directly */
207         if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
208                 *addr = amdgpu_ttm_domain_start(adev, mem->mem_type) +
209                         mm_cur->start;
210                 return 0;
211         }
212
213         *addr = adev->gmc.gart_start;
214         *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
215                 AMDGPU_GPU_PAGE_SIZE;
216         *addr += mm_cur->start & ~PAGE_MASK;
217
218         num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
219         num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
220
221         r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
222                                      AMDGPU_IB_POOL_DELAYED, &job);
223         if (r)
224                 return r;
225
226         src_addr = num_dw * 4;
227         src_addr += job->ibs[0].gpu_addr;
228
229         dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
230         dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
231         amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
232                                 dst_addr, num_bytes, false);
233
234         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
235         WARN_ON(job->ibs[0].length_dw > num_dw);
236
237         flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
238         if (tmz)
239                 flags |= AMDGPU_PTE_TMZ;
240
241         cpu_addr = &job->ibs[0].ptr[num_dw];
242
243         if (mem->mem_type == TTM_PL_TT) {
244                 dma_addr_t *dma_addr;
245
246                 dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
247                 r = amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags,
248                                     cpu_addr);
249                 if (r)
250                         goto error_free;
251         } else {
252                 dma_addr_t dma_address;
253
254                 dma_address = mm_cur->start;
255                 dma_address += adev->vm_manager.vram_base_offset;
256
257                 for (i = 0; i < num_pages; ++i) {
258                         r = amdgpu_gart_map(adev, i << PAGE_SHIFT, 1,
259                                             &dma_address, flags, cpu_addr);
260                         if (r)
261                                 goto error_free;
262
263                         dma_address += PAGE_SIZE;
264                 }
265         }
266
267         r = amdgpu_job_submit(job, &adev->mman.entity,
268                               AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
269         if (r)
270                 goto error_free;
271
272         dma_fence_put(fence);
273
274         return r;
275
276 error_free:
277         amdgpu_job_free(job);
278         return r;
279 }
280
281 /**
282  * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
283  * @adev: amdgpu device
284  * @src: buffer/address where to read from
285  * @dst: buffer/address where to write to
286  * @size: number of bytes to copy
287  * @tmz: if a secure copy should be used
288  * @resv: resv object to sync to
289  * @f: Returns the last fence if multiple jobs are submitted.
290  *
291  * The function copies @size bytes from {src->mem + src->offset} to
292  * {dst->mem + dst->offset}. src->bo and dst->bo could be the same BO for a
293  * move and different for a BO to BO copy.
294  *
295  */
296 int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
297                                const struct amdgpu_copy_mem *src,
298                                const struct amdgpu_copy_mem *dst,
299                                uint64_t size, bool tmz,
300                                struct dma_resv *resv,
301                                struct dma_fence **f)
302 {
303         const uint32_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
304                                         AMDGPU_GPU_PAGE_SIZE);
305
306         struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
307         struct amdgpu_res_cursor src_mm, dst_mm;
308         struct dma_fence *fence = NULL;
309         int r = 0;
310
311         if (!adev->mman.buffer_funcs_enabled) {
312                 DRM_ERROR("Trying to move memory with ring turned off.\n");
313                 return -EINVAL;
314         }
315
316         amdgpu_res_first(src->mem, src->offset, size, &src_mm);
317         amdgpu_res_first(dst->mem, dst->offset, size, &dst_mm);
318
319         mutex_lock(&adev->mman.gtt_window_lock);
320         while (src_mm.remaining) {
321                 uint32_t src_page_offset = src_mm.start & ~PAGE_MASK;
322                 uint32_t dst_page_offset = dst_mm.start & ~PAGE_MASK;
323                 struct dma_fence *next;
324                 uint32_t cur_size;
325                 uint64_t from, to;
326
327                 /* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
328                  * begins at an offset, then adjust the size accordingly
329                  */
330                 cur_size = max(src_page_offset, dst_page_offset);
331                 cur_size = min(min3(src_mm.size, dst_mm.size, size),
332                                (uint64_t)(GTT_MAX_BYTES - cur_size));
333
334                 /* Map src to window 0 and dst to window 1. */
335                 r = amdgpu_ttm_map_buffer(src->bo, src->mem, &src_mm,
336                                           PFN_UP(cur_size + src_page_offset),
337                                           0, ring, tmz, &from);
338                 if (r)
339                         goto error;
340
341                 r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, &dst_mm,
342                                           PFN_UP(cur_size + dst_page_offset),
343                                           1, ring, tmz, &to);
344                 if (r)
345                         goto error;
346
347                 r = amdgpu_copy_buffer(ring, from, to, cur_size,
348                                        resv, &next, false, true, tmz);
349                 if (r)
350                         goto error;
351
352                 dma_fence_put(fence);
353                 fence = next;
354
355                 amdgpu_res_next(&src_mm, cur_size);
356                 amdgpu_res_next(&dst_mm, cur_size);
357         }
358 error:
359         mutex_unlock(&adev->mman.gtt_window_lock);
360         if (f)
361                 *f = dma_fence_get(fence);
362         dma_fence_put(fence);
363         return r;
364 }
365
366 /*
367  * amdgpu_move_blit - Copy an entire buffer to another buffer
368  *
369  * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
370  * help move buffers to and from VRAM.
371  */
372 static int amdgpu_move_blit(struct ttm_buffer_object *bo,
373                             bool evict,
374                             struct ttm_resource *new_mem,
375                             struct ttm_resource *old_mem)
376 {
377         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
378         struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
379         struct amdgpu_copy_mem src, dst;
380         struct dma_fence *fence = NULL;
381         int r;
382
383         src.bo = bo;
384         dst.bo = bo;
385         src.mem = old_mem;
386         dst.mem = new_mem;
387         src.offset = 0;
388         dst.offset = 0;
389
390         r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
391                                        new_mem->num_pages << PAGE_SHIFT,
392                                        amdgpu_bo_encrypted(abo),
393                                        bo->base.resv, &fence);
394         if (r)
395                 goto error;
396
397         /* clear the space being freed */
398         if (old_mem->mem_type == TTM_PL_VRAM &&
399             (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
400                 struct dma_fence *wipe_fence = NULL;
401
402                 r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON,
403                                        NULL, &wipe_fence);
404                 if (r) {
405                         goto error;
406                 } else if (wipe_fence) {
407                         dma_fence_put(fence);
408                         fence = wipe_fence;
409                 }
410         }
411
412         /* Always block for VM page tables before committing the new location */
413         if (bo->type == ttm_bo_type_kernel)
414                 r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
415         else
416                 r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
417         dma_fence_put(fence);
418         return r;
419
420 error:
421         if (fence)
422                 dma_fence_wait(fence, false);
423         dma_fence_put(fence);
424         return r;
425 }
426
427 /*
428  * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
429  *
430  * Called by amdgpu_bo_move()
431  */
432 static bool amdgpu_mem_visible(struct amdgpu_device *adev,
433                                struct ttm_resource *mem)
434 {
435         uint64_t mem_size = (u64)mem->num_pages << PAGE_SHIFT;
436         struct amdgpu_res_cursor cursor;
437
438         if (mem->mem_type == TTM_PL_SYSTEM ||
439             mem->mem_type == TTM_PL_TT)
440                 return true;
441         if (mem->mem_type != TTM_PL_VRAM)
442                 return false;
443
444         amdgpu_res_first(mem, 0, mem_size, &cursor);
445
446         /* ttm_resource_ioremap only supports contiguous memory */
447         if (cursor.size != mem_size)
448                 return false;
449
450         return cursor.start + cursor.size <= adev->gmc.visible_vram_size;
451 }
452
453 /*
454  * amdgpu_bo_move - Move a buffer object to a new memory location
455  *
456  * Called by ttm_bo_handle_move_mem()
457  */
458 static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
459                           struct ttm_operation_ctx *ctx,
460                           struct ttm_resource *new_mem,
461                           struct ttm_place *hop)
462 {
463         struct amdgpu_device *adev;
464         struct amdgpu_bo *abo;
465         struct ttm_resource *old_mem = bo->resource;
466         int r;
467
468         if (new_mem->mem_type == TTM_PL_TT ||
469             new_mem->mem_type == AMDGPU_PL_PREEMPT) {
470                 r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
471                 if (r)
472                         return r;
473         }
474
475         /* Can't move a pinned BO */
476         abo = ttm_to_amdgpu_bo(bo);
477         if (WARN_ON_ONCE(abo->tbo.pin_count > 0))
478                 return -EINVAL;
479
480         adev = amdgpu_ttm_adev(bo->bdev);
481
482         if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
483                 ttm_bo_move_null(bo, new_mem);
484                 goto out;
485         }
486         if (old_mem->mem_type == TTM_PL_SYSTEM &&
487             (new_mem->mem_type == TTM_PL_TT ||
488              new_mem->mem_type == AMDGPU_PL_PREEMPT)) {
489                 ttm_bo_move_null(bo, new_mem);
490                 goto out;
491         }
492         if ((old_mem->mem_type == TTM_PL_TT ||
493              old_mem->mem_type == AMDGPU_PL_PREEMPT) &&
494             new_mem->mem_type == TTM_PL_SYSTEM) {
495                 r = ttm_bo_wait_ctx(bo, ctx);
496                 if (r)
497                         return r;
498
499                 amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
500                 ttm_resource_free(bo, &bo->resource);
501                 ttm_bo_assign_mem(bo, new_mem);
502                 goto out;
503         }
504
505         if (old_mem->mem_type == AMDGPU_PL_GDS ||
506             old_mem->mem_type == AMDGPU_PL_GWS ||
507             old_mem->mem_type == AMDGPU_PL_OA ||
508             new_mem->mem_type == AMDGPU_PL_GDS ||
509             new_mem->mem_type == AMDGPU_PL_GWS ||
510             new_mem->mem_type == AMDGPU_PL_OA) {
511                 /* Nothing to save here */
512                 ttm_bo_move_null(bo, new_mem);
513                 goto out;
514         }
515
516         if (adev->mman.buffer_funcs_enabled) {
517                 if (((old_mem->mem_type == TTM_PL_SYSTEM &&
518                       new_mem->mem_type == TTM_PL_VRAM) ||
519                      (old_mem->mem_type == TTM_PL_VRAM &&
520                       new_mem->mem_type == TTM_PL_SYSTEM))) {
521                         hop->fpfn = 0;
522                         hop->lpfn = 0;
523                         hop->mem_type = TTM_PL_TT;
524                         hop->flags = 0;
525                         return -EMULTIHOP;
526                 }
527
528                 r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
529         } else {
530                 r = -ENODEV;
531         }
532
533         if (r) {
534                 /* Check that all memory is CPU accessible */
535                 if (!amdgpu_mem_visible(adev, old_mem) ||
536                     !amdgpu_mem_visible(adev, new_mem)) {
537                         pr_err("Move buffer fallback to memcpy unavailable\n");
538                         return r;
539                 }
540
541                 r = ttm_bo_move_memcpy(bo, ctx, new_mem);
542                 if (r)
543                         return r;
544         }
545
546         if (bo->type == ttm_bo_type_device &&
547             new_mem->mem_type == TTM_PL_VRAM &&
548             old_mem->mem_type != TTM_PL_VRAM) {
549                 /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
550                  * accesses the BO after it's moved.
551                  */
552                 abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
553         }
554
555 out:
556         /* update statistics */
557         atomic64_add(bo->base.size, &adev->num_bytes_moved);
558         amdgpu_bo_move_notify(bo, evict, new_mem);
559         return 0;
560 }
561
562 /*
563  * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
564  *
565  * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
566  */
567 static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
568                                      struct ttm_resource *mem)
569 {
570         struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
571         size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
572
573         switch (mem->mem_type) {
574         case TTM_PL_SYSTEM:
575                 /* system memory */
576                 return 0;
577         case TTM_PL_TT:
578         case AMDGPU_PL_PREEMPT:
579                 break;
580         case TTM_PL_VRAM:
581                 mem->bus.offset = mem->start << PAGE_SHIFT;
582                 /* check if it's visible */
583                 if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
584                         return -EINVAL;
585
586                 if (adev->mman.aper_base_kaddr &&
587                     mem->placement & TTM_PL_FLAG_CONTIGUOUS)
588                         mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
589                                         mem->bus.offset;
590
591                 mem->bus.offset += adev->gmc.aper_base;
592                 mem->bus.is_iomem = true;
593                 break;
594         default:
595                 return -EINVAL;
596         }
597         return 0;
598 }
599
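/*
 * amdgpu_ttm_io_mem_pfn - return the PFN inside the VRAM BAR aperture that
 * backs the given page offset of the BO.
 */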
600 static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
601                                            unsigned long page_offset)
602 {
603         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
604         struct amdgpu_res_cursor cursor;
605
606         amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0,
607                          &cursor);
608         return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
609 }
610
611 /**
612  * amdgpu_ttm_domain_start - Returns GPU start address
613  * @adev: amdgpu device object
614  * @type: type of the memory
615  *
616  * Returns:
617  * GPU start address of a memory domain
618  */
619
620 uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
621 {
622         switch (type) {
623         case TTM_PL_TT:
624                 return adev->gmc.gart_start;
625         case TTM_PL_VRAM:
626                 return adev->gmc.vram_start;
627         }
628
629         return 0;
630 }
631
632 /*
633  * TTM backend functions.
634  */
635 struct amdgpu_ttm_tt {
636         struct ttm_tt   ttm;
637         struct drm_gem_object   *gobj;
638         u64                     offset;
639         uint64_t                userptr;
640         struct task_struct      *usertask;
641         uint32_t                userflags;
642         bool                    bound;
643 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
644         struct hmm_range        *range;
645 #endif
646 };
647
648 #ifdef CONFIG_DRM_AMDGPU_USERPTR
649 /*
650  * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
651  * memory and start HMM tracking CPU page table updates
652  *
653  * Calling function must call amdgpu_ttm_tt_get_user_pages_done() once and only
654  * once afterwards to stop HMM tracking
655  */
656 int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
657 {
658         struct ttm_tt *ttm = bo->tbo.ttm;
659         struct amdgpu_ttm_tt *gtt = (void *)ttm;
660         unsigned long start = gtt->userptr;
661         struct vm_area_struct *vma;
662         struct mm_struct *mm;
663         bool readonly;
664         int r = 0;
665
666         mm = bo->notifier.mm;
667         if (unlikely(!mm)) {
668                 DRM_DEBUG_DRIVER("BO is not registered?\n");
669                 return -EFAULT;
670         }
671
672         /* Another get_user_pages is running at the same time?? */
673         if (WARN_ON(gtt->range))
674                 return -EFAULT;
675
676         if (!mmget_not_zero(mm)) /* Happens during process shutdown */
677                 return -ESRCH;
678
679         mmap_read_lock(mm);
680         vma = vma_lookup(mm, start);
681         if (unlikely(!vma)) {
682                 r = -EFAULT;
683                 goto out_unlock;
684         }
685         if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
686                 vma->vm_file)) {
687                 r = -EPERM;
688                 goto out_unlock;
689         }
690
691         readonly = amdgpu_ttm_tt_is_readonly(ttm);
692         r = amdgpu_hmm_range_get_pages(&bo->notifier, mm, pages, start,
693                                        ttm->num_pages, &gtt->range, readonly,
694                                        true, NULL);
695 out_unlock:
696         mmap_read_unlock(mm);
697         mmput(mm);
698
699         return r;
700 }
701
702 /*
703  * amdgpu_ttm_tt_get_user_pages_done - stop HMM from tracking CPU page table changes
704  * Check if the pages backing this ttm range have been invalidated
705  *
706  * Returns: true if pages are still valid
707  */
708 bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
709 {
710         struct amdgpu_ttm_tt *gtt = (void *)ttm;
711         bool r = false;
712
713         if (!gtt || !gtt->userptr)
714                 return false;
715
716         DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
717                 gtt->userptr, ttm->num_pages);
718
719         WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
720                 "No user pages to check\n");
721
722         if (gtt->range) {
723                 /*
724                  * FIXME: Must always hold notifier_lock for this, and must
725                  * not ignore the return code.
726                  */
727                 r = amdgpu_hmm_range_get_pages_done(gtt->range);
728                 gtt->range = NULL;
729         }
730
731         return !r;
732 }
733 #endif
734
735 /*
736  * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
737  *
738  * Called by amdgpu_cs_list_validate(). This creates the page list
739  * that backs user memory and will ultimately be mapped into the device
740  * address space.
741  */
742 void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
743 {
744         unsigned long i;
745
746         for (i = 0; i < ttm->num_pages; ++i)
747                 ttm->pages[i] = pages ? pages[i] : NULL;
748 }
749
750 /*
751  * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
752  *
753  * Called by amdgpu_ttm_backend_bind()
754  **/
755 static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
756                                      struct ttm_tt *ttm)
757 {
758         struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
759         struct amdgpu_ttm_tt *gtt = (void *)ttm;
760         int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
761         enum dma_data_direction direction = write ?
762                 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
763         int r;
764
765         /* Allocate an SG array and squash pages into it */
766         r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
767                                       (u64)ttm->num_pages << PAGE_SHIFT,
768                                       GFP_KERNEL);
769         if (r)
770                 goto release_sg;
771
772         /* Map SG to device */
773         r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
774         if (r)
775                 goto release_sg;
776
777         /* convert SG to linear array of pages and dma addresses */
778         drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
779                                        ttm->num_pages);
780
781         return 0;
782
783 release_sg:
784         kfree(ttm->sg);
785         ttm->sg = NULL;
786         return r;
787 }
788
789 /*
790  * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
791  */
792 static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
793                                         struct ttm_tt *ttm)
794 {
795         struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
796         struct amdgpu_ttm_tt *gtt = (void *)ttm;
797         int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
798         enum dma_data_direction direction = write ?
799                 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
800
801         /* double check that we don't free the table twice */
802         if (!ttm->sg || !ttm->sg->sgl)
803                 return;
804
805         /* unmap the pages mapped to the device */
806         dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
807         sg_free_table(ttm->sg);
808
809 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
810         if (gtt->range) {
811                 unsigned long i;
812
813                 for (i = 0; i < ttm->num_pages; i++) {
814                         if (ttm->pages[i] !=
815                             hmm_pfn_to_page(gtt->range->hmm_pfns[i]))
816                                 break;
817                 }
818
819                 WARN((i == ttm->num_pages), "Missing get_user_page_done\n");
820         }
821 #endif
822 }
823
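/*
 * amdgpu_ttm_gart_bind - bind the pages of a BO into the GART.
 *
 * Adds AMDGPU_PTE_TMZ for encrypted BOs and handles the GFX9 MQD case where
 * only the first page keeps the UC memory type while the rest is mapped NC.
 */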
824 static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
825                                 struct ttm_buffer_object *tbo,
826                                 uint64_t flags)
827 {
828         struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
829         struct ttm_tt *ttm = tbo->ttm;
830         struct amdgpu_ttm_tt *gtt = (void *)ttm;
831         int r;
832
833         if (amdgpu_bo_encrypted(abo))
834                 flags |= AMDGPU_PTE_TMZ;
835
836         if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
837                 uint64_t page_idx = 1;
838
839                 r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
840                                 gtt->ttm.dma_address, flags);
841                 if (r)
842                         goto gart_bind_fail;
843
844                 /* The memory type of the first page defaults to UC. Now
845                  * modify the memory type to NC from the second page of
846                  * the BO onward.
847                  */
848                 flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
849                 flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
850
851                 r = amdgpu_gart_bind(adev,
852                                 gtt->offset + (page_idx << PAGE_SHIFT),
853                                 ttm->num_pages - page_idx,
854                                 &(gtt->ttm.dma_address[page_idx]), flags);
855         } else {
856                 r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
857                                      gtt->ttm.dma_address, flags);
858         }
859
860 gart_bind_fail:
861         if (r)
862                 DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
863                           ttm->num_pages, gtt->offset);
864
865         return r;
866 }
867
868 /*
869  * amdgpu_ttm_backend_bind - Bind GTT memory
870  *
871  * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
872  * This handles binding GTT memory to the device address space.
873  */
874 static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
875                                    struct ttm_tt *ttm,
876                                    struct ttm_resource *bo_mem)
877 {
878         struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
879         struct amdgpu_ttm_tt *gtt = (void*)ttm;
880         uint64_t flags;
881         int r = 0;
882
883         if (!bo_mem)
884                 return -EINVAL;
885
886         if (gtt->bound)
887                 return 0;
888
889         if (gtt->userptr) {
890                 r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
891                 if (r) {
892                         DRM_ERROR("failed to pin userptr\n");
893                         return r;
894                 }
895         } else if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
896                 if (!ttm->sg) {
897                         struct dma_buf_attachment *attach;
898                         struct sg_table *sgt;
899
900                         attach = gtt->gobj->import_attach;
901                         sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
902                         if (IS_ERR(sgt))
903                                 return PTR_ERR(sgt);
904
905                         ttm->sg = sgt;
906                 }
907
908                 drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
909                                                ttm->num_pages);
910         }
911
912         if (!ttm->num_pages) {
913                 WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
914                      ttm->num_pages, bo_mem, ttm);
915         }
916
917         if (bo_mem->mem_type == AMDGPU_PL_GDS ||
918             bo_mem->mem_type == AMDGPU_PL_GWS ||
919             bo_mem->mem_type == AMDGPU_PL_OA)
920                 return -EINVAL;
921
922         if (bo_mem->mem_type != TTM_PL_TT ||
923             !amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
924                 gtt->offset = AMDGPU_BO_INVALID_OFFSET;
925                 return 0;
926         }
927
928         /* compute PTE flags relevant to this BO memory */
929         flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
930
931         /* bind pages into GART page tables */
932         gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
933         r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
934                 gtt->ttm.dma_address, flags);
935
936         if (r)
937                 DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
938                           ttm->num_pages, gtt->offset);
939         gtt->bound = true;
940         return r;
941 }
942
943 /*
944  * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either
945  * through AGP or GART aperture.
946  *
947  * If bo is accessible through AGP aperture, then use AGP aperture
948  * to access bo; otherwise allocate logical space in GART aperture
949  * and map bo to GART aperture.
950  */
951 int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
952 {
953         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
954         struct ttm_operation_ctx ctx = { false, false };
955         struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
956         struct ttm_placement placement;
957         struct ttm_place placements;
958         struct ttm_resource *tmp;
959         uint64_t addr, flags;
960         int r;
961
962         if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET)
963                 return 0;
964
965         addr = amdgpu_gmc_agp_addr(bo);
966         if (addr != AMDGPU_BO_INVALID_OFFSET) {
967                 bo->resource->start = addr >> PAGE_SHIFT;
968                 return 0;
969         }
970
971         /* allocate GART space */
972         placement.num_placement = 1;
973         placement.placement = &placements;
974         placement.num_busy_placement = 1;
975         placement.busy_placement = &placements;
976         placements.fpfn = 0;
977         placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
978         placements.mem_type = TTM_PL_TT;
979         placements.flags = bo->resource->placement;
980
981         r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
982         if (unlikely(r))
983                 return r;
984
985         /* compute PTE flags for this buffer object */
986         flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);
987
988         /* Bind pages */
989         gtt->offset = (u64)tmp->start << PAGE_SHIFT;
990         r = amdgpu_ttm_gart_bind(adev, bo, flags);
991         if (unlikely(r)) {
992                 ttm_resource_free(bo, &tmp);
993                 return r;
994         }
995
996         amdgpu_gart_invalidate_tlb(adev);
997         ttm_resource_free(bo, &bo->resource);
998         ttm_bo_assign_mem(bo, tmp);
999
1000         return 0;
1001 }
1002
1003 /*
1004  * amdgpu_ttm_recover_gart - Rebind GTT pages
1005  *
1006  * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
1007  * rebind GTT pages during a GPU reset.
1008  */
1009 int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
1010 {
1011         struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
1012         uint64_t flags;
1013         int r;
1014
1015         if (!tbo->ttm)
1016                 return 0;
1017
1018         flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
1019         r = amdgpu_ttm_gart_bind(adev, tbo, flags);
1020
1021         return r;
1022 }
1023
1024 /*
1025  * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
1026  *
1027  * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
1028  * ttm_tt_destroy().
1029  */
1030 static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
1031                                       struct ttm_tt *ttm)
1032 {
1033         struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1034         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1035         int r;
1036
1037         /* if the pages have userptr pinning then clear that first */
1038         if (gtt->userptr) {
1039                 amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
1040         } else if (ttm->sg && gtt->gobj->import_attach) {
1041                 struct dma_buf_attachment *attach;
1042
1043                 attach = gtt->gobj->import_attach;
1044                 dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
1045                 ttm->sg = NULL;
1046         }
1047
1048         if (!gtt->bound)
1049                 return;
1050
1051         if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
1052                 return;
1053
1054         /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
1055         r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
1056         if (r)
1057                 DRM_ERROR("failed to unbind %u pages at 0x%08llX\n",
1058                           gtt->ttm.num_pages, gtt->offset);
1059         gtt->bound = false;
1060 }
1061
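/*
 * amdgpu_ttm_backend_destroy - free a ttm_tt object.
 *
 * Unbinds the pages first, drops the reference on the user task (for
 * userptr BOs) and frees the backing structure.
 */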
1062 static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
1063                                        struct ttm_tt *ttm)
1064 {
1065         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1066
1067         amdgpu_ttm_backend_unbind(bdev, ttm);
1068         ttm_tt_destroy_common(bdev, ttm);
1069         if (gtt->usertask)
1070                 put_task_struct(gtt->usertask);
1071
1072         ttm_tt_fini(&gtt->ttm);
1073         kfree(gtt);
1074 }
1075
1076 /**
1077  * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
1078  *
1079  * @bo: The buffer object to create a GTT ttm_tt object around
1080  * @page_flags: Page flags to be added to the ttm_tt object
1081  *
1082  * Called by ttm_tt_create().
1083  */
1084 static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
1085                                            uint32_t page_flags)
1086 {
1087         struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1088         struct amdgpu_ttm_tt *gtt;
1089         enum ttm_caching caching;
1090
1091         gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
1092         if (gtt == NULL) {
1093                 return NULL;
1094         }
1095         gtt->gobj = &bo->base;
1096
1097         if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
1098                 caching = ttm_write_combined;
1099         else
1100                 caching = ttm_cached;
1101
1102         /* allocate space for the uninitialized page entries */
1103         if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
1104                 kfree(gtt);
1105                 return NULL;
1106         }
1107         return &gtt->ttm;
1108 }
1109
1110 /*
1111  * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
1112  *
1113  * Map the pages of a ttm_tt object to an address space visible
1114  * to the underlying device.
1115  */
1116 static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
1117                                   struct ttm_tt *ttm,
1118                                   struct ttm_operation_ctx *ctx)
1119 {
1120         struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1121         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1122
1123         /* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
1124         if (gtt && gtt->userptr) {
1125                 ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
1126                 if (!ttm->sg)
1127                         return -ENOMEM;
1128                 return 0;
1129         }
1130
1131         if (ttm->page_flags & TTM_PAGE_FLAG_SG)
1132                 return 0;
1133
1134         return ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx);
1135 }
1136
1137 /*
1138  * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
1139  *
1140  * Unmaps pages of a ttm_tt object from the device address space and
1141  * unpopulates the page array backing it.
1142  */
1143 static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
1144                                      struct ttm_tt *ttm)
1145 {
1146         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1147         struct amdgpu_device *adev;
1148
1149         if (gtt && gtt->userptr) {
1150                 amdgpu_ttm_tt_set_user_pages(ttm, NULL);
1151                 kfree(ttm->sg);
1152                 ttm->sg = NULL;
1153                 return;
1154         }
1155
1156         if (ttm->page_flags & TTM_PAGE_FLAG_SG)
1157                 return;
1158
1159         adev = amdgpu_ttm_adev(bdev);
1160         return ttm_pool_free(&adev->mman.bdev.pool, ttm);
1161 }
1162
1163 /**
1164  * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
1165  * task
1166  *
1167  * @bo: The ttm_buffer_object to bind this userptr to
1168  * @addr:  The address in the current tasks VM space to use
1169  * @flags: Requirements of userptr object.
1170  *
1171  * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages
1172  * to current task
1173  */
1174 int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
1175                               uint64_t addr, uint32_t flags)
1176 {
1177         struct amdgpu_ttm_tt *gtt;
1178
1179         if (!bo->ttm) {
1180                 /* TODO: We want a separate TTM object type for userptrs */
1181                 bo->ttm = amdgpu_ttm_tt_create(bo, 0);
1182                 if (bo->ttm == NULL)
1183                         return -ENOMEM;
1184         }
1185
1186         /* Set TTM_PAGE_FLAG_SG before populate but after create. */
1187         bo->ttm->page_flags |= TTM_PAGE_FLAG_SG;
1188
1189         gtt = (void *)bo->ttm;
1190         gtt->userptr = addr;
1191         gtt->userflags = flags;
1192
1193         if (gtt->usertask)
1194                 put_task_struct(gtt->usertask);
1195         gtt->usertask = current->group_leader;
1196         get_task_struct(gtt->usertask);
1197
1198         return 0;
1199 }
1200
1201 /*
1202  * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
1203  */
1204 struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
1205 {
1206         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1207
1208         if (gtt == NULL)
1209                 return NULL;
1210
1211         if (gtt->usertask == NULL)
1212                 return NULL;
1213
1214         return gtt->usertask->mm;
1215 }
1216
1217 /*
1218  * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lies inside an
1219  * address range for the current task.
1220  *
1221  */
1222 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
1223                                   unsigned long end)
1224 {
1225         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1226         unsigned long size;
1227
1228         if (gtt == NULL || !gtt->userptr)
1229                 return false;
1230
1231         /* Return false if no part of the ttm_tt object lies within
1232          * the range
1233          */
1234         size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
1235         if (gtt->userptr > end || gtt->userptr + size <= start)
1236                 return false;
1237
1238         return true;
1239 }
1240
1241 /*
1242  * amdgpu_ttm_tt_is_userptr - Are the pages backed by a userptr?
1243  */
1244 bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
1245 {
1246         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1247
1248         if (gtt == NULL || !gtt->userptr)
1249                 return false;
1250
1251         return true;
1252 }
1253
1254 /*
1255  * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
1256  */
1257 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
1258 {
1259         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1260
1261         if (gtt == NULL)
1262                 return false;
1263
1264         return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
1265 }
1266
1267 /**
1268  * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
1269  *
1270  * @ttm: The ttm_tt object to compute the flags for
1271  * @mem: The memory registry backing this ttm_tt object
1272  *
1273  * Figure out the flags to use for a VM PDE (Page Directory Entry).
1274  */
1275 uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
1276 {
1277         uint64_t flags = 0;
1278
1279         if (mem && mem->mem_type != TTM_PL_SYSTEM)
1280                 flags |= AMDGPU_PTE_VALID;
1281
1282         if (mem && (mem->mem_type == TTM_PL_TT ||
1283                     mem->mem_type == AMDGPU_PL_PREEMPT)) {
1284                 flags |= AMDGPU_PTE_SYSTEM;
1285
1286                 if (ttm->caching == ttm_cached)
1287                         flags |= AMDGPU_PTE_SNOOPED;
1288         }
1289
1290         if (mem && mem->mem_type == TTM_PL_VRAM &&
1291                         mem->bus.caching == ttm_cached)
1292                 flags |= AMDGPU_PTE_SNOOPED;
1293
1294         return flags;
1295 }
1296
1297 /**
1298  * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
1299  *
1300  * @adev: amdgpu_device pointer
1301  * @ttm: The ttm_tt object to compute the flags for
1302  * @mem: The memory registry backing this ttm_tt object
1303  *
1304  * Figure out the flags to use for a VM PTE (Page Table Entry).
1305  */
1306 uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
1307                                  struct ttm_resource *mem)
1308 {
1309         uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);
1310
1311         flags |= adev->gart.gart_pte_flags;
1312         flags |= AMDGPU_PTE_READABLE;
1313
1314         if (!amdgpu_ttm_tt_is_readonly(ttm))
1315                 flags |= AMDGPU_PTE_WRITEABLE;
1316
1317         return flags;
1318 }
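/*
 * For illustration: a writable, cached GTT mapping ends up with
 * AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED |
 * AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE plus the ASIC specific
 * gart.gart_pte_flags.
 */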
1319
1320 /*
1321  * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
1322  * object.
1323  *
1324  * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
1325  * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
1326  * it can find space for a new object and by ttm_bo_force_list_clean() which is
1327  * used to clean out a memory space.
1328  */
1329 static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
1330                                             const struct ttm_place *place)
1331 {
1332         unsigned long num_pages = bo->resource->num_pages;
1333         struct amdgpu_res_cursor cursor;
1334         struct dma_resv_list *flist;
1335         struct dma_fence *f;
1336         int i;
1337
1338         /* Swapout? */
1339         if (bo->resource->mem_type == TTM_PL_SYSTEM)
1340                 return true;
1341
1342         if (bo->type == ttm_bo_type_kernel &&
1343             !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
1344                 return false;
1345
1346         /* If bo is a KFD BO, check if the bo belongs to the current process.
1347          * If true, then return false as any KFD process needs all its BOs to
1348          * be resident to run successfully
1349          */
1350         flist = dma_resv_shared_list(bo->base.resv);
1351         if (flist) {
1352                 for (i = 0; i < flist->shared_count; ++i) {
1353                         f = rcu_dereference_protected(flist->shared[i],
1354                                 dma_resv_held(bo->base.resv));
1355                         if (amdkfd_fence_check_mm(f, current->mm))
1356                                 return false;
1357                 }
1358         }
1359
1360         switch (bo->resource->mem_type) {
1361         case AMDGPU_PL_PREEMPT:
1362                 /* Preemptible BOs don't own system resources managed by the
1363                  * driver (pages, VRAM, GART space). They point to resources
1364                  * owned by someone else (e.g. pageable memory in user mode
1365                  * or a DMABuf). They are used in a preemptible context so we
1366                  * can guarantee no deadlocks and good QoS in case of MMU
1367                  * notifiers or DMABuf move notifiers from the resource owner.
1368                  */
1369                 return false;
1370         case TTM_PL_TT:
1371                 if (amdgpu_bo_is_amdgpu_bo(bo) &&
1372                     amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
1373                         return false;
1374                 return true;
1375
1376         case TTM_PL_VRAM:
1377                 /* Check each drm MM node individually */
1378                 amdgpu_res_first(bo->resource, 0, (u64)num_pages << PAGE_SHIFT,
1379                                  &cursor);
1380                 while (cursor.remaining) {
1381                         if (place->fpfn < PFN_DOWN(cursor.start + cursor.size)
1382                             && !(place->lpfn &&
1383                                  place->lpfn <= PFN_DOWN(cursor.start)))
1384                                 return true;
1385
1386                         amdgpu_res_next(&cursor, cursor.size);
1387                 }
1388                 return false;
1389
1390         default:
1391                 break;
1392         }
1393
1394         return ttm_bo_eviction_valuable(bo, place);
1395 }
1396
1397 /**
1398  * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
1399  *
1400  * @bo:  The buffer object to read/write
1401  * @offset:  Offset into buffer object
1402  * @buf:  Secondary buffer to write/read from
1403  * @len: Length in bytes of access
1404  * @write:  true if writing
1405  *
1406  * This is used to access VRAM that backs a buffer object via MMIO
1407  * access for debugging purposes.
1408  */
1409 static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
1410                                     unsigned long offset, void *buf, int len,
1411                                     int write)
1412 {
1413         struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1414         struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
1415         struct amdgpu_res_cursor cursor;
1416         unsigned long flags;
1417         uint32_t value = 0;
1418         int ret = 0;
1419
1420         if (bo->resource->mem_type != TTM_PL_VRAM)
1421                 return -EIO;
1422
1423         amdgpu_res_first(bo->resource, offset, len, &cursor);
1424         while (cursor.remaining) {
1425                 uint64_t aligned_pos = cursor.start & ~(uint64_t)3;
1426                 uint64_t bytes = 4 - (cursor.start & 3);
1427                 uint32_t shift = (cursor.start & 3) * 8;
1428                 uint32_t mask = 0xffffffff << shift;
1429
1430                 if (cursor.size < bytes) {
1431                         mask &= 0xffffffff >> (bytes - cursor.size) * 8;
1432                         bytes = cursor.size;
1433                 }
1434
1435                 if (mask != 0xffffffff) {
1436                         spin_lock_irqsave(&adev->mmio_idx_lock, flags);
1437                         WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
1438                         WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
1439                         value = RREG32_NO_KIQ(mmMM_DATA);
1440                         if (write) {
1441                                 value &= ~mask;
1442                                 value |= (*(uint32_t *)buf << shift) & mask;
1443                                 WREG32_NO_KIQ(mmMM_DATA, value);
1444                         }
1445                         spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
1446                         if (!write) {
1447                                 value = (value & mask) >> shift;
1448                                 memcpy(buf, &value, bytes);
1449                         }
1450                 } else {
1451                         bytes = cursor.size & ~0x3ULL;
1452                         amdgpu_device_vram_access(adev, cursor.start,
1453                                                   (uint32_t *)buf, bytes,
1454                                                   write);
1455                 }
1456
1457                 ret += bytes;
1458                 buf = (uint8_t *)buf + bytes;
1459                 amdgpu_res_next(&cursor, bytes);
1460         }
1461
1462         return ret;
1463 }
1464
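/*
 * amdgpu_bo_delete_mem_notify - notify the driver that the backing memory of
 * a BO is released, treated as a move to no resource.
 */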
1465 static void
1466 amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
1467 {
1468         amdgpu_bo_move_notify(bo, false, NULL);
1469 }
1470
1471 static struct ttm_device_funcs amdgpu_bo_driver = {
1472         .ttm_tt_create = &amdgpu_ttm_tt_create,
1473         .ttm_tt_populate = &amdgpu_ttm_tt_populate,
1474         .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
1475         .ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
1476         .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
1477         .evict_flags = &amdgpu_evict_flags,
1478         .move = &amdgpu_bo_move,
1479         .delete_mem_notify = &amdgpu_bo_delete_mem_notify,
1480         .release_notify = &amdgpu_bo_release_notify,
1481         .io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
1482         .io_mem_pfn = amdgpu_ttm_io_mem_pfn,
1483         .access_memory = &amdgpu_ttm_access_memory,
1484         .del_from_lru_notify = &amdgpu_vm_del_from_lru_notify
1485 };
1486
1487 /*
1488  * Firmware Reservation functions
1489  */
1490 /**
1491  * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
1492  *
1493  * @adev: amdgpu_device pointer
1494  *
1495  * free fw reserved vram if it has been reserved.
1496  */
1497 static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
1498 {
1499         amdgpu_bo_free_kernel(&adev->mman.fw_vram_usage_reserved_bo,
1500                 NULL, &adev->mman.fw_vram_usage_va);
1501 }
1502
1503 /**
1504  * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
1505  *
1506  * @adev: amdgpu_device pointer
1507  *
1508  * create bo vram reservation from fw.
1509  */
1510 static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
1511 {
1512         uint64_t vram_size = adev->gmc.visible_vram_size;
1513
1514         adev->mman.fw_vram_usage_va = NULL;
1515         adev->mman.fw_vram_usage_reserved_bo = NULL;
1516
1517         if (adev->mman.fw_vram_usage_size == 0 ||
1518             adev->mman.fw_vram_usage_size > vram_size)
1519                 return 0;
1520
1521         return amdgpu_bo_create_kernel_at(adev,
1522                                           adev->mman.fw_vram_usage_start_offset,
1523                                           adev->mman.fw_vram_usage_size,
1524                                           AMDGPU_GEM_DOMAIN_VRAM,
1525                                           &adev->mman.fw_vram_usage_reserved_bo,
1526                                           &adev->mman.fw_vram_usage_va);
1527 }
1528
1529 /*
1530  * Memory training reservation functions
1531  */
1532
1533 /**
1534  * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
1535  *
1536  * @adev: amdgpu_device pointer
1537  *
1538  * free memory training reserved vram if it has been reserved.
1539  */
1540 static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
1541 {
1542         struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1543
1544         ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
1545         amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
1546         ctx->c2p_bo = NULL;
1547
1548         return 0;
1549 }
1550
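/**
 * amdgpu_ttm_training_data_block_init - set up the memory training context
 *
 * @adev: amdgpu_device pointer
 *
 * Places the c2p training data buffer just below the IP discovery TMR at the
 * top of VRAM, records the fixed p2c training data offset from the end of
 * VRAM, and stores the GDDR6 training data size.
 */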
1551 static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
1552 {
1553         struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1554
1555         memset(ctx, 0, sizeof(*ctx));
1556
1557         ctx->c2p_train_data_offset =
1558                 ALIGN((adev->gmc.mc_vram_size - adev->mman.discovery_tmr_size - SZ_1M), SZ_1M);
1559         ctx->p2c_train_data_offset =
1560                 (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
1561         ctx->train_data_size =
1562                 GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
1563
1564         DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
1565                         ctx->train_data_size,
1566                         ctx->p2c_train_data_offset,
1567                         ctx->c2p_train_data_offset);
1568 }
1569
1570 /*
1571  * reserve TMR memory at the top of VRAM which holds
1572  * IP Discovery data and is protected by PSP.
1573  */
1574 static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
1575 {
1576         int ret;
1577         struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1578         bool mem_train_support = false;
1579
1580         if (!amdgpu_sriov_vf(adev)) {
1581                 if (amdgpu_atomfirmware_mem_training_supported(adev))
1582                         mem_train_support = true;
1583                 else
1584                         DRM_DEBUG("memory training is not supported!\n");
1585         }
1586
1587         /*
1588          * Query the reserved TMR size through atom firmwareinfo for Sienna_Cichlid and onwards for all
1589          * the use cases (IP discovery/G6 memory training/profiling/diagnostic data, etc.)
1590          *
1591          * Otherwise, fall back to the legacy approach of checking and reserving TMR blocks for IP
1592          * discovery data and G6 memory training data respectively.
1593          */
1594         adev->mman.discovery_tmr_size =
1595                 amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
1596         if (!adev->mman.discovery_tmr_size)
1597                 adev->mman.discovery_tmr_size = DISCOVERY_TMR_OFFSET;
1598
1599         if (mem_train_support) {
1600                 /* reserve vram for mem train according to TMR location */
1601                 amdgpu_ttm_training_data_block_init(adev);
1602                 ret = amdgpu_bo_create_kernel_at(adev,
1603                                          ctx->c2p_train_data_offset,
1604                                          ctx->train_data_size,
1605                                          AMDGPU_GEM_DOMAIN_VRAM,
1606                                          &ctx->c2p_bo,
1607                                          NULL);
1608                 if (ret) {
1609                         DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
1610                         amdgpu_ttm_training_reserve_vram_fini(adev);
1611                         return ret;
1612                 }
1613                 ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
1614         }
1615
1616         ret = amdgpu_bo_create_kernel_at(adev,
1617                                 adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
1618                                 adev->mman.discovery_tmr_size,
1619                                 AMDGPU_GEM_DOMAIN_VRAM,
1620                                 &adev->mman.discovery_memory,
1621                                 NULL);
1622         if (ret) {
1623                 DRM_ERROR("alloc tmr failed(%d)!\n", ret);
1624                 amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
1625                 return ret;
1626         }
1627
1628         return 0;
1629 }
1630
1631 /*
1632  * amdgpu_ttm_init - Init the memory management (ttm) as well as various
1633  * gtt/vram related fields.
1634  *
1635  * This initializes all of the memory space pools that the TTM layer
1636  * will need such as the GTT space (system memory mapped to the device),
1637  * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
1638  * can be mapped per VMID.
1639  */
1640 int amdgpu_ttm_init(struct amdgpu_device *adev)
1641 {
1642         uint64_t gtt_size;
1643         int r;
1644         u64 vis_vram_limit;
1645
1646         mutex_init(&adev->mman.gtt_window_lock);
1647
1648         /* No other users of the address space, so set it to 0 */
1649         r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
1650                                adev_to_drm(adev)->anon_inode->i_mapping,
1651                                adev_to_drm(adev)->vma_offset_manager,
1652                                adev->need_swiotlb,
1653                                dma_addressing_limited(adev->dev));
1654         if (r) {
1655                 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
1656                 return r;
1657         }
1658         adev->mman.initialized = true;
1659
1660         /* Initialize VRAM pool with all of VRAM divided into pages */
1661         r = amdgpu_vram_mgr_init(adev);
1662         if (r) {
1663                 DRM_ERROR("Failed initializing VRAM heap.\n");
1664                 return r;
1665         }
1666
1667         /* Reduce size of CPU-visible VRAM if requested */
1668         vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
1669         if (amdgpu_vis_vram_limit > 0 &&
1670             vis_vram_limit <= adev->gmc.visible_vram_size)
1671                 adev->gmc.visible_vram_size = vis_vram_limit;
1672
1673         /* Change the size here instead of the init above so only lpfn is affected */
1674         amdgpu_ttm_set_buffer_funcs_status(adev, false);
1675 #ifdef CONFIG_64BIT
1676 #ifdef CONFIG_X86
1677         if (adev->gmc.xgmi.connected_to_cpu)
1678                 adev->mman.aper_base_kaddr = ioremap_cache(adev->gmc.aper_base,
1679                                 adev->gmc.visible_vram_size);
1681         else
1682 #endif
1683                 adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
1684                                 adev->gmc.visible_vram_size);
1685 #endif
1686
1687         /*
1688          * The reserved VRAM for firmware must be pinned to the specified
1689          * place in VRAM, so reserve it early.
1690          */
1691         r = amdgpu_ttm_fw_reserve_vram_init(adev);
1692         if (r)
1693                 return r;
1695
1696         /*
1697          * Only NAVI10 and onward ASICs support IP discovery.
1698          * If IP discovery is enabled, a block of memory should be
1699          * reserved for the IP discovery data.
1700          */
1701         if (adev->mman.discovery_bin) {
1702                 r = amdgpu_ttm_reserve_tmr(adev);
1703                 if (r)
1704                         return r;
1705         }
1706
1707         /* Allocate memory as required for VGA.
1708          * This is used for VGA emulation and pre-OS scanout buffers to
1709          * avoid display artifacts while transitioning between the pre-OS
1710          * environment and the driver. */
1711         r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size,
1712                                        AMDGPU_GEM_DOMAIN_VRAM,
1713                                        &adev->mman.stolen_vga_memory,
1714                                        NULL);
1715         if (r)
1716                 return r;
1717         r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
1718                                        adev->mman.stolen_extended_size,
1719                                        AMDGPU_GEM_DOMAIN_VRAM,
1720                                        &adev->mman.stolen_extended_memory,
1721                                        NULL);
1722         if (r)
1723                 return r;
1724         r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_reserved_offset,
1725                                        adev->mman.stolen_reserved_size,
1726                                        AMDGPU_GEM_DOMAIN_VRAM,
1727                                        &adev->mman.stolen_reserved_memory,
1728                                        NULL);
1729         if (r)
1730                 return r;
1731
1732         DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
1733                  (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
1734
1735         /* Compute GTT size, either based on 3/4 of the system RAM size
1736          * or whatever the user passed on module init. */
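        /* For example, with 16 GiB of system RAM and 8 GiB of VRAM the
         * computation below evaluates to min(max(default, 8 GiB), 12 GiB),
         * i.e. 8 GiB of GTT as long as the built-in default is smaller than
         * the VRAM size.
         */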
1737         if (amdgpu_gtt_size == -1) {
1738                 struct sysinfo si;
1739
1740                 si_meminfo(&si);
1741                 gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
1742                                adev->gmc.mc_vram_size),
1743                                ((uint64_t)si.totalram * si.mem_unit * 3/4));
1744         } else
1746                 gtt_size = (uint64_t)amdgpu_gtt_size << 20;
1747
1748         /* Initialize GTT memory pool */
1749         r = amdgpu_gtt_mgr_init(adev, gtt_size);
1750         if (r) {
1751                 DRM_ERROR("Failed initializing GTT heap.\n");
1752                 return r;
1753         }
1754         DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
1755                  (unsigned)(gtt_size / (1024 * 1024)));
1756
1757         /* Initialize preemptible memory pool */
1758         r = amdgpu_preempt_mgr_init(adev);
1759         if (r) {
1760                 DRM_ERROR("Failed initializing PREEMPT heap.\n");
1761                 return r;
1762         }
1763
1764         /* Initialize various on-chip memory pools */
1765         r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
1766         if (r) {
1767                 DRM_ERROR("Failed initializing GDS heap.\n");
1768                 return r;
1769         }
1770
1771         r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
1772         if (r) {
1773                 DRM_ERROR("Failed initializing gws heap.\n");
1774                 return r;
1775         }
1776
1777         r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
1778         if (r) {
1779                 DRM_ERROR("Failed initializing oa heap.\n");
1780                 return r;
1781         }
1782
1783         return 0;
1784 }
1785
1786 /*
1787  * amdgpu_ttm_fini - De-initialize the TTM memory pools
1788  */
1789 void amdgpu_ttm_fini(struct amdgpu_device *adev)
1790 {
1791         if (!adev->mman.initialized)
1792                 return;
1793
1794         amdgpu_ttm_training_reserve_vram_fini(adev);
1795         /* return the stolen vga memory back to VRAM */
1796         amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
1797         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
1798         /* return the IP Discovery TMR memory back to VRAM */
1799         amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
1800         if (adev->mman.stolen_reserved_size)
1801                 amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
1802                                       NULL, NULL);
1803         amdgpu_ttm_fw_reserve_vram_fini(adev);
1804
1805         amdgpu_vram_mgr_fini(adev);
1806         amdgpu_gtt_mgr_fini(adev);
1807         amdgpu_preempt_mgr_fini(adev);
1808         ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
1809         ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
1810         ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
1811         ttm_device_fini(&adev->mman.bdev);
1812         adev->mman.initialized = false;
1813         DRM_INFO("amdgpu: ttm finalized\n");
1814 }
1815
1816 /**
1817  * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
1818  *
1819  * @adev: amdgpu_device pointer
1820  * @enable: true when we can use buffer functions.
1821  *
1822  * Enable/disable use of buffer functions during suspend/resume. This should
1823  * only be called at bootup or when userspace isn't running.
1824  */
1825 void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
1826 {
1827         struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
1828         uint64_t size;
1829         int r;
1830
1831         if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
1832             adev->mman.buffer_funcs_enabled == enable)
1833                 return;
1834
1835         if (enable) {
1836                 struct amdgpu_ring *ring;
1837                 struct drm_gpu_scheduler *sched;
1838
1839                 ring = adev->mman.buffer_funcs_ring;
1840                 sched = &ring->sched;
1841                 r = drm_sched_entity_init(&adev->mman.entity,
1842                                           DRM_SCHED_PRIORITY_KERNEL, &sched,
1843                                           1, NULL);
1844                 if (r) {
1845                         DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
1846                                   r);
1847                         return;
1848                 }
1849         } else {
1850                 drm_sched_entity_destroy(&adev->mman.entity);
1851                 dma_fence_put(man->move);
1852                 man->move = NULL;
1853         }
1854
1855         /* This just adjusts TTM's idea of the VRAM size, which sets lpfn to the correct value */
1856         if (enable)
1857                 size = adev->gmc.real_vram_size;
1858         else
1859                 size = adev->gmc.visible_vram_size;
1860         man->size = size >> PAGE_SHIFT;
1861         adev->mman.buffer_funcs_enabled = enable;
1862 }
1863
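/**
 * amdgpu_copy_buffer - schedule a GPU copy between two GPU addresses
 *
 * @ring: ring to emit the copy packets on
 * @src_offset: source GPU address
 * @dst_offset: destination GPU address
 * @byte_count: number of bytes to copy
 * @resv: optional reservation object to wait on before copying
 * @fence: returned fence that signals completion of the copy
 * @direct_submit: submit the IB directly to the ring instead of the scheduler
 * @vm_needs_flush: flush the VM before the copy
 * @tmz: treat the buffers as TMZ protected
 *
 * Splits the copy into copy_max_bytes sized chunks and emits one copy packet
 * per chunk into a single IB. Returns 0 on success or a negative error code.
 */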
1864 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
1865                        uint64_t dst_offset, uint32_t byte_count,
1866                        struct dma_resv *resv,
1867                        struct dma_fence **fence, bool direct_submit,
1868                        bool vm_needs_flush, bool tmz)
1869 {
1870         enum amdgpu_ib_pool_type pool = direct_submit ? AMDGPU_IB_POOL_DIRECT :
1871                 AMDGPU_IB_POOL_DELAYED;
1872         struct amdgpu_device *adev = ring->adev;
1873         struct amdgpu_job *job;
1874
1875         uint32_t max_bytes;
1876         unsigned num_loops, num_dw;
1877         unsigned i;
1878         int r;
1879
1880         if (direct_submit && !ring->sched.ready) {
1881                 DRM_ERROR("Trying to move memory with ring turned off.\n");
1882                 return -EINVAL;
1883         }
1884
1885         max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
1886         num_loops = DIV_ROUND_UP(byte_count, max_bytes);
1887         num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
1888
1889         r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, &job);
1890         if (r)
1891                 return r;
1892
1893         if (vm_needs_flush) {
1894                 job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ?
1895                                         adev->gmc.pdb0_bo : adev->gart.bo);
1896                 job->vm_needs_flush = true;
1897         }
1898         if (resv) {
1899                 r = amdgpu_sync_resv(adev, &job->sync, resv,
1900                                      AMDGPU_SYNC_ALWAYS,
1901                                      AMDGPU_FENCE_OWNER_UNDEFINED);
1902                 if (r) {
1903                         DRM_ERROR("sync failed (%d).\n", r);
1904                         goto error_free;
1905                 }
1906         }
1907
1908         for (i = 0; i < num_loops; i++) {
1909                 uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
1910
1911                 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
1912                                         dst_offset, cur_size_in_bytes, tmz);
1913
1914                 src_offset += cur_size_in_bytes;
1915                 dst_offset += cur_size_in_bytes;
1916                 byte_count -= cur_size_in_bytes;
1917         }
1918
1919         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
1920         WARN_ON(job->ibs[0].length_dw > num_dw);
1921         if (direct_submit)
1922                 r = amdgpu_job_submit_direct(job, ring, fence);
1923         else
1924                 r = amdgpu_job_submit(job, &adev->mman.entity,
1925                                       AMDGPU_FENCE_OWNER_UNDEFINED, fence);
1926         if (r)
1927                 goto error_free;
1928
1929         return r;
1930
1931 error_free:
1932         amdgpu_job_free(job);
1933         DRM_ERROR("Error scheduling IBs (%d)\n", r);
1934         return r;
1935 }
1936
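/**
 * amdgpu_fill_buffer - fill a buffer object with a 32-bit pattern
 *
 * @bo: buffer object to fill
 * @src_data: 32-bit value the buffer is filled with
 * @resv: optional reservation object to wait on first
 * @fence: returned fence that signals completion of the fill
 *
 * Walks the BO's backing resource with a cursor and emits one fill packet per
 * fill_max_bytes sized chunk; GTT backed BOs are bound to the GART first so
 * they have a valid GPU address. Returns 0 on success or a negative error
 * code.
 */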
1937 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
1938                        uint32_t src_data,
1939                        struct dma_resv *resv,
1940                        struct dma_fence **fence)
1941 {
1942         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1943         uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
1944         struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
1945
1946         struct amdgpu_res_cursor cursor;
1947         unsigned int num_loops, num_dw;
1948         uint64_t num_bytes;
1949
1950         struct amdgpu_job *job;
1951         int r;
1952
1953         if (!adev->mman.buffer_funcs_enabled) {
1954                 DRM_ERROR("Trying to clear memory with ring turned off.\n");
1955                 return -EINVAL;
1956         }
1957
1958         if (bo->tbo.resource->mem_type == AMDGPU_PL_PREEMPT) {
1959                 DRM_ERROR("Trying to clear preemptible memory.\n");
1960                 return -EINVAL;
1961         }
1962
1963         if (bo->tbo.resource->mem_type == TTM_PL_TT) {
1964                 r = amdgpu_ttm_alloc_gart(&bo->tbo);
1965                 if (r)
1966                         return r;
1967         }
1968
1969         num_bytes = bo->tbo.resource->num_pages << PAGE_SHIFT;
1970         num_loops = 0;
1971
1972         amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor);
1973         while (cursor.remaining) {
1974                 num_loops += DIV_ROUND_UP_ULL(cursor.size, max_bytes);
1975                 amdgpu_res_next(&cursor, cursor.size);
1976         }
1977         num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
1978
1979         /* for IB padding */
1980         num_dw += 64;
1981
1982         r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED,
1983                                      &job);
1984         if (r)
1985                 return r;
1986
1987         if (resv) {
1988                 r = amdgpu_sync_resv(adev, &job->sync, resv,
1989                                      AMDGPU_SYNC_ALWAYS,
1990                                      AMDGPU_FENCE_OWNER_UNDEFINED);
1991                 if (r) {
1992                         DRM_ERROR("sync failed (%d).\n", r);
1993                         goto error_free;
1994                 }
1995         }
1996
1997         amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor);
1998         while (cursor.remaining) {
1999                 uint32_t cur_size = min_t(uint64_t, cursor.size, max_bytes);
2000                 uint64_t dst_addr = cursor.start;
2001
2002                 dst_addr += amdgpu_ttm_domain_start(adev,
2003                                                     bo->tbo.resource->mem_type);
2004                 amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
2005                                         cur_size);
2006
2007                 amdgpu_res_next(&cursor, cur_size);
2008         }
2009
2010         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2011         WARN_ON(job->ibs[0].length_dw > num_dw);
2012         r = amdgpu_job_submit(job, &adev->mman.entity,
2013                               AMDGPU_FENCE_OWNER_UNDEFINED, fence);
2014         if (r)
2015                 goto error_free;
2016
2017         return 0;
2018
2019 error_free:
2020         amdgpu_job_free(job);
2021         return r;
2022 }
2023
2024 #if defined(CONFIG_DEBUG_FS)
2025
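/*
 * debugfs show callbacks that dump the state of the individual TTM resource
 * managers (VRAM, GTT, GDS, GWS, OA) and the TTM page pool.
 */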
2026 static int amdgpu_mm_vram_table_show(struct seq_file *m, void *unused)
2027 {
2028         struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
2029         struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
2030                                                             TTM_PL_VRAM);
2031         struct drm_printer p = drm_seq_file_printer(m);
2032
2033         man->func->debug(man, &p);
2034         return 0;
2035 }
2036
2037 static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused)
2038 {
2039         struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
2040
2041         return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
2042 }
2043
2044 static int amdgpu_mm_tt_table_show(struct seq_file *m, void *unused)
2045 {
2046         struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
2047         struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
2048                                                             TTM_PL_TT);
2049         struct drm_printer p = drm_seq_file_printer(m);
2050
2051         man->func->debug(man, &p);
2052         return 0;
2053 }
2054
2055 static int amdgpu_mm_gds_table_show(struct seq_file *m, void *unused)
2056 {
2057         struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
2058         struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
2059                                                             AMDGPU_PL_GDS);
2060         struct drm_printer p = drm_seq_file_printer(m);
2061
2062         man->func->debug(man, &p);
2063         return 0;
2064 }
2065
2066 static int amdgpu_mm_gws_table_show(struct seq_file *m, void *unused)
2067 {
2068         struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
2069         struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
2070                                                             AMDGPU_PL_GWS);
2071         struct drm_printer p = drm_seq_file_printer(m);
2072
2073         man->func->debug(man, &p);
2074         return 0;
2075 }
2076
2077 static int amdgpu_mm_oa_table_show(struct seq_file *m, void *unused)
2078 {
2079         struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
2080         struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
2081                                                             AMDGPU_PL_OA);
2082         struct drm_printer p = drm_seq_file_printer(m);
2083
2084         man->func->debug(man, &p);
2085         return 0;
2086 }
2087
2088 DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_vram_table);
2089 DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_tt_table);
2090 DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_gds_table);
2091 DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_gws_table);
2092 DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_oa_table);
2093 DEFINE_SHOW_ATTRIBUTE(amdgpu_ttm_page_pool);
2094
2095 /*
2096  * amdgpu_ttm_vram_read - Linear read access to VRAM
2097  *
2098  * Accesses VRAM via MMIO for debugging purposes.
2099  */
2100 static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
2101                                     size_t size, loff_t *pos)
2102 {
2103         struct amdgpu_device *adev = file_inode(f)->i_private;
2104         ssize_t result = 0;
2105
2106         if (size & 0x3 || *pos & 0x3)
2107                 return -EINVAL;
2108
2109         if (*pos >= adev->gmc.mc_vram_size)
2110                 return -ENXIO;
2111
2112         size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
2113         while (size) {
2114                 size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
2115                 uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];
2116
2117                 amdgpu_device_vram_access(adev, *pos, value, bytes, false);
2118                 if (copy_to_user(buf, value, bytes))
2119                         return -EFAULT;
2120
2121                 result += bytes;
2122                 buf += bytes;
2123                 *pos += bytes;
2124                 size -= bytes;
2125         }
2126
2127         return result;
2128 }
2129
2130 /*
2131  * amdgpu_ttm_vram_write - Linear write access to VRAM
2132  *
2133  * Accesses VRAM via MMIO for debugging purposes.
2134  */
2135 static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
2136                                     size_t size, loff_t *pos)
2137 {
2138         struct amdgpu_device *adev = file_inode(f)->i_private;
2139         ssize_t result = 0;
2140         int r;
2141
2142         if (size & 0x3 || *pos & 0x3)
2143                 return -EINVAL;
2144
2145         if (*pos >= adev->gmc.mc_vram_size)
2146                 return -ENXIO;
2147
2148         while (size) {
2149                 unsigned long flags;
2150                 uint32_t value;
2151
2152                 if (*pos >= adev->gmc.mc_vram_size)
2153                         return result;
2154
2155                 r = get_user(value, (uint32_t *)buf);
2156                 if (r)
2157                         return r;
2158
2159                 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
2160                 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
2161                 WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
2162                 WREG32_NO_KIQ(mmMM_DATA, value);
2163                 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
2164
2165                 result += 4;
2166                 buf += 4;
2167                 *pos += 4;
2168                 size -= 4;
2169         }
2170
2171         return result;
2172 }
2173
2174 static const struct file_operations amdgpu_ttm_vram_fops = {
2175         .owner = THIS_MODULE,
2176         .read = amdgpu_ttm_vram_read,
2177         .write = amdgpu_ttm_vram_write,
2178         .llseek = default_llseek,
2179 };
2180
2181 /*
2182  * amdgpu_iomem_read - Virtual read access to GPU mapped memory
2183  *
2184  * This function is used to read memory that has been mapped to the
2185  * GPU and the known addresses are not physical addresses but instead
2186  * bus addresses (e.g., what you'd put in an IB or ring buffer).
2187  */
2188 static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
2189                                  size_t size, loff_t *pos)
2190 {
2191         struct amdgpu_device *adev = file_inode(f)->i_private;
2192         struct iommu_domain *dom;
2193         ssize_t result = 0;
2194         int r;
2195
2196         /* retrieve the IOMMU domain if any for this device */
2197         dom = iommu_get_domain_for_dev(adev->dev);
2198
2199         while (size) {
2200                 phys_addr_t addr = *pos & PAGE_MASK;
2201                 loff_t off = *pos & ~PAGE_MASK;
2202                 size_t bytes = PAGE_SIZE - off;
2203                 unsigned long pfn;
2204                 struct page *p;
2205                 void *ptr;
2206
2207                 bytes = bytes < size ? bytes : size;
2208
2209                 /* Translate the bus address to a physical address.  If
2210                  * the domain is NULL it means there is no IOMMU active
2211                  * and the address translation is the identity
2212                  */
2213                 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2214
2215                 pfn = addr >> PAGE_SHIFT;
2216                 if (!pfn_valid(pfn))
2217                         return -EPERM;
2218
2219                 p = pfn_to_page(pfn);
2220                 if (p->mapping != adev->mman.bdev.dev_mapping)
2221                         return -EPERM;
2222
2223                 ptr = kmap(p);
2224                 r = copy_to_user(buf, ptr + off, bytes);
2225                 kunmap(p);
2226                 if (r)
2227                         return -EFAULT;
2228
2229                 size -= bytes;
2230                 *pos += bytes;
2231                 result += bytes;
2232         }
2233
2234         return result;
2235 }
2236
2237 /*
2238  * amdgpu_iomem_write - Virtual write access to GPU mapped memory
2239  *
2240  * This function is used to write memory that has been mapped to the
2241  * GPU and the known addresses are not physical addresses but instead
2242  * bus addresses (e.g., what you'd put in an IB or ring buffer).
2243  */
2244 static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
2245                                  size_t size, loff_t *pos)
2246 {
2247         struct amdgpu_device *adev = file_inode(f)->i_private;
2248         struct iommu_domain *dom;
2249         ssize_t result = 0;
2250         int r;
2251
2252         dom = iommu_get_domain_for_dev(adev->dev);
2253
2254         while (size) {
2255                 phys_addr_t addr = *pos & PAGE_MASK;
2256                 loff_t off = *pos & ~PAGE_MASK;
2257                 size_t bytes = PAGE_SIZE - off;
2258                 unsigned long pfn;
2259                 struct page *p;
2260                 void *ptr;
2261
2262                 bytes = bytes < size ? bytes : size;
2263
2264                 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2265
2266                 pfn = addr >> PAGE_SHIFT;
2267                 if (!pfn_valid(pfn))
2268                         return -EPERM;
2269
2270                 p = pfn_to_page(pfn);
2271                 if (p->mapping != adev->mman.bdev.dev_mapping)
2272                         return -EPERM;
2273
2274                 ptr = kmap(p);
2275                 r = copy_from_user(ptr + off, buf, bytes);
2276                 kunmap(p);
2277                 if (r)
2278                         return -EFAULT;
2279
2280                 size -= bytes;
2281                 *pos += bytes;
2282                 result += bytes;
2283         }
2284
2285         return result;
2286 }
2287
2288 static const struct file_operations amdgpu_ttm_iomem_fops = {
2289         .owner = THIS_MODULE,
2290         .read = amdgpu_iomem_read,
2291         .write = amdgpu_iomem_write,
2292         .llseek = default_llseek
2293 };
2294
2295 #endif
2296
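/**
 * amdgpu_ttm_debugfs_init - register the TTM debugfs entries
 *
 * @adev: amdgpu_device pointer
 *
 * Creates the amdgpu_vram, amdgpu_iomem and per-manager debugfs files under
 * the primary DRM minor's debugfs directory. Does nothing when
 * CONFIG_DEBUG_FS is not enabled.
 */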
2297 void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
2298 {
2299 #if defined(CONFIG_DEBUG_FS)
2300         struct drm_minor *minor = adev_to_drm(adev)->primary;
2301         struct dentry *root = minor->debugfs_root;
2302
2303         debugfs_create_file_size("amdgpu_vram", 0444, root, adev,
2304                                  &amdgpu_ttm_vram_fops, adev->gmc.mc_vram_size);
2305         debugfs_create_file("amdgpu_iomem", 0444, root, adev,
2306                             &amdgpu_ttm_iomem_fops);
2307         debugfs_create_file("amdgpu_vram_mm", 0444, root, adev,
2308                             &amdgpu_mm_vram_table_fops);
2309         debugfs_create_file("amdgpu_gtt_mm", 0444, root, adev,
2310                             &amdgpu_mm_tt_table_fops);
2311         debugfs_create_file("amdgpu_gds_mm", 0444, root, adev,
2312                             &amdgpu_mm_gds_table_fops);
2313         debugfs_create_file("amdgpu_gws_mm", 0444, root, adev,
2314                             &amdgpu_mm_gws_table_fops);
2315         debugfs_create_file("amdgpu_oa_mm", 0444, root, adev,
2316                             &amdgpu_mm_oa_table_fops);
2317         debugfs_create_file("ttm_page_pool", 0444, root, adev,
2318                             &amdgpu_ttm_page_pool_fops);
2319 #endif
2320 }