drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/dma-fence-array.h>
29 #include <linux/interval_tree_generic.h>
30 #include <linux/idr.h>
31 #include <drm/drmP.h>
32 #include <drm/amdgpu_drm.h>
33 #include "amdgpu.h"
34 #include "amdgpu_trace.h"
35 #include "amdgpu_amdkfd.h"
36
37 /*
38  * GPUVM
39  * GPUVM is similar to the legacy gart on older asics, however
40  * rather than there being a single global gart table
41  * for the entire GPU, there are multiple VM page tables active
42  * at any given time.  The VM page tables can contain a mix of
43  * vram pages and system memory pages, and system memory pages
44  * can be mapped as snooped (cached system pages) or unsnooped
45  * (uncached system pages).
46  * Each VM has an ID associated with it and there is a page table
47  * associated with each VMID.  When executing a command buffer,
48  * the kernel tells the ring what VMID to use for that command
49  * buffer.  VMIDs are allocated dynamically as commands are submitted.
50  * The userspace drivers maintain their own address space and the kernel
51  * sets up their page tables accordingly when they submit their
52  * command buffers and a VMID is assigned.
53  * Cayman/Trinity support up to 8 active VMs at any given time;
54  * SI supports 16.
55  */
56
57 #define START(node) ((node)->start)
58 #define LAST(node) ((node)->last)
59
60 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
61                      START, LAST, static, amdgpu_vm_it)
62
63 #undef START
64 #undef LAST
65
66 /* Local structure. Encapsulate some VM table update parameters to reduce
67  * the number of function parameters
68  */
69 struct amdgpu_pte_update_params {
70         /* amdgpu device we do this update for */
71         struct amdgpu_device *adev;
72         /* optional amdgpu_vm we do this update for */
73         struct amdgpu_vm *vm;
74         /* address where to copy page table entries from */
75         uint64_t src;
76         /* indirect buffer to fill with commands */
77         struct amdgpu_ib *ib;
78         /* Function which actually does the update */
79         void (*func)(struct amdgpu_pte_update_params *params,
80                      struct amdgpu_bo *bo, uint64_t pe,
81                      uint64_t addr, unsigned count, uint32_t incr,
82                      uint64_t flags);
83         /* The next two are used during VM update by CPU:
84          *  pages_addr - DMA addresses to use for mapping
85          *  kptr - kernel pointer of the PD/PT BO that needs to be updated
86          */
87         dma_addr_t *pages_addr;
88         void *kptr;
89 };
90
91 /* Helper to disable partial resident texture feature from a fence callback */
92 struct amdgpu_prt_cb {
93         struct amdgpu_device *adev;
94         struct dma_fence_cb cb;
95 };
96
97 /**
98  * amdgpu_vm_level_shift - return the addr shift for each level
99  *
100  * @adev: amdgpu_device pointer
101  *
102  * Returns the number of bits the pfn needs to be right shifted for a level.
103  */
104 static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
105                                       unsigned level)
106 {
107         unsigned shift = 0xff;
108
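        /*
         * Example, assuming the common 9-bit block_size: the PTB shift is 0,
         * PDB0 is 9, PDB1 is 18 and PDB2 is 27, so one PDB0 entry covers
         * 512 GPU pages.
         */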
109         switch (level) {
110         case AMDGPU_VM_PDB2:
111         case AMDGPU_VM_PDB1:
112         case AMDGPU_VM_PDB0:
113                 shift = 9 * (AMDGPU_VM_PDB0 - level) +
114                         adev->vm_manager.block_size;
115                 break;
116         case AMDGPU_VM_PTB:
117                 shift = 0;
118                 break;
119         default:
120                 dev_err(adev->dev, "the level%d isn't supported.\n", level);
121         }
122
123         return shift;
124 }
125
126 /**
127  * amdgpu_vm_num_entries - return the number of entries in a PD/PT
128  *
129  * @adev: amdgpu_device pointer
130  *
131  * Calculate the number of entries in a page directory or page table.
132  */
133 static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
134                                       unsigned level)
135 {
136         unsigned shift = amdgpu_vm_level_shift(adev,
137                                                adev->vm_manager.root_level);
138
139         if (level == adev->vm_manager.root_level)
140                 /* For the root directory */
141                 return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
142         else if (level != AMDGPU_VM_PTB)
143                 /* Everything in between */
144                 return 512;
145         else
146                 /* For the page tables on the leaves */
147                 return AMDGPU_VM_PTE_COUNT(adev);
148 }
149
150 /**
151  * amdgpu_vm_bo_size - returns the size of the BOs in bytes
152  *
153  * @adev: amdgpu_device pointer
154  *
155  * Calculate the size of the BO for a page directory or page table in bytes.
156  */
157 static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
158 {
159         return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
160 }
161
162 /**
163  * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
164  *
165  * @vm: vm providing the BOs
166  * @validated: head of validation list
167  * @entry: entry to add
168  *
169  * Add the page directory to the list of BOs to
170  * validate for command submission.
171  */
172 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
173                          struct list_head *validated,
174                          struct amdgpu_bo_list_entry *entry)
175 {
176         entry->robj = vm->root.base.bo;
177         entry->priority = 0;
178         entry->tv.bo = &entry->robj->tbo;
179         entry->tv.shared = true;
180         entry->user_pages = NULL;
181         list_add(&entry->tv.head, validated);
182 }
183
184 /**
185  * amdgpu_vm_validate_pt_bos - validate the page table BOs
186  *
187  * @adev: amdgpu device pointer
188  * @vm: vm providing the BOs
189  * @validate: callback to do the validation
190  * @param: parameter for the validation callback
191  *
192  * Validate the page table BOs on command submission if necessary.
193  */
194 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
195                               int (*validate)(void *p, struct amdgpu_bo *bo),
196                               void *param)
197 {
198         struct ttm_bo_global *glob = adev->mman.bdev.glob;
199         int r;
200
201         spin_lock(&vm->status_lock);
202         while (!list_empty(&vm->evicted)) {
203                 struct amdgpu_vm_bo_base *bo_base;
204                 struct amdgpu_bo *bo;
205
206                 bo_base = list_first_entry(&vm->evicted,
207                                            struct amdgpu_vm_bo_base,
208                                            vm_status);
209                 spin_unlock(&vm->status_lock);
210
211                 bo = bo_base->bo;
212                 BUG_ON(!bo);
213                 if (bo->parent) {
214                         r = validate(param, bo);
215                         if (r)
216                                 return r;
217
218                         spin_lock(&glob->lru_lock);
219                         ttm_bo_move_to_lru_tail(&bo->tbo);
220                         if (bo->shadow)
221                                 ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
222                         spin_unlock(&glob->lru_lock);
223                 }
224
225                 if (bo->tbo.type == ttm_bo_type_kernel &&
226                     vm->use_cpu_for_update) {
227                         r = amdgpu_bo_kmap(bo, NULL);
228                         if (r)
229                                 return r;
230                 }
231
232                 spin_lock(&vm->status_lock);
233                 if (bo->tbo.type != ttm_bo_type_kernel)
234                         list_move(&bo_base->vm_status, &vm->moved);
235                 else
236                         list_move(&bo_base->vm_status, &vm->relocated);
237         }
238         spin_unlock(&vm->status_lock);
239
240         return 0;
241 }
242
243 /**
244  * amdgpu_vm_ready - check VM is ready for updates
245  *
246  * @vm: VM to check
247  *
248  * Check if all VM PDs/PTs are ready for updates
249  */
250 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
251 {
252         bool ready;
253
254         spin_lock(&vm->status_lock);
255         ready = list_empty(&vm->evicted);
256         spin_unlock(&vm->status_lock);
257
258         return ready;
259 }
260
261 /**
262  * amdgpu_vm_clear_bo - initially clear the PDs/PTs
263  *
264  * @adev: amdgpu_device pointer
265  * @bo: BO to clear
266  * @level: level this BO is at
267  *
268  * Root PD needs to be reserved when calling this.
269  */
270 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
271                               struct amdgpu_vm *vm, struct amdgpu_bo *bo,
272                               unsigned level, bool pte_support_ats)
273 {
274         struct ttm_operation_ctx ctx = { true, false };
275         struct dma_fence *fence = NULL;
276         unsigned entries, ats_entries;
277         struct amdgpu_ring *ring;
278         struct amdgpu_job *job;
279         uint64_t addr;
280         int r;
281
282         addr = amdgpu_bo_gpu_offset(bo);
283         entries = amdgpu_bo_size(bo) / 8;
284
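        /* With ATS the entries covering the range below the VA hole are
         * initialised with the default ATC value instead of being cleared
         * to zero (see the pte_support_ats handling below).
         */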
285         if (pte_support_ats) {
286                 if (level == adev->vm_manager.root_level) {
287                         ats_entries = amdgpu_vm_level_shift(adev, level);
288                         ats_entries += AMDGPU_GPU_PAGE_SHIFT;
289                         ats_entries = AMDGPU_VA_HOLE_START >> ats_entries;
290                         ats_entries = min(ats_entries, entries);
291                         entries -= ats_entries;
292                 } else {
293                         ats_entries = entries;
294                         entries = 0;
295                 }
296         } else {
297                 ats_entries = 0;
298         }
299
300         ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
301
302         r = reservation_object_reserve_shared(bo->tbo.resv);
303         if (r)
304                 return r;
305
306         r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
307         if (r)
308                 goto error;
309
310         r = amdgpu_job_alloc_with_ib(adev, 64, &job);
311         if (r)
312                 goto error;
313
314         if (ats_entries) {
315                 uint64_t ats_value;
316
317                 ats_value = AMDGPU_PTE_DEFAULT_ATC;
318                 if (level != AMDGPU_VM_PTB)
319                         ats_value |= AMDGPU_PDE_PTE;
320
321                 amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
322                                       ats_entries, 0, ats_value);
323                 addr += ats_entries * 8;
324         }
325
326         if (entries)
327                 amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
328                                       entries, 0, 0);
329
330         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
331
332         WARN_ON(job->ibs[0].length_dw > 64);
333         r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
334                              AMDGPU_FENCE_OWNER_UNDEFINED, false);
335         if (r)
336                 goto error_free;
337
338         r = amdgpu_job_submit(job, ring, &vm->entity,
339                               AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
340         if (r)
341                 goto error_free;
342
343         amdgpu_bo_fence(bo, fence, true);
344         dma_fence_put(fence);
345
346         if (bo->shadow)
347                 return amdgpu_vm_clear_bo(adev, vm, bo->shadow,
348                                           level, pte_support_ats);
349
350         return 0;
351
352 error_free:
353         amdgpu_job_free(job);
354
355 error:
356         return r;
357 }
358
359 /**
360  * amdgpu_vm_alloc_levels - allocate the PD/PT levels
361  *
362  * @adev: amdgpu_device pointer
363  * @vm: requested vm
364  * @saddr: start of the address range
365  * @eaddr: end of the address range
366  *
367  * Make sure the page directories and page tables are allocated
368  */
369 static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
370                                   struct amdgpu_vm *vm,
371                                   struct amdgpu_vm_pt *parent,
372                                   uint64_t saddr, uint64_t eaddr,
373                                   unsigned level, bool ats)
374 {
375         unsigned shift = amdgpu_vm_level_shift(adev, level);
376         unsigned pt_idx, from, to;
377         u64 flags;
378         int r;
379
380         if (!parent->entries) {
381                 unsigned num_entries = amdgpu_vm_num_entries(adev, level);
382
383                 parent->entries = kvmalloc_array(num_entries,
384                                                    sizeof(struct amdgpu_vm_pt),
385                                                    GFP_KERNEL | __GFP_ZERO);
386                 if (!parent->entries)
387                         return -ENOMEM;
388                 memset(parent->entries, 0, num_entries * sizeof(struct amdgpu_vm_pt));
389         }
390
391         from = saddr >> shift;
392         to = eaddr >> shift;
393         if (from >= amdgpu_vm_num_entries(adev, level) ||
394             to >= amdgpu_vm_num_entries(adev, level))
395                 return -EINVAL;
396
397         ++level;
398         saddr = saddr & ((1 << shift) - 1);
399         eaddr = eaddr & ((1 << shift) - 1);
400
401         flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
402         if (vm->use_cpu_for_update)
403                 flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
404         else
405                 flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
406                                 AMDGPU_GEM_CREATE_SHADOW);
407
408         /* walk over the address space and allocate the page tables */
409         for (pt_idx = from; pt_idx <= to; ++pt_idx) {
410                 struct reservation_object *resv = vm->root.base.bo->tbo.resv;
411                 struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
412                 struct amdgpu_bo *pt;
413
414                 if (!entry->base.bo) {
415                         r = amdgpu_bo_create(adev,
416                                              amdgpu_vm_bo_size(adev, level),
417                                              AMDGPU_GPU_PAGE_SIZE,
418                                              AMDGPU_GEM_DOMAIN_VRAM, flags,
419                                              ttm_bo_type_kernel, resv, &pt);
420                         if (r)
421                                 return r;
422
423                         r = amdgpu_vm_clear_bo(adev, vm, pt, level, ats);
424                         if (r) {
425                                 amdgpu_bo_unref(&pt->shadow);
426                                 amdgpu_bo_unref(&pt);
427                                 return r;
428                         }
429
430                         if (vm->use_cpu_for_update) {
431                                 r = amdgpu_bo_kmap(pt, NULL);
432                                 if (r) {
433                                         amdgpu_bo_unref(&pt->shadow);
434                                         amdgpu_bo_unref(&pt);
435                                         return r;
436                                 }
437                         }
438
439                         /* Keep a reference to the root directory to avoid
440                          * freeing them up in the wrong order.
441                          */
442                         pt->parent = amdgpu_bo_ref(parent->base.bo);
443
444                         entry->base.vm = vm;
445                         entry->base.bo = pt;
446                         list_add_tail(&entry->base.bo_list, &pt->va);
447                         spin_lock(&vm->status_lock);
448                         list_add(&entry->base.vm_status, &vm->relocated);
449                         spin_unlock(&vm->status_lock);
450                 }
451
452                 if (level < AMDGPU_VM_PTB) {
453                         uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
454                         uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
455                                 ((1 << shift) - 1);
456                         r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
457                                                    sub_eaddr, level, ats);
458                         if (r)
459                                 return r;
460                 }
461         }
462
463         return 0;
464 }
465
466 /**
467  * amdgpu_vm_alloc_pts - Allocate page tables.
468  *
469  * @adev: amdgpu_device pointer
470  * @vm: VM to allocate page tables for
471  * @saddr: Start address which needs to be allocated
472  * @size: Size from start address we need.
473  *
474  * Make sure the page tables are allocated.
475  */
476 int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
477                         struct amdgpu_vm *vm,
478                         uint64_t saddr, uint64_t size)
479 {
480         uint64_t eaddr;
481         bool ats = false;
482
483         /* validate the parameters */
484         if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
485                 return -EINVAL;
486
487         eaddr = saddr + size - 1;
488
489         if (vm->pte_support_ats)
490                 ats = saddr < AMDGPU_VA_HOLE_START;
491
492         saddr /= AMDGPU_GPU_PAGE_SIZE;
493         eaddr /= AMDGPU_GPU_PAGE_SIZE;
494
495         if (eaddr >= adev->vm_manager.max_pfn) {
496                 dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
497                         eaddr, adev->vm_manager.max_pfn);
498                 return -EINVAL;
499         }
500
501         return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr,
502                                       adev->vm_manager.root_level, ats);
503 }
504
505 /**
506  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
507  *
508  * @adev: amdgpu_device pointer
509  */
510 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
511 {
512         const struct amdgpu_ip_block *ip_block;
513         bool has_compute_vm_bug;
514         struct amdgpu_ring *ring;
515         int i;
516
517         has_compute_vm_bug = false;
518
519         ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
520         if (ip_block) {
521                 /* Compute has a VM bug for GFX version < 7.
522                  * Compute has a VM bug for GFX 8 MEC firmware version < 673. */
523                 if (ip_block->version->major <= 7)
524                         has_compute_vm_bug = true;
525                 else if (ip_block->version->major == 8)
526                         if (adev->gfx.mec_fw_version < 673)
527                                 has_compute_vm_bug = true;
528         }
529
530         for (i = 0; i < adev->num_rings; i++) {
531                 ring = adev->rings[i];
532                 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
533                         /* only compute rings */
534                         ring->has_compute_vm_bug = has_compute_vm_bug;
535                 else
536                         ring->has_compute_vm_bug = false;
537         }
538 }
539
540 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
541                                   struct amdgpu_job *job)
542 {
543         struct amdgpu_device *adev = ring->adev;
544         unsigned vmhub = ring->funcs->vmhub;
545         struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
546         struct amdgpu_vmid *id;
547         bool gds_switch_needed;
548         bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
549
550         if (job->vmid == 0)
551                 return false;
552         id = &id_mgr->ids[job->vmid];
553         gds_switch_needed = ring->funcs->emit_gds_switch && (
554                 id->gds_base != job->gds_base ||
555                 id->gds_size != job->gds_size ||
556                 id->gws_base != job->gws_base ||
557                 id->gws_size != job->gws_size ||
558                 id->oa_base != job->oa_base ||
559                 id->oa_size != job->oa_size);
560
561         if (amdgpu_vmid_had_gpu_reset(adev, id))
562                 return true;
563
564         return vm_flush_needed || gds_switch_needed;
565 }
566
567 static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
568 {
569         return (adev->gmc.real_vram_size == adev->gmc.visible_vram_size);
570 }
571
572 /**
573  * amdgpu_vm_flush - hardware flush the vm
574  *
575  * @ring: ring to use for flush
576  * @job: related job
577  * @need_pipe_sync: is a pipeline sync needed
578  *
579  * Emit a VM flush when it is necessary.
580  */
581 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
582 {
583         struct amdgpu_device *adev = ring->adev;
584         unsigned vmhub = ring->funcs->vmhub;
585         struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
586         struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
587         bool gds_switch_needed = ring->funcs->emit_gds_switch && (
588                 id->gds_base != job->gds_base ||
589                 id->gds_size != job->gds_size ||
590                 id->gws_base != job->gws_base ||
591                 id->gws_size != job->gws_size ||
592                 id->oa_base != job->oa_base ||
593                 id->oa_size != job->oa_size);
594         bool vm_flush_needed = job->vm_needs_flush;
595         bool pasid_mapping_needed = id->pasid != job->pasid ||
596                 !id->pasid_mapping ||
597                 !dma_fence_is_signaled(id->pasid_mapping);
598         struct dma_fence *fence = NULL;
599         unsigned patch_offset = 0;
600         int r;
601
602         if (amdgpu_vmid_had_gpu_reset(adev, id)) {
603                 gds_switch_needed = true;
604                 vm_flush_needed = true;
605                 pasid_mapping_needed = true;
606         }
607
608         gds_switch_needed &= !!ring->funcs->emit_gds_switch;
609         vm_flush_needed &= !!ring->funcs->emit_vm_flush;
610         pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
611                 ring->funcs->emit_wreg;
612
613         if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
614                 return 0;
615
616         if (ring->funcs->init_cond_exec)
617                 patch_offset = amdgpu_ring_init_cond_exec(ring);
618
619         if (need_pipe_sync)
620                 amdgpu_ring_emit_pipeline_sync(ring);
621
622         if (vm_flush_needed) {
623                 trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
624                 amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
625         }
626
627         if (pasid_mapping_needed)
628                 amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
629
630         if (vm_flush_needed || pasid_mapping_needed) {
631                 r = amdgpu_fence_emit(ring, &fence);
632                 if (r)
633                         return r;
634         }
635
636         if (vm_flush_needed) {
637                 mutex_lock(&id_mgr->lock);
638                 dma_fence_put(id->last_flush);
639                 id->last_flush = dma_fence_get(fence);
640                 id->current_gpu_reset_count =
641                         atomic_read(&adev->gpu_reset_counter);
642                 mutex_unlock(&id_mgr->lock);
643         }
644
645         if (pasid_mapping_needed) {
646                 id->pasid = job->pasid;
647                 dma_fence_put(id->pasid_mapping);
648                 id->pasid_mapping = dma_fence_get(fence);
649         }
650         dma_fence_put(fence);
651
652         if (ring->funcs->emit_gds_switch && gds_switch_needed) {
653                 id->gds_base = job->gds_base;
654                 id->gds_size = job->gds_size;
655                 id->gws_base = job->gws_base;
656                 id->gws_size = job->gws_size;
657                 id->oa_base = job->oa_base;
658                 id->oa_size = job->oa_size;
659                 amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
660                                             job->gds_size, job->gws_base,
661                                             job->gws_size, job->oa_base,
662                                             job->oa_size);
663         }
664
665         if (ring->funcs->patch_cond_exec)
666                 amdgpu_ring_patch_cond_exec(ring, patch_offset);
667
668         /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
669         if (ring->funcs->emit_switch_buffer) {
670                 amdgpu_ring_emit_switch_buffer(ring);
671                 amdgpu_ring_emit_switch_buffer(ring);
672         }
673         return 0;
674 }
675
676 /**
677  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
678  *
679  * @vm: requested vm
680  * @bo: requested buffer object
681  *
682  * Find @bo inside the requested vm.
683  * Search inside the @bo's vm list for the requested vm
684  * Returns the found bo_va or NULL if none is found
685  *
686  * Object has to be reserved!
687  */
688 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
689                                        struct amdgpu_bo *bo)
690 {
691         struct amdgpu_bo_va *bo_va;
692
693         list_for_each_entry(bo_va, &bo->va, base.bo_list) {
694                 if (bo_va->base.vm == vm) {
695                         return bo_va;
696                 }
697         }
698         return NULL;
699 }
700
701 /**
702  * amdgpu_vm_do_set_ptes - helper to call the right asic function
703  *
704  * @params: see amdgpu_pte_update_params definition
705  * @bo: PD/PT to update
706  * @pe: addr of the page entry
707  * @addr: dst addr to write into pe
708  * @count: number of page entries to update
709  * @incr: increase next addr by incr bytes
710  * @flags: hw access flags
711  *
712  * Traces the parameters and calls the right asic functions
713  * to setup the page table using the DMA.
714  */
715 static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
716                                   struct amdgpu_bo *bo,
717                                   uint64_t pe, uint64_t addr,
718                                   unsigned count, uint32_t incr,
719                                   uint64_t flags)
720 {
721         pe += amdgpu_bo_gpu_offset(bo);
722         trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
723
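        /* For just a few entries it is cheaper to write the PTE values
         * directly into the IB than to generate them with set_pte_pde.
         */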
724         if (count < 3) {
725                 amdgpu_vm_write_pte(params->adev, params->ib, pe,
726                                     addr | flags, count, incr);
727
728         } else {
729                 amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
730                                       count, incr, flags);
731         }
732 }
733
734 /**
735  * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
736  *
737  * @params: see amdgpu_pte_update_params definition
738  * @bo: PD/PT to update
739  * @pe: addr of the page entry
740  * @addr: dst addr to write into pe
741  * @count: number of page entries to update
742  * @incr: increase next addr by incr bytes
743  * @flags: hw access flags
744  *
745  * Traces the parameters and calls the DMA function to copy the PTEs.
746  */
747 static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
748                                    struct amdgpu_bo *bo,
749                                    uint64_t pe, uint64_t addr,
750                                    unsigned count, uint32_t incr,
751                                    uint64_t flags)
752 {
753         uint64_t src = (params->src + (addr >> 12) * 8);
754
755         pe += amdgpu_bo_gpu_offset(bo);
756         trace_amdgpu_vm_copy_ptes(pe, src, count);
757
758         amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
759 }
760
761 /**
762  * amdgpu_vm_map_gart - Resolve gart mapping of addr
763  *
764  * @pages_addr: optional DMA address to use for lookup
765  * @addr: the unmapped addr
766  *
767  * Look up the physical address of the page that the pte resolves
768  * to and return the pointer for the page table entry.
769  */
770 static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
771 {
772         uint64_t result;
773
774         /* page table offset */
775         result = pages_addr[addr >> PAGE_SHIFT];
776
777         /* in case cpu page size != gpu page size*/
778         result |= addr & (~PAGE_MASK);
779
780         result &= 0xFFFFFFFFFFFFF000ULL;
781
782         return result;
783 }
784
785 /**
786  * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
787  *
788  * @params: see amdgpu_pte_update_params definition
789  * @bo: PD/PT to update
790  * @pe: kmap addr of the page entry
791  * @addr: dst addr to write into pe
792  * @count: number of page entries to update
793  * @incr: increase next addr by incr bytes
794  * @flags: hw access flags
795  *
796  * Write count number of PT/PD entries directly.
797  */
798 static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
799                                    struct amdgpu_bo *bo,
800                                    uint64_t pe, uint64_t addr,
801                                    unsigned count, uint32_t incr,
802                                    uint64_t flags)
803 {
804         unsigned int i;
805         uint64_t value;
806
807         pe += (unsigned long)amdgpu_bo_kptr(bo);
808
809         trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
810
811         for (i = 0; i < count; i++) {
812                 value = params->pages_addr ?
813                         amdgpu_vm_map_gart(params->pages_addr, addr) :
814                         addr;
815                 amdgpu_gmc_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
816                                        i, value, flags);
817                 addr += incr;
818         }
819 }
820
821 static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
822                              void *owner)
823 {
824         struct amdgpu_sync sync;
825         int r;
826
827         amdgpu_sync_create(&sync);
828         amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false);
829         r = amdgpu_sync_wait(&sync, true);
830         amdgpu_sync_free(&sync);
831
832         return r;
833 }
834
835 /*
836  * amdgpu_vm_update_pde - update a single level in the hierarchy
837  *
838  * @params: parameters for the update
839  * @vm: requested vm
840  * @parent: parent directory
841  * @entry: entry to update
842  *
843  * Makes sure the requested entry in parent is up to date.
844  */
845 static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
846                                  struct amdgpu_vm *vm,
847                                  struct amdgpu_vm_pt *parent,
848                                  struct amdgpu_vm_pt *entry)
849 {
850         struct amdgpu_bo *bo = parent->base.bo, *pbo;
851         uint64_t pde, pt, flags;
852         unsigned level;
853
854         /* Don't update huge pages here */
855         if (entry->huge)
856                 return;
857
858         for (level = 0, pbo = bo->parent; pbo; ++level)
859                 pbo = pbo->parent;
860
861         level += params->adev->vm_manager.root_level;
862         pt = amdgpu_bo_gpu_offset(entry->base.bo);
863         flags = AMDGPU_PTE_VALID;
864         amdgpu_gmc_get_vm_pde(params->adev, level, &pt, &flags);
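        /* each entry is 8 bytes, so this is the byte offset of the PDE
         * within the parent directory
         */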
865         pde = (entry - parent->entries) * 8;
866         if (bo->shadow)
867                 params->func(params, bo->shadow, pde, pt, 1, 0, flags);
868         params->func(params, bo, pde, pt, 1, 0, flags);
869 }
870
871 /*
872  * amdgpu_vm_invalidate_level - mark all PD levels as invalid
873  *
874  * @parent: parent PD
875  *
876  * Mark all PD levels as invalid after an error.
877  */
878 static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
879                                        struct amdgpu_vm *vm,
880                                        struct amdgpu_vm_pt *parent,
881                                        unsigned level)
882 {
883         unsigned pt_idx, num_entries;
884
885         /*
886          * Recurse into the subdirectories. This recursion is harmless because
887          * we only have a maximum of 5 layers.
888          */
889         num_entries = amdgpu_vm_num_entries(adev, level);
890         for (pt_idx = 0; pt_idx < num_entries; ++pt_idx) {
891                 struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
892
893                 if (!entry->base.bo)
894                         continue;
895
896                 spin_lock(&vm->status_lock);
897                 if (list_empty(&entry->base.vm_status))
898                         list_add(&entry->base.vm_status, &vm->relocated);
899                 spin_unlock(&vm->status_lock);
900                 amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
901         }
902 }
903
904 /*
905  * amdgpu_vm_update_directories - make sure that all directories are valid
906  *
907  * @adev: amdgpu_device pointer
908  * @vm: requested vm
909  *
910  * Makes sure all directories are up to date.
911  * Returns 0 for success, error for failure.
912  */
913 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
914                                  struct amdgpu_vm *vm)
915 {
916         struct amdgpu_pte_update_params params;
917         struct amdgpu_job *job;
918         unsigned ndw = 0;
919         int r = 0;
920
921         if (list_empty(&vm->relocated))
922                 return 0;
923
924 restart:
925         memset(&params, 0, sizeof(params));
926         params.adev = adev;
927
928         if (vm->use_cpu_for_update) {
929                 r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
930                 if (unlikely(r))
931                         return r;
932
933                 params.func = amdgpu_vm_cpu_set_ptes;
934         } else {
935                 ndw = 512 * 8;
936                 r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
937                 if (r)
938                         return r;
939
940                 params.ib = &job->ibs[0];
941                 params.func = amdgpu_vm_do_set_ptes;
942         }
943
944         spin_lock(&vm->status_lock);
945         while (!list_empty(&vm->relocated)) {
946                 struct amdgpu_vm_bo_base *bo_base, *parent;
947                 struct amdgpu_vm_pt *pt, *entry;
948                 struct amdgpu_bo *bo;
949
950                 bo_base = list_first_entry(&vm->relocated,
951                                            struct amdgpu_vm_bo_base,
952                                            vm_status);
953                 list_del_init(&bo_base->vm_status);
954                 spin_unlock(&vm->status_lock);
955
956                 bo = bo_base->bo->parent;
957                 if (!bo) {
958                         spin_lock(&vm->status_lock);
959                         continue;
960                 }
961
962                 parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base,
963                                           bo_list);
964                 pt = container_of(parent, struct amdgpu_vm_pt, base);
965                 entry = container_of(bo_base, struct amdgpu_vm_pt, base);
966
967                 amdgpu_vm_update_pde(&params, vm, pt, entry);
968
969                 spin_lock(&vm->status_lock);
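                /* Stop early when the IB is nearly full; remaining entries
                 * are picked up by restarting the update below.
                 */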
970                 if (!vm->use_cpu_for_update &&
971                     (ndw - params.ib->length_dw) < 32)
972                         break;
973         }
974         spin_unlock(&vm->status_lock);
975
976         if (vm->use_cpu_for_update) {
977                 /* Flush HDP */
978                 mb();
979                 amdgpu_asic_flush_hdp(adev, NULL);
980         } else if (params.ib->length_dw == 0) {
981                 amdgpu_job_free(job);
982         } else {
983                 struct amdgpu_bo *root = vm->root.base.bo;
984                 struct amdgpu_ring *ring;
985                 struct dma_fence *fence;
986
987                 ring = container_of(vm->entity.sched, struct amdgpu_ring,
988                                     sched);
989
990                 amdgpu_ring_pad_ib(ring, params.ib);
991                 amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
992                                  AMDGPU_FENCE_OWNER_VM, false);
993                 WARN_ON(params.ib->length_dw > ndw);
994                 r = amdgpu_job_submit(job, ring, &vm->entity,
995                                       AMDGPU_FENCE_OWNER_VM, &fence);
996                 if (r)
997                         goto error;
998
999                 amdgpu_bo_fence(root, fence, true);
1000                 dma_fence_put(vm->last_update);
1001                 vm->last_update = fence;
1002         }
1003
1004         if (!list_empty(&vm->relocated))
1005                 goto restart;
1006
1007         return 0;
1008
1009 error:
1010         amdgpu_vm_invalidate_level(adev, vm, &vm->root,
1011                                    adev->vm_manager.root_level);
1012         amdgpu_job_free(job);
1013         return r;
1014 }
1015
1016 /**
1017  * amdgpu_vm_get_entry - find the entry for an address
1018  *
1019  * @p: see amdgpu_pte_update_params definition
1020  * @addr: virtual address in question
1021  * @entry: resulting entry or NULL
1022  * @parent: parent entry
1023  *
1024  * Find the vm_pt entry and its parent for the given address.
1025  */
1026 void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
1027                          struct amdgpu_vm_pt **entry,
1028                          struct amdgpu_vm_pt **parent)
1029 {
1030         unsigned level = p->adev->vm_manager.root_level;
1031
1032         *parent = NULL;
1033         *entry = &p->vm->root;
1034         while ((*entry)->entries) {
1035                 unsigned shift = amdgpu_vm_level_shift(p->adev, level++);
1036
1037                 *parent = *entry;
1038                 *entry = &(*entry)->entries[addr >> shift];
1039                 addr &= (1ULL << shift) - 1;
1040         }
1041
1042         if (level != AMDGPU_VM_PTB)
1043                 *entry = NULL;
1044 }
1045
1046 /**
1047  * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages
1048  *
1049  * @p: see amdgpu_pte_update_params definition
1050  * @entry: vm_pt entry to check
1051  * @parent: parent entry
1052  * @nptes: number of PTEs updated with this operation
1053  * @dst: destination address where the PTEs should point to
1054  * @flags: access flags for the PTEs
1055  *
1056  * Check if we can update the PD with a huge page.
1057  */
1058 static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
1059                                         struct amdgpu_vm_pt *entry,
1060                                         struct amdgpu_vm_pt *parent,
1061                                         unsigned nptes, uint64_t dst,
1062                                         uint64_t flags)
1063 {
1064         uint64_t pde;
1065
1066         /* In the case of a mixed PT the PDE must point to it */
1067         if (p->adev->asic_type >= CHIP_VEGA10 && !p->src &&
1068             nptes == AMDGPU_VM_PTE_COUNT(p->adev)) {
1069                 /* Set the huge page flag to stop scanning at this PDE */
1070                 flags |= AMDGPU_PDE_PTE;
1071         }
1072
1073         if (!(flags & AMDGPU_PDE_PTE)) {
1074                 if (entry->huge) {
1075                         /* Add the entry to the relocated list to update it. */
1076                         entry->huge = false;
1077                         spin_lock(&p->vm->status_lock);
1078                         list_move(&entry->base.vm_status, &p->vm->relocated);
1079                         spin_unlock(&p->vm->status_lock);
1080                 }
1081                 return;
1082         }
1083
1084         entry->huge = true;
1085         amdgpu_gmc_get_vm_pde(p->adev, AMDGPU_VM_PDB0, &dst, &flags);
1086
1087         pde = (entry - parent->entries) * 8;
1088         if (parent->base.bo->shadow)
1089                 p->func(p, parent->base.bo->shadow, pde, dst, 1, 0, flags);
1090         p->func(p, parent->base.bo, pde, dst, 1, 0, flags);
1091 }
1092
1093 /**
1094  * amdgpu_vm_update_ptes - make sure that page tables are valid
1095  *
1096  * @params: see amdgpu_pte_update_params definition
1097  * @vm: requested vm
1098  * @start: start of GPU address range
1099  * @end: end of GPU address range
1100  * @dst: destination address to map to, the next dst inside the function
1101  * @flags: mapping flags
1102  *
1103  * Update the page tables in the range @start - @end.
1104  * Returns 0 for success, -EINVAL for failure.
1105  */
1106 static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1107                                   uint64_t start, uint64_t end,
1108                                   uint64_t dst, uint64_t flags)
1109 {
1110         struct amdgpu_device *adev = params->adev;
1111         const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;
1112
1113         uint64_t addr, pe_start;
1114         struct amdgpu_bo *pt;
1115         unsigned nptes;
1116
1117         /* walk over the address space and update the page tables */
1118         for (addr = start; addr < end; addr += nptes,
1119              dst += nptes * AMDGPU_GPU_PAGE_SIZE) {
1120                 struct amdgpu_vm_pt *entry, *parent;
1121
1122                 amdgpu_vm_get_entry(params, addr, &entry, &parent);
1123                 if (!entry)
1124                         return -ENOENT;
1125
1126                 if ((addr & ~mask) == (end & ~mask))
1127                         nptes = end - addr;
1128                 else
1129                         nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
1130
1131                 amdgpu_vm_handle_huge_pages(params, entry, parent,
1132                                             nptes, dst, flags);
1133                 /* We don't need to update PTEs for huge pages */
1134                 if (entry->huge)
1135                         continue;
1136
1137                 pt = entry->base.bo;
1138                 pe_start = (addr & mask) * 8;
1139                 if (pt->shadow)
1140                         params->func(params, pt->shadow, pe_start, dst, nptes,
1141                                      AMDGPU_GPU_PAGE_SIZE, flags);
1142                 params->func(params, pt, pe_start, dst, nptes,
1143                              AMDGPU_GPU_PAGE_SIZE, flags);
1144         }
1145
1146         return 0;
1147 }
1148
1149 /*
1150  * amdgpu_vm_frag_ptes - add fragment information to PTEs
1151  *
1152  * @params: see amdgpu_pte_update_params definition
1153  * @vm: requested vm
1154  * @start: first PTE to handle
1155  * @end: last PTE to handle
1156  * @dst: addr those PTEs should point to
1157  * @flags: hw mapping flags
1158  * Returns 0 for success, -EINVAL for failure.
1159  */
1160 static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params  *params,
1161                                 uint64_t start, uint64_t end,
1162                                 uint64_t dst, uint64_t flags)
1163 {
1164         /**
1165          * The MC L1 TLB supports variable sized pages, based on a fragment
1166          * field in the PTE. When this field is set to a non-zero value, page
1167          * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
1168          * flags are considered valid for all PTEs within the fragment range
1169          * and corresponding mappings are assumed to be physically contiguous.
1170          *
1171          * The L1 TLB can store a single PTE for the whole fragment,
1172          * significantly increasing the space available for translation
1173          * caching. This leads to large improvements in throughput when the
1174          * TLB is under pressure.
1175          *
1176          * The L2 TLB distributes small and large fragments into two
1177          * asymmetric partitions. The large fragment cache is significantly
1178          * larger. Thus, we try to use large fragments wherever possible.
1179          * Userspace can support this by aligning virtual base address and
1180          * allocation size to the fragment size.
1181          */
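        /* For example, a fragment value of 4 marks a physically contiguous
         * 64KB range (1 << (12 + 4)) that the L1 TLB can cache with a
         * single entry.
         */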
1182         unsigned max_frag = params->adev->vm_manager.fragment_size;
1183         int r;
1184
1185         /* system pages are not contiguous */
1186         if (params->src || !(flags & AMDGPU_PTE_VALID))
1187                 return amdgpu_vm_update_ptes(params, start, end, dst, flags);
1188
1189         while (start != end) {
1190                 uint64_t frag_flags, frag_end;
1191                 unsigned frag;
1192
1193                 /* This intentionally wraps around if no bit is set */
1194                 frag = min((unsigned)ffs(start) - 1,
1195                            (unsigned)fls64(end - start) - 1);
1196                 if (frag >= max_frag) {
1197                         frag_flags = AMDGPU_PTE_FRAG(max_frag);
1198                         frag_end = end & ~((1ULL << max_frag) - 1);
1199                 } else {
1200                         frag_flags = AMDGPU_PTE_FRAG(frag);
1201                         frag_end = start + (1 << frag);
1202                 }
1203
1204                 r = amdgpu_vm_update_ptes(params, start, frag_end, dst,
1205                                           flags | frag_flags);
1206                 if (r)
1207                         return r;
1208
1209                 dst += (frag_end - start) * AMDGPU_GPU_PAGE_SIZE;
1210                 start = frag_end;
1211         }
1212
1213         return 0;
1214 }
1215
1216 /**
1217  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
1218  *
1219  * @adev: amdgpu_device pointer
1220  * @exclusive: fence we need to sync to
1221  * @pages_addr: DMA addresses to use for mapping
1222  * @vm: requested vm
1223  * @start: start of mapped range
1224  * @last: last mapped entry
1225  * @flags: flags for the entries
1226  * @addr: addr to set the area to
1227  * @fence: optional resulting fence
1228  *
1229  * Fill in the page table entries between @start and @last.
1230  * Returns 0 for success, -EINVAL for failure.
1231  */
1232 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
1233                                        struct dma_fence *exclusive,
1234                                        dma_addr_t *pages_addr,
1235                                        struct amdgpu_vm *vm,
1236                                        uint64_t start, uint64_t last,
1237                                        uint64_t flags, uint64_t addr,
1238                                        struct dma_fence **fence)
1239 {
1240         struct amdgpu_ring *ring;
1241         void *owner = AMDGPU_FENCE_OWNER_VM;
1242         unsigned nptes, ncmds, ndw;
1243         struct amdgpu_job *job;
1244         struct amdgpu_pte_update_params params;
1245         struct dma_fence *f = NULL;
1246         int r;
1247
1248         memset(&params, 0, sizeof(params));
1249         params.adev = adev;
1250         params.vm = vm;
1251
1252         /* sync to everything on unmapping */
1253         if (!(flags & AMDGPU_PTE_VALID))
1254                 owner = AMDGPU_FENCE_OWNER_UNDEFINED;
1255
1256         if (vm->use_cpu_for_update) {
1257                 /* params.src is used as a flag to indicate system memory */
1258                 if (pages_addr)
1259                         params.src = ~0;
1260
1261                 /* Wait for PT BOs to be free. PTs share the same resv. object
1262                  * as the root PD BO
1263                  */
1264                 r = amdgpu_vm_wait_pd(adev, vm, owner);
1265                 if (unlikely(r))
1266                         return r;
1267
1268                 params.func = amdgpu_vm_cpu_set_ptes;
1269                 params.pages_addr = pages_addr;
1270                 return amdgpu_vm_frag_ptes(&params, start, last + 1,
1271                                            addr, flags);
1272         }
1273
1274         ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
1275
1276         nptes = last - start + 1;
1277
1278         /*
1279          * reserve space for two commands every (1 << BLOCK_SIZE)
1280          *  entries or 2k dwords (whatever is smaller)
1281          *
1282          * The second command is for the shadow pagetables.
1283          */
1284         if (vm->root.base.bo->shadow)
1285                 ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2;
1286         else
1287                 ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1);
1288
1289         /* padding, etc. */
1290         ndw = 64;
1291
1292         if (pages_addr) {
1293                 /* copy commands needed */
1294                 ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
1295
1296                 /* and also PTEs */
1297                 ndw += nptes * 2;
1298
1299                 params.func = amdgpu_vm_do_copy_ptes;
1300
1301         } else {
1302                 /* set page commands needed */
1303                 ndw += ncmds * 10;
1304
1305                 /* extra commands for begin/end fragments */
1306                 ndw += 2 * 10 * adev->vm_manager.fragment_size;
1307
1308                 params.func = amdgpu_vm_do_set_ptes;
1309         }
1310
1311         r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1312         if (r)
1313                 return r;
1314
1315         params.ib = &job->ibs[0];
1316
1317         if (pages_addr) {
1318                 uint64_t *pte;
1319                 unsigned i;
1320
1321                 /* Put the PTEs at the end of the IB. */
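                /* params.src points at their GPU address so the copy command
                 * can fetch the prepared entries from the IB.
                 */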
1322                 i = ndw - nptes * 2;
1323                 pte = (uint64_t *)&(job->ibs->ptr[i]);
1324                 params.src = job->ibs->gpu_addr + i * 4;
1325
1326                 for (i = 0; i < nptes; ++i) {
1327                         pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
1328                                                     AMDGPU_GPU_PAGE_SIZE);
1329                         pte[i] |= flags;
1330                 }
1331                 addr = 0;
1332         }
1333
1334         r = amdgpu_sync_fence(adev, &job->sync, exclusive, false);
1335         if (r)
1336                 goto error_free;
1337
1338         r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
1339                              owner, false);
1340         if (r)
1341                 goto error_free;
1342
1343         r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
1344         if (r)
1345                 goto error_free;
1346
1347         r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
1348         if (r)
1349                 goto error_free;
1350
1351         amdgpu_ring_pad_ib(ring, params.ib);
1352         WARN_ON(params.ib->length_dw > ndw);
1353         r = amdgpu_job_submit(job, ring, &vm->entity,
1354                               AMDGPU_FENCE_OWNER_VM, &f);
1355         if (r)
1356                 goto error_free;
1357
1358         amdgpu_bo_fence(vm->root.base.bo, f, true);
1359         dma_fence_put(*fence);
1360         *fence = f;
1361         return 0;
1362
1363 error_free:
1364         amdgpu_job_free(job);
1365         return r;
1366 }
1367
1368 /**
1369  * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
1370  *
1371  * @adev: amdgpu_device pointer
1372  * @exclusive: fence we need to sync to
1373  * @pages_addr: DMA addresses to use for mapping
1374  * @vm: requested vm
1375  * @mapping: mapped range and flags to use for the update
1376  * @flags: HW flags for the mapping
1377  * @nodes: array of drm_mm_nodes with the MC addresses
1378  * @fence: optional resulting fence
1379  *
1380  * Split the mapping into smaller chunks so that each update fits
1381  * into a SDMA IB.
1382  * Returns 0 for success, -EINVAL for failure.
1383  */
1384 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1385                                       struct dma_fence *exclusive,
1386                                       dma_addr_t *pages_addr,
1387                                       struct amdgpu_vm *vm,
1388                                       struct amdgpu_bo_va_mapping *mapping,
1389                                       uint64_t flags,
1390                                       struct drm_mm_node *nodes,
1391                                       struct dma_fence **fence)
1392 {
1393         unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
1394         uint64_t pfn, start = mapping->start;
1395         int r;
1396
1397         /* Normally, bo_va->flags only contains the READABLE and WRITEABLE bits,
1398          * but just in case we filter the flags here first.
1399          */
1400         if (!(mapping->flags & AMDGPU_PTE_READABLE))
1401                 flags &= ~AMDGPU_PTE_READABLE;
1402         if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1403                 flags &= ~AMDGPU_PTE_WRITEABLE;
1404
1405         flags &= ~AMDGPU_PTE_EXECUTABLE;
1406         flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
1407
1408         flags &= ~AMDGPU_PTE_MTYPE_MASK;
1409         flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);
1410
1411         if ((mapping->flags & AMDGPU_PTE_PRT) &&
1412             (adev->asic_type >= CHIP_VEGA10)) {
1413                 flags |= AMDGPU_PTE_PRT;
1414                 flags &= ~AMDGPU_PTE_VALID;
1415         }
1416
1417         trace_amdgpu_vm_bo_update(mapping);
1418
1419         pfn = mapping->offset >> PAGE_SHIFT;
1420         if (nodes) {
1421                 while (pfn >= nodes->size) {
1422                         pfn -= nodes->size;
1423                         ++nodes;
1424                 }
1425         }
1426
1427         do {
1428                 dma_addr_t *dma_addr = NULL;
1429                 uint64_t max_entries;
1430                 uint64_t addr, last;
1431
1432                 if (nodes) {
1433                         addr = nodes->start << PAGE_SHIFT;
1434                         max_entries = (nodes->size - pfn) *
1435                                 (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
1436                 } else {
1437                         addr = 0;
1438                         max_entries = S64_MAX;
1439                 }
1440
1441                 if (pages_addr) {
1442                         uint64_t count;
1443
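                        /* Check how many of the DMA mapped system pages are
                         * physically contiguous so they can be mapped with a
                         * single linear run of PTEs.
                         */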
1444                         max_entries = min(max_entries, 16ull * 1024ull);
1445                         for (count = 1; count < max_entries; ++count) {
1446                                 uint64_t idx = pfn + count;
1447
1448                                 if (pages_addr[idx] !=
1449                                     (pages_addr[idx - 1] + PAGE_SIZE))
1450                                         break;
1451                         }
1452
1453                         if (count < min_linear_pages) {
1454                                 addr = pfn << PAGE_SHIFT;
1455                                 dma_addr = pages_addr;
1456                         } else {
1457                                 addr = pages_addr[pfn];
1458                                 max_entries = count;
1459                         }
1460
1461                 } else if (flags & AMDGPU_PTE_VALID) {
1462                         addr += adev->vm_manager.vram_base_offset;
1463                         addr += pfn << PAGE_SHIFT;
1464                 }
1465
1466                 last = min((uint64_t)mapping->last, start + max_entries - 1);
1467                 r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm,
1468                                                 start, last, flags, addr,
1469                                                 fence);
1470                 if (r)
1471                         return r;
1472
1473                 pfn += last - start + 1;
1474                 if (nodes && nodes->size == pfn) {
1475                         pfn = 0;
1476                         ++nodes;
1477                 }
1478                 start = last + 1;
1479
1480         } while (unlikely(start != mapping->last + 1));
1481
1482         return 0;
1483 }
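
/* Worked example (illustrative): with a fragment_size of 9, min_linear_pages
 * is 1 << 9 = 512.  For a GTT/system memory BO (pages_addr != NULL) the inner
 * loop above counts how many consecutive pages_addr[] entries are physically
 * contiguous.  A run of at least 512 pages is mapped linearly starting at
 * pages_addr[pfn], with max_entries clamped to the run length; a shorter run
 * falls back to per-page translation by handing the pages_addr array itself
 * to amdgpu_vm_bo_update_mapping().
 */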
1484
1485 /**
1486  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1487  *
1488  * @adev: amdgpu_device pointer
1489  * @bo_va: requested BO and VM object
1490  * @clear: if true clear the entries
1491  *
1492  * Fill in the page table entries for @bo_va.
1493  * Returns 0 for success, -EINVAL for failure.
1494  */
1495 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1496                         struct amdgpu_bo_va *bo_va,
1497                         bool clear)
1498 {
1499         struct amdgpu_bo *bo = bo_va->base.bo;
1500         struct amdgpu_vm *vm = bo_va->base.vm;
1501         struct amdgpu_bo_va_mapping *mapping;
1502         dma_addr_t *pages_addr = NULL;
1503         struct ttm_mem_reg *mem;
1504         struct drm_mm_node *nodes;
1505         struct dma_fence *exclusive, **last_update;
1506         uint64_t flags;
1507         int r;
1508
1509         if (clear || !bo_va->base.bo) {
1510                 mem = NULL;
1511                 nodes = NULL;
1512                 exclusive = NULL;
1513         } else {
1514                 struct ttm_dma_tt *ttm;
1515
1516                 mem = &bo_va->base.bo->tbo.mem;
1517                 nodes = mem->mm_node;
1518                 if (mem->mem_type == TTM_PL_TT) {
1519                         ttm = container_of(bo_va->base.bo->tbo.ttm,
1520                                            struct ttm_dma_tt, ttm);
1521                         pages_addr = ttm->dma_address;
1522                 }
1523                 exclusive = reservation_object_get_excl(bo->tbo.resv);
1524         }
1525
1526         if (bo)
1527                 flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1528         else
1529                 flags = 0x0;
1530
1531         if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
1532                 last_update = &vm->last_update;
1533         else
1534                 last_update = &bo_va->last_pt_update;
1535
1536         if (!clear && bo_va->base.moved) {
1537                 bo_va->base.moved = false;
1538                 list_splice_init(&bo_va->valids, &bo_va->invalids);
1539
1540         } else if (bo_va->cleared != clear) {
1541                 list_splice_init(&bo_va->valids, &bo_va->invalids);
1542         }
1543
1544         list_for_each_entry(mapping, &bo_va->invalids, list) {
1545                 r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
1546                                                mapping, flags, nodes,
1547                                                last_update);
1548                 if (r)
1549                         return r;
1550         }
1551
1552         if (vm->use_cpu_for_update) {
1553                 /* Flush HDP */
1554                 mb();
1555                 amdgpu_asic_flush_hdp(adev, NULL);
1556         }
1557
1558         spin_lock(&vm->status_lock);
1559         list_del_init(&bo_va->base.vm_status);
1560         spin_unlock(&vm->status_lock);
1561
1562         list_splice_init(&bo_va->invalids, &bo_va->valids);
1563         bo_va->cleared = clear;
1564
1565         if (trace_amdgpu_vm_bo_mapping_enabled()) {
1566                 list_for_each_entry(mapping, &bo_va->valids, list)
1567                         trace_amdgpu_vm_bo_mapping(mapping);
1568         }
1569
1570         return 0;
1571 }
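
/* Illustrative sketch (not taken from the driver): the typical calling
 * pattern for amdgpu_vm_bo_update() once a BO has been validated.  Locking
 * and error handling of the real CS/KFD paths are omitted; "bo" and "bo_va"
 * are assumed to exist already.
 *
 *	r = amdgpu_bo_reserve(bo, true);
 *	if (r)
 *		return r;
 *
 *	r = amdgpu_vm_bo_update(adev, bo_va, false);
 *	amdgpu_bo_unreserve(bo);
 *	if (r)
 *		return r;
 *
 * After this, bo_va->last_pt_update (or vm->last_update for per-VM BOs)
 * carries the fence of the page table update.
 */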
1572
1573 /**
1574  * amdgpu_vm_update_prt_state - update the global PRT state
1575  */
1576 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1577 {
1578         unsigned long flags;
1579         bool enable;
1580
1581         spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1582         enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1583         adev->gmc.gmc_funcs->set_prt(adev, enable);
1584         spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1585 }
1586
1587 /**
1588  * amdgpu_vm_prt_get - add a PRT user
1589  */
1590 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1591 {
1592         if (!adev->gmc.gmc_funcs->set_prt)
1593                 return;
1594
1595         if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1596                 amdgpu_vm_update_prt_state(adev);
1597 }
1598
1599 /**
1600  * amdgpu_vm_prt_put - drop a PRT user
1601  */
1602 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1603 {
1604         if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1605                 amdgpu_vm_update_prt_state(adev);
1606 }
1607
1608 /**
1609  * amdgpu_vm_prt_cb - callback for updating the PRT status
1610  */
1611 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1612 {
1613         struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1614
1615         amdgpu_vm_prt_put(cb->adev);
1616         kfree(cb);
1617 }
1618
1619 /**
1620  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1621  */
1622 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1623                                  struct dma_fence *fence)
1624 {
1625         struct amdgpu_prt_cb *cb;
1626
1627         if (!adev->gmc.gmc_funcs->set_prt)
1628                 return;
1629
1630         cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1631         if (!cb) {
1632                 /* Last resort when we are OOM */
1633                 if (fence)
1634                         dma_fence_wait(fence, false);
1635
1636                 amdgpu_vm_prt_put(adev);
1637         } else {
1638                 cb->adev = adev;
1639                 if (!fence || dma_fence_add_callback(fence, &cb->cb,
1640                                                      amdgpu_vm_prt_cb))
1641                         amdgpu_vm_prt_cb(fence, &cb->cb);
1642         }
1643 }
1644
1645 /**
1646  * amdgpu_vm_free_mapping - free a mapping
1647  *
1648  * @adev: amdgpu_device pointer
1649  * @vm: requested vm
1650  * @mapping: mapping to be freed
1651  * @fence: fence of the unmap operation
1652  *
1653  * Free a mapping and make sure we decrease the PRT usage count if applicable.
1654  */
1655 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1656                                    struct amdgpu_vm *vm,
1657                                    struct amdgpu_bo_va_mapping *mapping,
1658                                    struct dma_fence *fence)
1659 {
1660         if (mapping->flags & AMDGPU_PTE_PRT)
1661                 amdgpu_vm_add_prt_cb(adev, fence);
1662         kfree(mapping);
1663 }
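
/* Illustrative flow sketch (not part of the original source): how the PRT
 * enable bit stays reference counted across map and unmap:
 *
 *	amdgpu_vm_bo_insert_map()  -> amdgpu_vm_prt_get()
 *	amdgpu_vm_free_mapping()   -> amdgpu_vm_add_prt_cb(fence)
 *	    fence signals          -> amdgpu_vm_prt_cb() -> amdgpu_vm_prt_put()
 *
 * so the matching put only happens once the unmap has actually completed.
 */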
1664
1665 /**
1666  * amdgpu_vm_prt_fini - finish all prt mappings
1667  *
1668  * @adev: amdgpu_device pointer
1669  * @vm: requested vm
1670  *
1671  * Register a cleanup callback to disable PRT support after VM dies.
1672  */
1673 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1674 {
1675         struct reservation_object *resv = vm->root.base.bo->tbo.resv;
1676         struct dma_fence *excl, **shared;
1677         unsigned i, shared_count;
1678         int r;
1679
1680         r = reservation_object_get_fences_rcu(resv, &excl,
1681                                               &shared_count, &shared);
1682         if (r) {
1683                 /* Not enough memory to grab the fence list, as last resort
1684                  * block for all the fences to complete.
1685                  */
1686                 reservation_object_wait_timeout_rcu(resv, true, false,
1687                                                     MAX_SCHEDULE_TIMEOUT);
1688                 return;
1689         }
1690
1691         /* Add a callback for each fence in the reservation object */
1692         amdgpu_vm_prt_get(adev);
1693         amdgpu_vm_add_prt_cb(adev, excl);
1694
1695         for (i = 0; i < shared_count; ++i) {
1696                 amdgpu_vm_prt_get(adev);
1697                 amdgpu_vm_add_prt_cb(adev, shared[i]);
1698         }
1699
1700         kfree(shared);
1701 }
1702
1703 /**
1704  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1705  *
1706  * @adev: amdgpu_device pointer
1707  * @vm: requested vm
1708  * @fence: optional resulting fence (unchanged if no work needed to be done
1709  * or if an error occurred)
1710  *
1711  * Make sure all freed BOs are cleared in the PT.
1712  * Returns 0 for success.
1713  *
1714  * PTs have to be reserved and mutex must be locked!
1715  */
1716 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1717                           struct amdgpu_vm *vm,
1718                           struct dma_fence **fence)
1719 {
1720         struct amdgpu_bo_va_mapping *mapping;
1721         uint64_t init_pte_value = 0;
1722         struct dma_fence *f = NULL;
1723         int r;
1724
1725         while (!list_empty(&vm->freed)) {
1726                 mapping = list_first_entry(&vm->freed,
1727                         struct amdgpu_bo_va_mapping, list);
1728                 list_del(&mapping->list);
1729
1730                 if (vm->pte_support_ats && mapping->start < AMDGPU_VA_HOLE_START)
1731                         init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
1732
1733                 r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
1734                                                 mapping->start, mapping->last,
1735                                                 init_pte_value, 0, &f);
1736                 amdgpu_vm_free_mapping(adev, vm, mapping, f);
1737                 if (r) {
1738                         dma_fence_put(f);
1739                         return r;
1740                 }
1741         }
1742
1743         if (fence && f) {
1744                 dma_fence_put(*fence);
1745                 *fence = f;
1746         } else {
1747                 dma_fence_put(f);
1748         }
1749
1750         return 0;
1751
1752 }
1753
1754 /**
1755  * amdgpu_vm_handle_moved - handle moved BOs in the PT
1756  *
1757  * @adev: amdgpu_device pointer
1758  * @vm: requested vm
1760  *
1761  * Make sure all BOs which are moved are updated in the PTs.
1762  * Returns 0 for success.
1763  *
1764  * PTs have to be reserved!
1765  */
1766 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1767                            struct amdgpu_vm *vm)
1768 {
1769         bool clear;
1770         int r = 0;
1771
1772         spin_lock(&vm->status_lock);
1773         while (!list_empty(&vm->moved)) {
1774                 struct amdgpu_bo_va *bo_va;
1775                 struct reservation_object *resv;
1776
1777                 bo_va = list_first_entry(&vm->moved,
1778                         struct amdgpu_bo_va, base.vm_status);
1779                 spin_unlock(&vm->status_lock);
1780
1781                 resv = bo_va->base.bo->tbo.resv;
1782
1783                 /* Per VM BOs never need to be cleared in the page tables */
1784                 if (resv == vm->root.base.bo->tbo.resv)
1785                         clear = false;
1786                 /* Try to reserve the BO to avoid clearing its ptes */
1787                 else if (!amdgpu_vm_debug && reservation_object_trylock(resv))
1788                         clear = false;
1789                 /* Somebody else is using the BO right now */
1790                 else
1791                         clear = true;
1792
1793                 r = amdgpu_vm_bo_update(adev, bo_va, clear);
1794                 if (r)
1795                         return r;
1796
1797                 if (!clear && resv != vm->root.base.bo->tbo.resv)
1798                         reservation_object_unlock(resv);
1799
1800                 spin_lock(&vm->status_lock);
1801         }
1802         spin_unlock(&vm->status_lock);
1803
1804         return r;
1805 }
1806
1807 /**
1808  * amdgpu_vm_bo_add - add a bo to a specific vm
1809  *
1810  * @adev: amdgpu_device pointer
1811  * @vm: requested vm
1812  * @bo: amdgpu buffer object
1813  *
1814  * Add @bo into the requested vm.
1815  * Add @bo to the list of bos associated with the vm.
1816  * Returns newly added bo_va or NULL for failure
1817  *
1818  * Object has to be reserved!
1819  */
1820 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1821                                       struct amdgpu_vm *vm,
1822                                       struct amdgpu_bo *bo)
1823 {
1824         struct amdgpu_bo_va *bo_va;
1825
1826         bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1827         if (bo_va == NULL) {
1828                 return NULL;
1829         }
1830         bo_va->base.vm = vm;
1831         bo_va->base.bo = bo;
1832         INIT_LIST_HEAD(&bo_va->base.bo_list);
1833         INIT_LIST_HEAD(&bo_va->base.vm_status);
1834
1835         bo_va->ref_count = 1;
1836         INIT_LIST_HEAD(&bo_va->valids);
1837         INIT_LIST_HEAD(&bo_va->invalids);
1838
1839         if (!bo)
1840                 return bo_va;
1841
1842         list_add_tail(&bo_va->base.bo_list, &bo->va);
1843
1844         if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
1845                 return bo_va;
1846
1847         if (bo->preferred_domains &
1848             amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
1849                 return bo_va;
1850
1851         /*
1852          * We checked all the prerequisites, but it looks like this per VM BO
1853          * is currently evicted. Add the BO to the evicted list to make sure it
1854          * is validated on next VM use to avoid a fault.
1855          */
1856         spin_lock(&vm->status_lock);
1857         list_move_tail(&bo_va->base.vm_status, &vm->evicted);
1858         spin_unlock(&vm->status_lock);
1859
1860         return bo_va;
1861 }
1862
1863
1864 /**
1865  * amdgpu_vm_bo_insert_map - insert a new mapping
1866  *
1867  * @adev: amdgpu_device pointer
1868  * @bo_va: bo_va to store the address
1869  * @mapping: the mapping to insert
1870  *
1871  * Insert a new mapping into all structures.
1872  */
1873 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
1874                                     struct amdgpu_bo_va *bo_va,
1875                                     struct amdgpu_bo_va_mapping *mapping)
1876 {
1877         struct amdgpu_vm *vm = bo_va->base.vm;
1878         struct amdgpu_bo *bo = bo_va->base.bo;
1879
1880         mapping->bo_va = bo_va;
1881         list_add(&mapping->list, &bo_va->invalids);
1882         amdgpu_vm_it_insert(mapping, &vm->va);
1883
1884         if (mapping->flags & AMDGPU_PTE_PRT)
1885                 amdgpu_vm_prt_get(adev);
1886
1887         if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
1888                 spin_lock(&vm->status_lock);
1889                 if (list_empty(&bo_va->base.vm_status))
1890                         list_add(&bo_va->base.vm_status, &vm->moved);
1891                 spin_unlock(&vm->status_lock);
1892         }
1893         trace_amdgpu_vm_bo_map(bo_va, mapping);
1894 }
1895
1896 /**
1897  * amdgpu_vm_bo_map - map bo inside a vm
1898  *
1899  * @adev: amdgpu_device pointer
1900  * @bo_va: bo_va to store the address
1901  * @saddr: where to map the BO
1902  * @offset: requested offset in the BO
1903  * @flags: attributes of pages (read/write/valid/etc.)
1904  *
1905  * Add a mapping of the BO at the specified addr into the VM.
1906  * Returns 0 for success, error for failure.
1907  *
1908  * Object has to be reserved and unreserved outside!
1909  */
1910 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1911                      struct amdgpu_bo_va *bo_va,
1912                      uint64_t saddr, uint64_t offset,
1913                      uint64_t size, uint64_t flags)
1914 {
1915         struct amdgpu_bo_va_mapping *mapping, *tmp;
1916         struct amdgpu_bo *bo = bo_va->base.bo;
1917         struct amdgpu_vm *vm = bo_va->base.vm;
1918         uint64_t eaddr;
1919
1920         /* validate the parameters */
1921         if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
1922             size == 0 || size & AMDGPU_GPU_PAGE_MASK)
1923                 return -EINVAL;
1924
1925         /* make sure object fit at this offset */
1926         eaddr = saddr + size - 1;
1927         if (saddr >= eaddr ||
1928             (bo && offset + size > amdgpu_bo_size(bo)))
1929                 return -EINVAL;
1930
1931         saddr /= AMDGPU_GPU_PAGE_SIZE;
1932         eaddr /= AMDGPU_GPU_PAGE_SIZE;
1933
1934         tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1935         if (tmp) {
1936                 /* bo and tmp overlap, invalid addr */
1937                 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1938                         "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
1939                         tmp->start, tmp->last + 1);
1940                 return -EINVAL;
1941         }
1942
1943         mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1944         if (!mapping)
1945                 return -ENOMEM;
1946
1947         mapping->start = saddr;
1948         mapping->last = eaddr;
1949         mapping->offset = offset;
1950         mapping->flags = flags;
1951
1952         amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1953
1954         return 0;
1955 }
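
/* Illustrative sketch (not taken from the driver): mapping the first 1MB of a
 * BO read/write at an example GPU virtual address.  saddr, offset and size
 * must all be GPU page aligned, the BO has to be reserved by the caller and
 * the requested range must not overlap an existing mapping:
 *
 *	bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *	if (!bo_va)
 *		return -ENOMEM;
 *
 *	r = amdgpu_vm_bo_map(adev, bo_va, 0x400000, 0, 0x100000,
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 *	if (r)
 *		return r;
 *
 * The new mapping starts out on bo_va->invalids; the page table entries are
 * only written once amdgpu_vm_bo_update() is called for this bo_va.
 */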
1956
1957 /**
1958  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1959  *
1960  * @adev: amdgpu_device pointer
1961  * @bo_va: bo_va to store the address
1962  * @saddr: where to map the BO
1963  * @offset: requested offset in the BO
1964  * @flags: attributes of pages (read/write/valid/etc.)
1965  *
1966  * Add a mapping of the BO at the specified addr into the VM. Replace existing
1967  * mappings as we do so.
1968  * Returns 0 for success, error for failure.
1969  *
1970  * Object has to be reserved and unreserved outside!
1971  */
1972 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
1973                              struct amdgpu_bo_va *bo_va,
1974                              uint64_t saddr, uint64_t offset,
1975                              uint64_t size, uint64_t flags)
1976 {
1977         struct amdgpu_bo_va_mapping *mapping;
1978         struct amdgpu_bo *bo = bo_va->base.bo;
1979         uint64_t eaddr;
1980         int r;
1981
1982         /* validate the parameters */
1983         if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
1984             size == 0 || size & AMDGPU_GPU_PAGE_MASK)
1985                 return -EINVAL;
1986
1987         /* make sure object fit at this offset */
1988         eaddr = saddr + size - 1;
1989         if (saddr >= eaddr ||
1990             (bo && offset + size > amdgpu_bo_size(bo)))
1991                 return -EINVAL;
1992
1993         /* Allocate all the needed memory */
1994         mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1995         if (!mapping)
1996                 return -ENOMEM;
1997
1998         r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
1999         if (r) {
2000                 kfree(mapping);
2001                 return r;
2002         }
2003
2004         saddr /= AMDGPU_GPU_PAGE_SIZE;
2005         eaddr /= AMDGPU_GPU_PAGE_SIZE;
2006
2007         mapping->start = saddr;
2008         mapping->last = eaddr;
2009         mapping->offset = offset;
2010         mapping->flags = flags;
2011
2012         amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2013
2014         return 0;
2015 }
2016
2017 /**
2018  * amdgpu_vm_bo_unmap - remove bo mapping from vm
2019  *
2020  * @adev: amdgpu_device pointer
2021  * @bo_va: bo_va to remove the address from
2022  * @saddr: where the BO is mapped
2023  *
2024  * Remove a mapping of the BO at the specified addr from the VM.
2025  * Returns 0 for success, error for failure.
2026  *
2027  * Object has to be reserved and unreserved outside!
2028  */
2029 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
2030                        struct amdgpu_bo_va *bo_va,
2031                        uint64_t saddr)
2032 {
2033         struct amdgpu_bo_va_mapping *mapping;
2034         struct amdgpu_vm *vm = bo_va->base.vm;
2035         bool valid = true;
2036
2037         saddr /= AMDGPU_GPU_PAGE_SIZE;
2038
2039         list_for_each_entry(mapping, &bo_va->valids, list) {
2040                 if (mapping->start == saddr)
2041                         break;
2042         }
2043
2044         if (&mapping->list == &bo_va->valids) {
2045                 valid = false;
2046
2047                 list_for_each_entry(mapping, &bo_va->invalids, list) {
2048                         if (mapping->start == saddr)
2049                                 break;
2050                 }
2051
2052                 if (&mapping->list == &bo_va->invalids)
2053                         return -ENOENT;
2054         }
2055
2056         list_del(&mapping->list);
2057         amdgpu_vm_it_remove(mapping, &vm->va);
2058         mapping->bo_va = NULL;
2059         trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2060
2061         if (valid)
2062                 list_add(&mapping->list, &vm->freed);
2063         else
2064                 amdgpu_vm_free_mapping(adev, vm, mapping,
2065                                        bo_va->last_pt_update);
2066
2067         return 0;
2068 }
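
/* Illustrative sketch (not taken from the driver): tearing the mapping from
 * the example above down again.  saddr is the same GPU virtual address that
 * was passed to amdgpu_vm_bo_map():
 *
 *	r = amdgpu_vm_bo_unmap(adev, bo_va, 0x400000);
 *
 * A mapping that was already committed to the page tables is moved to
 * vm->freed and its PTEs are cleared by the next amdgpu_vm_clear_freed()
 * call; a still-invalid mapping is freed immediately.
 */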
2069
2070 /**
2071  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2072  *
2073  * @adev: amdgpu_device pointer
2074  * @vm: VM structure to use
2075  * @saddr: start of the range
2076  * @size: size of the range
2077  *
2078  * Remove all mappings in a range, split them as appropriate.
2079  * Returns 0 for success, error for failure.
2080  */
2081 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
2082                                 struct amdgpu_vm *vm,
2083                                 uint64_t saddr, uint64_t size)
2084 {
2085         struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
2086         LIST_HEAD(removed);
2087         uint64_t eaddr;
2088
2089         eaddr = saddr + size - 1;
2090         saddr /= AMDGPU_GPU_PAGE_SIZE;
2091         eaddr /= AMDGPU_GPU_PAGE_SIZE;
2092
2093         /* Allocate all the needed memory */
2094         before = kzalloc(sizeof(*before), GFP_KERNEL);
2095         if (!before)
2096                 return -ENOMEM;
2097         INIT_LIST_HEAD(&before->list);
2098
2099         after = kzalloc(sizeof(*after), GFP_KERNEL);
2100         if (!after) {
2101                 kfree(before);
2102                 return -ENOMEM;
2103         }
2104         INIT_LIST_HEAD(&after->list);
2105
2106         /* Now gather all removed mappings */
2107         tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2108         while (tmp) {
2109                 /* Remember mapping split at the start */
2110                 if (tmp->start < saddr) {
2111                         before->start = tmp->start;
2112                         before->last = saddr - 1;
2113                         before->offset = tmp->offset;
2114                         before->flags = tmp->flags;
2115                         list_add(&before->list, &tmp->list);
2116                 }
2117
2118                 /* Remember mapping split at the end */
2119                 if (tmp->last > eaddr) {
2120                         after->start = eaddr + 1;
2121                         after->last = tmp->last;
2122                         after->offset = tmp->offset;
2123                         after->offset += after->start - tmp->start;
2124                         after->flags = tmp->flags;
2125                         list_add(&after->list, &tmp->list);
2126                 }
2127
2128                 list_del(&tmp->list);
2129                 list_add(&tmp->list, &removed);
2130
2131                 tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2132         }
2133
2134         /* And free them up */
2135         list_for_each_entry_safe(tmp, next, &removed, list) {
2136                 amdgpu_vm_it_remove(tmp, &vm->va);
2137                 list_del(&tmp->list);
2138
2139                 if (tmp->start < saddr)
2140                         tmp->start = saddr;
2141                 if (tmp->last > eaddr)
2142                         tmp->last = eaddr;
2143
2144                 tmp->bo_va = NULL;
2145                 list_add(&tmp->list, &vm->freed);
2146                 trace_amdgpu_vm_bo_unmap(NULL, tmp);
2147         }
2148
2149         /* Insert partial mapping before the range */
2150         if (!list_empty(&before->list)) {
2151                 amdgpu_vm_it_insert(before, &vm->va);
2152                 if (before->flags & AMDGPU_PTE_PRT)
2153                         amdgpu_vm_prt_get(adev);
2154         } else {
2155                 kfree(before);
2156         }
2157
2158         /* Insert partial mapping after the range */
2159         if (!list_empty(&after->list)) {
2160                 amdgpu_vm_it_insert(after, &vm->va);
2161                 if (after->flags & AMDGPU_PTE_PRT)
2162                         amdgpu_vm_prt_get(adev);
2163         } else {
2164                 kfree(after);
2165         }
2166
2167         return 0;
2168 }
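
/* Worked example (illustrative): clearing GPU pages 0x120..0x17f from a VM
 * that holds a single mapping covering pages 0x100..0x1ff.  The original
 * mapping is removed and replaced by two remainders, "before" spanning
 * 0x100..0x11f and "after" spanning 0x180..0x1ff, while the clipped middle
 * part is moved to vm->freed so its PTEs are invalidated by the next
 * amdgpu_vm_clear_freed() call.
 */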
2169
2170 /**
2171  * amdgpu_vm_bo_lookup_mapping - find mapping by address
2172  *
2173  * @vm: the requested VM
2174  *
2175  * Find a mapping by its address.
2176  */
2177 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2178                                                          uint64_t addr)
2179 {
2180         return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2181 }
2182
2183 /**
2184  * amdgpu_vm_bo_rmv - remove a bo from a specific vm
2185  *
2186  * @adev: amdgpu_device pointer
2187  * @bo_va: requested bo_va
2188  *
2189  * Remove @bo_va->bo from the requested vm.
2190  *
2191  * Object has to be reserved!
2192  */
2193 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2194                       struct amdgpu_bo_va *bo_va)
2195 {
2196         struct amdgpu_bo_va_mapping *mapping, *next;
2197         struct amdgpu_vm *vm = bo_va->base.vm;
2198
2199         list_del(&bo_va->base.bo_list);
2200
2201         spin_lock(&vm->status_lock);
2202         list_del(&bo_va->base.vm_status);
2203         spin_unlock(&vm->status_lock);
2204
2205         list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2206                 list_del(&mapping->list);
2207                 amdgpu_vm_it_remove(mapping, &vm->va);
2208                 mapping->bo_va = NULL;
2209                 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2210                 list_add(&mapping->list, &vm->freed);
2211         }
2212         list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2213                 list_del(&mapping->list);
2214                 amdgpu_vm_it_remove(mapping, &vm->va);
2215                 amdgpu_vm_free_mapping(adev, vm, mapping,
2216                                        bo_va->last_pt_update);
2217         }
2218
2219         dma_fence_put(bo_va->last_pt_update);
2220         kfree(bo_va);
2221 }
2222
2223 /**
2224  * amdgpu_vm_bo_invalidate - mark the bo as invalid
2225  *
2226  * @adev: amdgpu_device pointer
2227  * @bo: amdgpu buffer object
2228  * @evicted: is the BO evicted
2229  *
2230  * Mark @bo as invalid.
2231  */
2232 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2233                              struct amdgpu_bo *bo, bool evicted)
2234 {
2235         struct amdgpu_vm_bo_base *bo_base;
2236
2237         list_for_each_entry(bo_base, &bo->va, bo_list) {
2238                 struct amdgpu_vm *vm = bo_base->vm;
2239
2240                 bo_base->moved = true;
2241                 if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
2242                         spin_lock(&bo_base->vm->status_lock);
2243                         if (bo->tbo.type == ttm_bo_type_kernel)
2244                                 list_move(&bo_base->vm_status, &vm->evicted);
2245                         else
2246                                 list_move_tail(&bo_base->vm_status,
2247                                                &vm->evicted);
2248                         spin_unlock(&bo_base->vm->status_lock);
2249                         continue;
2250                 }
2251
2252                 if (bo->tbo.type == ttm_bo_type_kernel) {
2253                         spin_lock(&bo_base->vm->status_lock);
2254                         if (list_empty(&bo_base->vm_status))
2255                                 list_add(&bo_base->vm_status, &vm->relocated);
2256                         spin_unlock(&bo_base->vm->status_lock);
2257                         continue;
2258                 }
2259
2260                 spin_lock(&bo_base->vm->status_lock);
2261                 if (list_empty(&bo_base->vm_status))
2262                         list_add(&bo_base->vm_status, &vm->moved);
2263                 spin_unlock(&bo_base->vm->status_lock);
2264         }
2265 }
2266
2267 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2268 {
2269         /* Total bits covered by PD + PTs */
2270         unsigned bits = ilog2(vm_size) + 18;
2271
2272         /* Make sure the PD is 4K in size for up to 8GB of address space.
2273          * Above that, split equally between PD and PTs. */
2274         if (vm_size <= 8)
2275                 return (bits - 9);
2276         else
2277                 return ((bits + 3) / 2);
2278 }
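
/* Worked example (illustrative): for an 8GB VM, bits = ilog2(8) + 18 = 21 and
 * the function returns 21 - 9 = 12, leaving 9 bits (a 4K page directory) for
 * the PD.  For a 256GB VM, bits = 8 + 18 = 26 and the result is
 * (26 + 3) / 2 = 14 bits for the page tables, leaving 12 bits for the PD.
 */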
2279
2280 /**
2281  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2282  *
2283  * @adev: amdgpu_device pointer
2284  * @vm_size: the default vm size if it's set auto
2285  * @vm_size: the default vm size if it's set to auto
2286 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
2287                            uint32_t fragment_size_default, unsigned max_level,
2288                            unsigned max_bits)
2289 {
2290         uint64_t tmp;
2291
2292         /* adjust vm size first */
2293         if (amdgpu_vm_size != -1) {
2294                 unsigned max_size = 1 << (max_bits - 30);
2295
2296                 vm_size = amdgpu_vm_size;
2297                 if (vm_size > max_size) {
2298                         dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2299                                  amdgpu_vm_size, max_size);
2300                         vm_size = max_size;
2301                 }
2302         }
2303
2304         adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2305
2306         tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2307         if (amdgpu_vm_block_size != -1)
2308                 tmp >>= amdgpu_vm_block_size - 9;
2309         tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2310         adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
2311         switch (adev->vm_manager.num_level) {
2312         case 3:
2313                 adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2314                 break;
2315         case 2:
2316                 adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2317                 break;
2318         case 1:
2319                 adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2320                 break;
2321         default:
2322                 dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2323         }
2324         /* block size depends on vm size and hw setup */
2325         if (amdgpu_vm_block_size != -1)
2326                 adev->vm_manager.block_size =
2327                         min((unsigned)amdgpu_vm_block_size, max_bits
2328                             - AMDGPU_GPU_PAGE_SHIFT
2329                             - 9 * adev->vm_manager.num_level);
2330         else if (adev->vm_manager.num_level > 1)
2331                 adev->vm_manager.block_size = 9;
2332         else
2333                 adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2334
2335         if (amdgpu_vm_fragment_size == -1)
2336                 adev->vm_manager.fragment_size = fragment_size_default;
2337         else
2338                 adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2339
2340         DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2341                  vm_size, adev->vm_manager.num_level + 1,
2342                  adev->vm_manager.block_size,
2343                  adev->vm_manager.fragment_size);
2344 }
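
/* Worked example (illustrative): vm_size = 256 GB with amdgpu_vm_block_size
 * and amdgpu_vm_fragment_size left at -1 gives max_pfn = 256 << 18 = 2^26
 * pages.  fls64(2^26) - 1 = 26 and DIV_ROUND_UP(26, 9) - 1 = 2, so (capped by
 * max_level) two levels sit above the page tables, root_level becomes
 * AMDGPU_VM_PDB1, block_size defaults to 9 and fragment_size falls back to
 * fragment_size_default.
 */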
2345
2346 /**
2347  * amdgpu_vm_init - initialize a vm instance
2348  *
2349  * @adev: amdgpu_device pointer
2350  * @vm: requested vm
2351  * @vm_context: Indicates if it is a GFX or Compute context
2352  *
2353  * Init @vm fields.
2354  */
2355 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2356                    int vm_context, unsigned int pasid)
2357 {
2358         const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
2359                 AMDGPU_VM_PTE_COUNT(adev) * 8);
2360         unsigned ring_instance;
2361         struct amdgpu_ring *ring;
2362         struct drm_sched_rq *rq;
2363         unsigned long size;
2364         uint64_t flags;
2365         int r, i;
2366
2367         vm->va = RB_ROOT_CACHED;
2368         for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2369                 vm->reserved_vmid[i] = NULL;
2370         spin_lock_init(&vm->status_lock);
2371         INIT_LIST_HEAD(&vm->evicted);
2372         INIT_LIST_HEAD(&vm->relocated);
2373         INIT_LIST_HEAD(&vm->moved);
2374         INIT_LIST_HEAD(&vm->freed);
2375
2376         /* create scheduler entity for page table updates */
2377
2378         ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
2379         ring_instance %= adev->vm_manager.vm_pte_num_rings;
2380         ring = adev->vm_manager.vm_pte_rings[ring_instance];
2381         rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
2382         r = drm_sched_entity_init(&ring->sched, &vm->entity,
2383                                   rq, amdgpu_sched_jobs, NULL);
2384         if (r)
2385                 return r;
2386
2387         vm->pte_support_ats = false;
2388
2389         if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
2390                 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2391                                                 AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2392
2393                 if (adev->asic_type == CHIP_RAVEN)
2394                         vm->pte_support_ats = true;
2395         } else {
2396                 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2397                                                 AMDGPU_VM_USE_CPU_FOR_GFX);
2398         }
2399         DRM_DEBUG_DRIVER("VM update mode is %s\n",
2400                          vm->use_cpu_for_update ? "CPU" : "SDMA");
2401         WARN_ONCE((vm->use_cpu_for_update && !amdgpu_vm_is_large_bar(adev)),
2402                   "CPU update of VM recommended only for large BAR system\n");
2403         vm->last_update = NULL;
2404
2405         flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
2406         if (vm->use_cpu_for_update)
2407                 flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
2408         else
2409                 flags |= AMDGPU_GEM_CREATE_SHADOW;
2410
2411         size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
2412         r = amdgpu_bo_create(adev, size, align, AMDGPU_GEM_DOMAIN_VRAM, flags,
2413                              ttm_bo_type_kernel, NULL, &vm->root.base.bo);
2414         if (r)
2415                 goto error_free_sched_entity;
2416
2417         r = amdgpu_bo_reserve(vm->root.base.bo, true);
2418         if (r)
2419                 goto error_free_root;
2420
2421         r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
2422                                adev->vm_manager.root_level,
2423                                vm->pte_support_ats);
2424         if (r)
2425                 goto error_unreserve;
2426
2427         vm->root.base.vm = vm;
2428         list_add_tail(&vm->root.base.bo_list, &vm->root.base.bo->va);
2429         list_add_tail(&vm->root.base.vm_status, &vm->evicted);
2430         amdgpu_bo_unreserve(vm->root.base.bo);
2431
2432         if (pasid) {
2433                 unsigned long flags;
2434
2435                 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2436                 r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
2437                               GFP_ATOMIC);
2438                 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2439                 if (r < 0)
2440                         goto error_free_root;
2441
2442                 vm->pasid = pasid;
2443         }
2444
2445         INIT_KFIFO(vm->faults);
2446         vm->fault_credit = 16;
2447
2448         return 0;
2449
2450 error_unreserve:
2451         amdgpu_bo_unreserve(vm->root.base.bo);
2452
2453 error_free_root:
2454         amdgpu_bo_unref(&vm->root.base.bo->shadow);
2455         amdgpu_bo_unref(&vm->root.base.bo);
2456         vm->root.base.bo = NULL;
2457
2458 error_free_sched_entity:
2459         drm_sched_entity_fini(&ring->sched, &vm->entity);
2460
2461         return r;
2462 }
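
/* Illustrative sketch (not taken from the driver): creating and later tearing
 * down a per-file GFX VM, roughly as the KMS open/close paths do.  "pasid"
 * may be 0 if no PASID management is used.
 *
 *	r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_vm_fini(adev, &fpriv->vm);
 */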
2463
2464 /**
2465  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2466  *
2467  * This only works on GFX VMs that don't have any BOs added and no
2468  * page tables allocated yet.
2469  *
2470  * Changes the following VM parameters:
2471  * - use_cpu_for_update
2472  * - pte_supports_ats
2473  * - pasid (old PASID is released, because compute manages its own PASIDs)
2474  *
2475  * Reinitializes the page directory to reflect the changed ATS
2476  * setting. May leave behind an unused shadow BO for the page
2477  * directory when switching from SDMA updates to CPU updates.
2478  *
2479  * Returns 0 for success, -errno for errors.
2480  */
2481 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2482 {
2483         bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
2484         int r;
2485
2486         r = amdgpu_bo_reserve(vm->root.base.bo, true);
2487         if (r)
2488                 return r;
2489
2490         /* Sanity checks */
2491         if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) {
2492                 r = -EINVAL;
2493                 goto error;
2494         }
2495
2496         /* Check if PD needs to be reinitialized and do it before
2497          * changing any other state, in case it fails.
2498          */
2499         if (pte_support_ats != vm->pte_support_ats) {
2500                 r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
2501                                adev->vm_manager.root_level,
2502                                pte_support_ats);
2503                 if (r)
2504                         goto error;
2505         }
2506
2507         /* Update VM state */
2508         vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2509                                     AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2510         vm->pte_support_ats = pte_support_ats;
2511         DRM_DEBUG_DRIVER("VM update mode is %s\n",
2512                          vm->use_cpu_for_update ? "CPU" : "SDMA");
2513         WARN_ONCE((vm->use_cpu_for_update && !amdgpu_vm_is_large_bar(adev)),
2514                   "CPU update of VM recommended only for large BAR system\n");
2515
2516         if (vm->pasid) {
2517                 unsigned long flags;
2518
2519                 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2520                 idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
2521                 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2522
2523                 vm->pasid = 0;
2524         }
2525
2526 error:
2527         amdgpu_bo_unreserve(vm->root.base.bo);
2528         return r;
2529 }
2530
2531 /**
2532  * amdgpu_vm_free_levels - free PD/PT levels
2533  *
2534  * @adev: amdgpu device structure
2535  * @parent: PD/PT starting level to free
2536  * @level: level of parent structure
2537  *
2538  * Free the page directory or page table level and all sub levels.
2539  */
2540 static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
2541                                   struct amdgpu_vm_pt *parent,
2542                                   unsigned level)
2543 {
2544         unsigned i, num_entries = amdgpu_vm_num_entries(adev, level);
2545
2546         if (parent->base.bo) {
2547                 list_del(&parent->base.bo_list);
2548                 list_del(&parent->base.vm_status);
2549                 amdgpu_bo_unref(&parent->base.bo->shadow);
2550                 amdgpu_bo_unref(&parent->base.bo);
2551         }
2552
2553         if (parent->entries)
2554                 for (i = 0; i < num_entries; i++)
2555                         amdgpu_vm_free_levels(adev, &parent->entries[i],
2556                                               level + 1);
2557
2558         kvfree(parent->entries);
2559 }
2560
2561 /**
2562  * amdgpu_vm_fini - tear down a vm instance
2563  *
2564  * @adev: amdgpu_device pointer
2565  * @vm: requested vm
2566  *
2567  * Tear down @vm.
2568  * Unbind the VM and remove all bos from the vm bo list
2569  */
2570 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2571 {
2572         struct amdgpu_bo_va_mapping *mapping, *tmp;
2573         bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2574         struct amdgpu_bo *root;
2575         u64 fault;
2576         int i, r;
2577
2578         amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2579
2580         /* Clear pending page faults from IH when the VM is destroyed */
2581         while (kfifo_get(&vm->faults, &fault))
2582                 amdgpu_ih_clear_fault(adev, fault);
2583
2584         if (vm->pasid) {
2585                 unsigned long flags;
2586
2587                 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2588                 idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
2589                 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2590         }
2591
2592         drm_sched_entity_fini(vm->entity.sched, &vm->entity);
2593
2594         if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
2595                 dev_err(adev->dev, "still active bo inside vm\n");
2596         }
2597         rbtree_postorder_for_each_entry_safe(mapping, tmp,
2598                                              &vm->va.rb_root, rb) {
2599                 list_del(&mapping->list);
2600                 amdgpu_vm_it_remove(mapping, &vm->va);
2601                 kfree(mapping);
2602         }
2603         list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2604                 if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
2605                         amdgpu_vm_prt_fini(adev, vm);
2606                         prt_fini_needed = false;
2607                 }
2608
2609                 list_del(&mapping->list);
2610                 amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2611         }
2612
2613         root = amdgpu_bo_ref(vm->root.base.bo);
2614         r = amdgpu_bo_reserve(root, true);
2615         if (r) {
2616                 dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
2617         } else {
2618                 amdgpu_vm_free_levels(adev, &vm->root,
2619                                       adev->vm_manager.root_level);
2620                 amdgpu_bo_unreserve(root);
2621         }
2622         amdgpu_bo_unref(&root);
2623         dma_fence_put(vm->last_update);
2624         for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2625                 amdgpu_vmid_free_reserved(adev, vm, i);
2626 }
2627
2628 /**
2629  * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
2630  *
2631  * @adev: amdgpu_device pointer
2632  * @pasid: PASID to identify the VM
2633  *
2634  * This function is expected to be called in interrupt context. Returns
2635  * true if there was fault credit, false otherwise
2636  */
2637 bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
2638                                   unsigned int pasid)
2639 {
2640         struct amdgpu_vm *vm;
2641
2642         spin_lock(&adev->vm_manager.pasid_lock);
2643         vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
2644         if (!vm) {
2645                 /* VM not found, can't track fault credit */
2646                 spin_unlock(&adev->vm_manager.pasid_lock);
2647                 return true;
2648         }
2649
2650         /* No lock needed. Only accessed by IRQ handler */
2651         if (!vm->fault_credit) {
2652                 /* Too many faults in this VM */
2653                 spin_unlock(&adev->vm_manager.pasid_lock);
2654                 return false;
2655         }
2656
2657         vm->fault_credit--;
2658         spin_unlock(&adev->vm_manager.pasid_lock);
2659         return true;
2660 }
2661
2662 /**
2663  * amdgpu_vm_manager_init - init the VM manager
2664  *
2665  * @adev: amdgpu_device pointer
2666  *
2667  * Initialize the VM manager structures
2668  */
2669 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2670 {
2671         unsigned i;
2672
2673         amdgpu_vmid_mgr_init(adev);
2674
2675         adev->vm_manager.fence_context =
2676                 dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2677         for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2678                 adev->vm_manager.seqno[i] = 0;
2679
2680         atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
2681         spin_lock_init(&adev->vm_manager.prt_lock);
2682         atomic_set(&adev->vm_manager.num_prt_users, 0);
2683
2684         /* If not overridden by the user, compute VM page tables are updated
2685          * by the CPU only on large BAR systems by default.
2686          */
2687 #ifdef CONFIG_X86_64
2688         if (amdgpu_vm_update_mode == -1) {
2689                 if (amdgpu_vm_is_large_bar(adev))
2690                         adev->vm_manager.vm_update_mode =
2691                                 AMDGPU_VM_USE_CPU_FOR_COMPUTE;
2692                 else
2693                         adev->vm_manager.vm_update_mode = 0;
2694         } else
2695                 adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
2696 #else
2697         adev->vm_manager.vm_update_mode = 0;
2698 #endif
2699
2700         idr_init(&adev->vm_manager.pasid_idr);
2701         spin_lock_init(&adev->vm_manager.pasid_lock);
2702 }
2703
2704 /**
2705  * amdgpu_vm_manager_fini - cleanup VM manager
2706  *
2707  * @adev: amdgpu_device pointer
2708  *
2709  * Cleanup the VM manager and free resources.
2710  */
2711 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2712 {
2713         WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
2714         idr_destroy(&adev->vm_manager.pasid_idr);
2715
2716         amdgpu_vmid_mgr_fini(adev);
2717 }
2718
2719 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2720 {
2721         union drm_amdgpu_vm *args = data;
2722         struct amdgpu_device *adev = dev->dev_private;
2723         struct amdgpu_fpriv *fpriv = filp->driver_priv;
2724         int r;
2725
2726         switch (args->in.op) {
2727         case AMDGPU_VM_OP_RESERVE_VMID:
2728                 /* Currently we only need to reserve a VMID from the gfxhub */
2729                 r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
2730                 if (r)
2731                         return r;
2732                 break;
2733         case AMDGPU_VM_OP_UNRESERVE_VMID:
2734                 amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
2735                 break;
2736         default:
2737                 return -EINVAL;
2738         }
2739
2740         return 0;
2741 }
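
/* Illustrative userspace sketch (an assumption, not part of this file): a
 * libdrm based client could drive this ioctl roughly as follows, using the
 * definitions from amdgpu_drm.h.  The wrapper actually used by real clients
 * may differ.
 *
 *	union drm_amdgpu_vm args = {};
 *	int r;
 *
 *	args.in.op = AMDGPU_VM_OP_RESERVE_VMID;
 *	r = drmCommandWriteRead(fd, DRM_AMDGPU_VM, &args, sizeof(args));
 *
 *	...
 *
 *	args.in.op = AMDGPU_VM_OP_UNRESERVE_VMID;
 *	r = drmCommandWriteRead(fd, DRM_AMDGPU_VM, &args, sizeof(args));
 */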