drm/amdgpu: cleanup kptr handling
[linux-2.6-microblaze.git] drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/dma-fence-array.h>
29 #include <linux/interval_tree_generic.h>
30 #include <drm/drmP.h>
31 #include <drm/amdgpu_drm.h>
32 #include "amdgpu.h"
33 #include "amdgpu_trace.h"
34
35 /*
36  * GPUVM
37  * GPUVM is similar to the legacy gart on older asics, however
38  * rather than there being a single global gart table
39  * for the entire GPU, there are multiple VM page tables active
40  * at any given time.  The VM page tables can contain a mix of
41  * vram pages and system memory pages, and system memory pages
42  * can be mapped as snooped (cached system pages) or unsnooped
43  * (uncached system pages).
44  * Each VM has an ID associated with it and there is a page table
45  * associated with each VMID.  When executing a command buffer,
46  * the kernel tells the ring which VMID to use for that command
47  * buffer.  VMIDs are allocated dynamically as commands are submitted.
48  * The userspace drivers maintain their own address space and the kernel
49  * sets up their page tables accordingly when they submit their
50  * command buffers and a VMID is assigned.
51  * Cayman/Trinity support up to 8 active VMs at any given time;
52  * SI supports 16.
53  */
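/*
 * A rough, illustrative sketch (example values, not taken from this file):
 * with 4KB GPU pages and a 9-bit block size, every page table level holds
 * 512 entries, so a leaf PT covers 512 * 4KB = 2MB of address space and
 * each level above it multiplies the covered range by another factor of 512.
 */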
54
55 #define START(node) ((node)->start)
56 #define LAST(node) ((node)->last)
57
58 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
59                      START, LAST, static, amdgpu_vm_it)
60
61 #undef START
62 #undef LAST
63
64 /* Local structure. Encapsulate some VM table update parameters to reduce
65  * the number of function parameters
66  */
67 struct amdgpu_pte_update_params {
68         /* amdgpu device we do this update for */
69         struct amdgpu_device *adev;
70         /* optional amdgpu_vm we do this update for */
71         struct amdgpu_vm *vm;
72         /* address where to copy page table entries from */
73         uint64_t src;
74         /* indirect buffer to fill with commands */
75         struct amdgpu_ib *ib;
76         /* Function which actually does the update */
77         void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
78                      uint64_t addr, unsigned count, uint32_t incr,
79                      uint64_t flags);
80         /* The next two are used during VM update by CPU
81          *  DMA addresses to use for mapping
82          *  Kernel pointer of PD/PT BO that needs to be updated
83          */
84         dma_addr_t *pages_addr;
85         void *kptr;
86 };
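/*
 * Usage note (based on the update helpers later in this file): GPU-based
 * updates fill ->ib and point ->func at amdgpu_vm_do_set_ptes() or
 * amdgpu_vm_do_copy_ptes(), while CPU-based updates use
 * amdgpu_vm_cpu_set_ptes() with the kernel mapping of the PD/PT as the
 * destination and ->pages_addr for GART lookups.
 */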
87
88 /* Helper to disable partial resident texture feature from a fence callback */
89 struct amdgpu_prt_cb {
90         struct amdgpu_device *adev;
91         struct dma_fence_cb cb;
92 };
93
94 /**
95  * amdgpu_vm_num_entries - return the number of entries in a PD/PT
96  *
97  * @adev: amdgpu_device pointer
98  *
99  * Calculate the number of entries in a page directory or page table.
100  */
101 static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
102                                       unsigned level)
103 {
104         if (level == 0)
105                 /* For the root directory */
106                 return adev->vm_manager.max_pfn >>
107                         (adev->vm_manager.block_size *
108                          adev->vm_manager.num_level);
109         else if (level == adev->vm_manager.num_level)
110                 /* For the page tables on the leaves */
111                 return AMDGPU_VM_PTE_COUNT(adev);
112         else
113                 /* Everything in between */
114                 return 1 << adev->vm_manager.block_size;
115 }
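/*
 * Worked example (illustrative numbers): with block_size = 9 and
 * num_level = 3, the root PD has max_pfn >> 27 entries, every intermediate
 * level has 1 << 9 = 512 entries, and the leaf page tables have
 * AMDGPU_VM_PTE_COUNT(adev) entries.
 */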
116
117 /**
118  * amdgpu_vm_bo_size - returns the size of the BOs in bytes
119  *
120  * @adev: amdgpu_device pointer
121  *
122  * Calculate the size of the BO for a page directory or page table in bytes.
123  */
124 static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
125 {
126         return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
127 }
128
129 /**
130  * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
131  *
132  * @vm: vm providing the BOs
133  * @validated: head of validation list
134  * @entry: entry to add
135  *
136  * Add the page directory to the list of BOs to
137  * validate for command submission.
138  */
139 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
140                          struct list_head *validated,
141                          struct amdgpu_bo_list_entry *entry)
142 {
143         entry->robj = vm->root.bo;
144         entry->priority = 0;
145         entry->tv.bo = &entry->robj->tbo;
146         entry->tv.shared = true;
147         entry->user_pages = NULL;
148         list_add(&entry->tv.head, validated);
149 }
150
151 /**
152  * amdgpu_vm_validate_level - validate a single page table level
153  *
154  * @parent: parent page table level
155  * @validate: callback to do the validation
156  * @param: parameter for the validation callback
157  *
158  * Validate the page table BOs on command submission if necessary.
159  */
160 static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent,
161                                     int (*validate)(void *, struct amdgpu_bo *),
162                                     void *param, bool use_cpu_for_update)
163 {
164         unsigned i;
165         int r;
166
167         if (use_cpu_for_update) {
168                 r = amdgpu_bo_kmap(parent->bo, NULL);
169                 if (r)
170                         return r;
171         }
172
173         if (!parent->entries)
174                 return 0;
175
176         for (i = 0; i <= parent->last_entry_used; ++i) {
177                 struct amdgpu_vm_pt *entry = &parent->entries[i];
178
179                 if (!entry->bo)
180                         continue;
181
182                 r = validate(param, entry->bo);
183                 if (r)
184                         return r;
185
186                 /*
187                  * Recurse into the subdirectory. This is harmless because we
188                  * have only a maximum of 5 layers.
189                  */
190                 r = amdgpu_vm_validate_level(entry, validate, param,
191                                              use_cpu_for_update);
192                 if (r)
193                         return r;
194         }
195
196         return 0;
197 }
198
199 /**
200  * amdgpu_vm_validate_pt_bos - validate the page table BOs
201  *
202  * @adev: amdgpu device pointer
203  * @vm: vm providing the BOs
204  * @validate: callback to do the validation
205  * @param: parameter for the validation callback
206  *
207  * Validate the page table BOs on command submission if necessary.
208  */
209 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
210                               int (*validate)(void *p, struct amdgpu_bo *bo),
211                               void *param)
212 {
213         uint64_t num_evictions;
214
215         /* We only need to validate the page tables
216          * if they aren't already valid.
217          */
218         num_evictions = atomic64_read(&adev->num_evictions);
219         if (num_evictions == vm->last_eviction_counter)
220                 return 0;
221
222         return amdgpu_vm_validate_level(&vm->root, validate, param,
223                                         vm->use_cpu_for_update);
224 }
225
226 /**
227  * amdgpu_vm_move_level_in_lru - move one level of PT BOs to the LRU tail
228  *
229  * @parent: parent page table level
231  *
232  * Move the PT BOs to the tail of the LRU.
233  */
234 static void amdgpu_vm_move_level_in_lru(struct amdgpu_vm_pt *parent)
235 {
236         unsigned i;
237
238         if (!parent->entries)
239                 return;
240
241         for (i = 0; i <= parent->last_entry_used; ++i) {
242                 struct amdgpu_vm_pt *entry = &parent->entries[i];
243
244                 if (!entry->bo)
245                         continue;
246
247                 ttm_bo_move_to_lru_tail(&entry->bo->tbo);
248                 amdgpu_vm_move_level_in_lru(entry);
249         }
250 }
251
252 /**
253  * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
254  *
255  * @adev: amdgpu device instance
256  * @vm: vm providing the BOs
257  *
258  * Move the PT BOs to the tail of the LRU.
259  */
260 void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
261                                   struct amdgpu_vm *vm)
262 {
263         struct ttm_bo_global *glob = adev->mman.bdev.glob;
264
265         spin_lock(&glob->lru_lock);
266         amdgpu_vm_move_level_in_lru(&vm->root);
267         spin_unlock(&glob->lru_lock);
268 }
269
270 /**
271  * amdgpu_vm_alloc_levels - allocate the PD/PT levels
272  *
273  * @adev: amdgpu_device pointer
274  * @vm: requested vm
275  * @saddr: start of the address range
276  * @eaddr: end of the address range
277  *
278  * Make sure the page directories and page tables are allocated
279  */
280 static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
281                                   struct amdgpu_vm *vm,
282                                   struct amdgpu_vm_pt *parent,
283                                   uint64_t saddr, uint64_t eaddr,
284                                   unsigned level)
285 {
286         unsigned shift = (adev->vm_manager.num_level - level) *
287                 adev->vm_manager.block_size;
288         unsigned pt_idx, from, to;
289         int r;
290         u64 flags;
291
292         if (!parent->entries) {
293                 unsigned num_entries = amdgpu_vm_num_entries(adev, level);
294
295                 parent->entries = kvmalloc_array(num_entries,
296                                                    sizeof(struct amdgpu_vm_pt),
297                                                    GFP_KERNEL | __GFP_ZERO);
298                 if (!parent->entries)
299                         return -ENOMEM;
300                 memset(parent->entries, 0, num_entries * sizeof(struct amdgpu_vm_pt));
301         }
302
303         from = saddr >> shift;
304         to = eaddr >> shift;
305         if (from >= amdgpu_vm_num_entries(adev, level) ||
306             to >= amdgpu_vm_num_entries(adev, level))
307                 return -EINVAL;
308
309         if (to > parent->last_entry_used)
310                 parent->last_entry_used = to;
311
312         ++level;
313         saddr = saddr & ((1 << shift) - 1);
314         eaddr = eaddr & ((1 << shift) - 1);
315
316         flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
317                         AMDGPU_GEM_CREATE_VRAM_CLEARED;
318         if (vm->use_cpu_for_update)
319                 flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
320         else
321                 flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
322                                 AMDGPU_GEM_CREATE_SHADOW);
323
324         /* walk over the address space and allocate the page tables */
325         for (pt_idx = from; pt_idx <= to; ++pt_idx) {
326                 struct reservation_object *resv = vm->root.bo->tbo.resv;
327                 struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
328                 struct amdgpu_bo *pt;
329
330                 if (!entry->bo) {
331                         r = amdgpu_bo_create(adev,
332                                              amdgpu_vm_bo_size(adev, level),
333                                              AMDGPU_GPU_PAGE_SIZE, true,
334                                              AMDGPU_GEM_DOMAIN_VRAM,
335                                              flags,
336                                              NULL, resv, &pt);
337                         if (r)
338                                 return r;
339
340                         if (vm->use_cpu_for_update) {
341                                 r = amdgpu_bo_kmap(pt, NULL);
342                                 if (r) {
343                                         amdgpu_bo_unref(&pt);
344                                         return r;
345                                 }
346                         }
347
348                         /* Keep a reference to the root directory to avoid
349                          * freeing them up in the wrong order.
350                          */
351                         pt->parent = amdgpu_bo_ref(vm->root.bo);
352
353                         entry->bo = pt;
354                         entry->addr = 0;
355                         entry->huge_page = false;
356                 }
357
358                 if (level < adev->vm_manager.num_level) {
359                         uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
360                         uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
361                                 ((1 << shift) - 1);
362                         r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
363                                                    sub_eaddr, level);
364                         if (r)
365                                 return r;
366                 }
367         }
368
369         return 0;
370 }
371
372 /**
373  * amdgpu_vm_alloc_pts - Allocate page tables.
374  *
375  * @adev: amdgpu_device pointer
376  * @vm: VM to allocate page tables for
377  * @saddr: Start address which needs to be allocated
378  * @size: Size from start address we need.
379  *
380  * Make sure the page tables are allocated.
381  */
382 int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
383                         struct amdgpu_vm *vm,
384                         uint64_t saddr, uint64_t size)
385 {
386         uint64_t last_pfn;
387         uint64_t eaddr;
388
389         /* validate the parameters */
390         if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
391                 return -EINVAL;
392
393         eaddr = saddr + size - 1;
394         last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
395         if (last_pfn >= adev->vm_manager.max_pfn) {
396                 dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
397                         last_pfn, adev->vm_manager.max_pfn);
398                 return -EINVAL;
399         }
400
401         saddr /= AMDGPU_GPU_PAGE_SIZE;
402         eaddr /= AMDGPU_GPU_PAGE_SIZE;
403
404         return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr, 0);
405 }
406
407 /**
408  * amdgpu_vm_had_gpu_reset - check if reset occurred since last use
409  *
410  * @adev: amdgpu_device pointer
411  * @id: VMID structure
412  *
413  * Check if GPU reset occurred since last use of the VMID.
414  */
415 static bool amdgpu_vm_had_gpu_reset(struct amdgpu_device *adev,
416                                     struct amdgpu_vm_id *id)
417 {
418         return id->current_gpu_reset_count !=
419                 atomic_read(&adev->gpu_reset_counter);
420 }
421
422 static bool amdgpu_vm_reserved_vmid_ready(struct amdgpu_vm *vm, unsigned vmhub)
423 {
424         return !!vm->reserved_vmid[vmhub];
425 }
426
427 /* id_mgr->lock must be held */
428 static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm,
429                                                struct amdgpu_ring *ring,
430                                                struct amdgpu_sync *sync,
431                                                struct dma_fence *fence,
432                                                struct amdgpu_job *job)
433 {
434         struct amdgpu_device *adev = ring->adev;
435         unsigned vmhub = ring->funcs->vmhub;
436         uint64_t fence_context = adev->fence_context + ring->idx;
437         struct amdgpu_vm_id *id = vm->reserved_vmid[vmhub];
438         struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
439         struct dma_fence *updates = sync->last_vm_update;
440         int r = 0;
441         struct dma_fence *flushed, *tmp;
442         bool needs_flush = vm->use_cpu_for_update;
443
444         flushed  = id->flushed_updates;
445         if ((amdgpu_vm_had_gpu_reset(adev, id)) ||
446             (atomic64_read(&id->owner) != vm->client_id) ||
447             (job->vm_pd_addr != id->pd_gpu_addr) ||
448             (updates && (!flushed || updates->context != flushed->context ||
449                         dma_fence_is_later(updates, flushed))) ||
450             (!id->last_flush || (id->last_flush->context != fence_context &&
451                                  !dma_fence_is_signaled(id->last_flush)))) {
452                 needs_flush = true;
453                 /* to prevent one context from being starved by another context */
454                 id->pd_gpu_addr = 0;
455                 tmp = amdgpu_sync_peek_fence(&id->active, ring);
456                 if (tmp) {
457                         r = amdgpu_sync_fence(adev, sync, tmp);
458                         return r;
459                 }
460         }
461
462         /* Good we can use this VMID. Remember this submission as
463          * user of the VMID.
464          */
465         r = amdgpu_sync_fence(ring->adev, &id->active, fence);
466         if (r)
467                 goto out;
468
469         if (updates && (!flushed || updates->context != flushed->context ||
470                         dma_fence_is_later(updates, flushed))) {
471                 dma_fence_put(id->flushed_updates);
472                 id->flushed_updates = dma_fence_get(updates);
473         }
474         id->pd_gpu_addr = job->vm_pd_addr;
475         atomic64_set(&id->owner, vm->client_id);
476         job->vm_needs_flush = needs_flush;
477         if (needs_flush) {
478                 dma_fence_put(id->last_flush);
479                 id->last_flush = NULL;
480         }
481         job->vm_id = id - id_mgr->ids;
482         trace_amdgpu_vm_grab_id(vm, ring, job);
483 out:
484         return r;
485 }
486
487 /**
488  * amdgpu_vm_grab_id - allocate the next free VMID
489  *
490  * @vm: vm to allocate id for
491  * @ring: ring we want to submit job to
492  * @sync: sync object where we add dependencies
493  * @fence: fence protecting ID from reuse
494  *
495  * Allocate an id for the vm, adding fences to the sync obj as necessary.
496  */
497 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
498                       struct amdgpu_sync *sync, struct dma_fence *fence,
499                       struct amdgpu_job *job)
500 {
501         struct amdgpu_device *adev = ring->adev;
502         unsigned vmhub = ring->funcs->vmhub;
503         struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
504         uint64_t fence_context = adev->fence_context + ring->idx;
505         struct dma_fence *updates = sync->last_vm_update;
506         struct amdgpu_vm_id *id, *idle;
507         struct dma_fence **fences;
508         unsigned i;
509         int r = 0;
510
511         mutex_lock(&id_mgr->lock);
512         if (amdgpu_vm_reserved_vmid_ready(vm, vmhub)) {
513                 r = amdgpu_vm_grab_reserved_vmid_locked(vm, ring, sync, fence, job);
514                 mutex_unlock(&id_mgr->lock);
515                 return r;
516         }
517         fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
518         if (!fences) {
519                 mutex_unlock(&id_mgr->lock);
520                 return -ENOMEM;
521         }
522         /* Check if we have an idle VMID */
523         i = 0;
524         list_for_each_entry(idle, &id_mgr->ids_lru, list) {
525                 fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
526                 if (!fences[i])
527                         break;
528                 ++i;
529         }
530
531         /* If we can't find an idle VMID to use, wait until one becomes available */
532         if (&idle->list == &id_mgr->ids_lru) {
533                 u64 fence_context = adev->vm_manager.fence_context + ring->idx;
534                 unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
535                 struct dma_fence_array *array;
536                 unsigned j;
537
538                 for (j = 0; j < i; ++j)
539                         dma_fence_get(fences[j]);
540
541                 array = dma_fence_array_create(i, fences, fence_context,
542                                            seqno, true);
543                 if (!array) {
544                         for (j = 0; j < i; ++j)
545                                 dma_fence_put(fences[j]);
546                         kfree(fences);
547                         r = -ENOMEM;
548                         goto error;
549                 }
550
551
552                 r = amdgpu_sync_fence(ring->adev, sync, &array->base);
553                 dma_fence_put(&array->base);
554                 if (r)
555                         goto error;
556
557                 mutex_unlock(&id_mgr->lock);
558                 return 0;
559
560         }
561         kfree(fences);
562
563         job->vm_needs_flush = vm->use_cpu_for_update;
564         /* Check if we can use a VMID already assigned to this VM */
565         list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
566                 struct dma_fence *flushed;
567                 bool needs_flush = vm->use_cpu_for_update;
568
569                 /* Check all the prerequisites to using this VMID */
570                 if (amdgpu_vm_had_gpu_reset(adev, id))
571                         continue;
572
573                 if (atomic64_read(&id->owner) != vm->client_id)
574                         continue;
575
576                 if (job->vm_pd_addr != id->pd_gpu_addr)
577                         continue;
578
579                 if (!id->last_flush ||
580                     (id->last_flush->context != fence_context &&
581                      !dma_fence_is_signaled(id->last_flush)))
582                         needs_flush = true;
583
584                 flushed  = id->flushed_updates;
585                 if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
586                         needs_flush = true;
587
588                 /* Concurrent flushes are only possible starting with Vega10 */
589                 if (adev->asic_type < CHIP_VEGA10 && needs_flush)
590                         continue;
591
592                 /* Good we can use this VMID. Remember this submission as
593                  * user of the VMID.
594                  */
595                 r = amdgpu_sync_fence(ring->adev, &id->active, fence);
596                 if (r)
597                         goto error;
598
599                 if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
600                         dma_fence_put(id->flushed_updates);
601                         id->flushed_updates = dma_fence_get(updates);
602                 }
603
604                 if (needs_flush)
605                         goto needs_flush;
606                 else
607                         goto no_flush_needed;
608
609         }
610
611         /* Still no ID to use? Then use the idle one found earlier */
612         id = idle;
613
614         /* Remember this submission as user of the VMID */
615         r = amdgpu_sync_fence(ring->adev, &id->active, fence);
616         if (r)
617                 goto error;
618
619         id->pd_gpu_addr = job->vm_pd_addr;
620         dma_fence_put(id->flushed_updates);
621         id->flushed_updates = dma_fence_get(updates);
622         atomic64_set(&id->owner, vm->client_id);
623
624 needs_flush:
625         job->vm_needs_flush = true;
626         dma_fence_put(id->last_flush);
627         id->last_flush = NULL;
628
629 no_flush_needed:
630         list_move_tail(&id->list, &id_mgr->ids_lru);
631
632         job->vm_id = id - id_mgr->ids;
633         trace_amdgpu_vm_grab_id(vm, ring, job);
634
635 error:
636         mutex_unlock(&id_mgr->lock);
637         return r;
638 }
639
640 static void amdgpu_vm_free_reserved_vmid(struct amdgpu_device *adev,
641                                           struct amdgpu_vm *vm,
642                                           unsigned vmhub)
643 {
644         struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
645
646         mutex_lock(&id_mgr->lock);
647         if (vm->reserved_vmid[vmhub]) {
648                 list_add(&vm->reserved_vmid[vmhub]->list,
649                         &id_mgr->ids_lru);
650                 vm->reserved_vmid[vmhub] = NULL;
651                 atomic_dec(&id_mgr->reserved_vmid_num);
652         }
653         mutex_unlock(&id_mgr->lock);
654 }
655
656 static int amdgpu_vm_alloc_reserved_vmid(struct amdgpu_device *adev,
657                                          struct amdgpu_vm *vm,
658                                          unsigned vmhub)
659 {
660         struct amdgpu_vm_id_manager *id_mgr;
661         struct amdgpu_vm_id *idle;
662         int r = 0;
663
664         id_mgr = &adev->vm_manager.id_mgr[vmhub];
665         mutex_lock(&id_mgr->lock);
666         if (vm->reserved_vmid[vmhub])
667                 goto unlock;
668         if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
669             AMDGPU_VM_MAX_RESERVED_VMID) {
670                 DRM_ERROR("Over limitation of reserved vmid\n");
671                 atomic_dec(&id_mgr->reserved_vmid_num);
672                 r = -EINVAL;
673                 goto unlock;
674         }
675         /* Select the first entry VMID */
676         idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vm_id, list);
677         list_del_init(&idle->list);
678         vm->reserved_vmid[vmhub] = idle;
679         mutex_unlock(&id_mgr->lock);
680
681         return 0;
682 unlock:
683         mutex_unlock(&id_mgr->lock);
684         return r;
685 }
686
687 /**
688  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
689  *
690  * @adev: amdgpu_device pointer
691  */
692 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
693 {
694         const struct amdgpu_ip_block *ip_block;
695         bool has_compute_vm_bug;
696         struct amdgpu_ring *ring;
697         int i;
698
699         has_compute_vm_bug = false;
700
701         ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
702         if (ip_block) {
703                 /* Compute has a VM bug for GFX version <= 7 and for
704                  * GFX 8 with MEC firmware version < 673. */
705                 if (ip_block->version->major <= 7)
706                         has_compute_vm_bug = true;
707                 else if (ip_block->version->major == 8)
708                         if (adev->gfx.mec_fw_version < 673)
709                                 has_compute_vm_bug = true;
710         }
711
712         for (i = 0; i < adev->num_rings; i++) {
713                 ring = adev->rings[i];
714                 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
715                         /* only compute rings */
716                         ring->has_compute_vm_bug = has_compute_vm_bug;
717                 else
718                         ring->has_compute_vm_bug = false;
719         }
720 }
721
722 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
723                                   struct amdgpu_job *job)
724 {
725         struct amdgpu_device *adev = ring->adev;
726         unsigned vmhub = ring->funcs->vmhub;
727         struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
728         struct amdgpu_vm_id *id;
729         bool gds_switch_needed;
730         bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
731
732         if (job->vm_id == 0)
733                 return false;
734         id = &id_mgr->ids[job->vm_id];
735         gds_switch_needed = ring->funcs->emit_gds_switch && (
736                 id->gds_base != job->gds_base ||
737                 id->gds_size != job->gds_size ||
738                 id->gws_base != job->gws_base ||
739                 id->gws_size != job->gws_size ||
740                 id->oa_base != job->oa_base ||
741                 id->oa_size != job->oa_size);
742
743         if (amdgpu_vm_had_gpu_reset(adev, id))
744                 return true;
745
746         return vm_flush_needed || gds_switch_needed;
747 }
748
749 static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
750 {
751         return (adev->mc.real_vram_size == adev->mc.visible_vram_size);
752 }
753
754 /**
755  * amdgpu_vm_flush - hardware flush the vm
756  *
757  * @ring: ring to use for flush
758  * @job: job which contains the VM flush related information
759  * @need_pipe_sync: whether a pipeline sync is needed before the flush
760  *
761  * Emit a VM flush when it is necessary.
762  */
763 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
764 {
765         struct amdgpu_device *adev = ring->adev;
766         unsigned vmhub = ring->funcs->vmhub;
767         struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
768         struct amdgpu_vm_id *id = &id_mgr->ids[job->vm_id];
769         bool gds_switch_needed = ring->funcs->emit_gds_switch && (
770                 id->gds_base != job->gds_base ||
771                 id->gds_size != job->gds_size ||
772                 id->gws_base != job->gws_base ||
773                 id->gws_size != job->gws_size ||
774                 id->oa_base != job->oa_base ||
775                 id->oa_size != job->oa_size);
776         bool vm_flush_needed = job->vm_needs_flush;
777         unsigned patch_offset = 0;
778         int r;
779
780         if (amdgpu_vm_had_gpu_reset(adev, id)) {
781                 gds_switch_needed = true;
782                 vm_flush_needed = true;
783         }
784
785         if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
786                 return 0;
787
788         if (ring->funcs->init_cond_exec)
789                 patch_offset = amdgpu_ring_init_cond_exec(ring);
790
791         if (need_pipe_sync)
792                 amdgpu_ring_emit_pipeline_sync(ring);
793
794         if (ring->funcs->emit_vm_flush && vm_flush_needed) {
795                 struct dma_fence *fence;
796
797                 trace_amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr);
798                 amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
799
800                 r = amdgpu_fence_emit(ring, &fence);
801                 if (r)
802                         return r;
803
804                 mutex_lock(&id_mgr->lock);
805                 dma_fence_put(id->last_flush);
806                 id->last_flush = fence;
807                 id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
808                 mutex_unlock(&id_mgr->lock);
809         }
810
811         if (ring->funcs->emit_gds_switch && gds_switch_needed) {
812                 id->gds_base = job->gds_base;
813                 id->gds_size = job->gds_size;
814                 id->gws_base = job->gws_base;
815                 id->gws_size = job->gws_size;
816                 id->oa_base = job->oa_base;
817                 id->oa_size = job->oa_size;
818                 amdgpu_ring_emit_gds_switch(ring, job->vm_id, job->gds_base,
819                                             job->gds_size, job->gws_base,
820                                             job->gws_size, job->oa_base,
821                                             job->oa_size);
822         }
823
824         if (ring->funcs->patch_cond_exec)
825                 amdgpu_ring_patch_cond_exec(ring, patch_offset);
826
827         /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
828         if (ring->funcs->emit_switch_buffer) {
829                 amdgpu_ring_emit_switch_buffer(ring);
830                 amdgpu_ring_emit_switch_buffer(ring);
831         }
832         return 0;
833 }
834
835 /**
836  * amdgpu_vm_reset_id - reset VMID to zero
837  *
838  * @adev: amdgpu device structure
839  * @vmid: vmid number to use
840  *
841  * Reset saved GDS, GWS and OA to force switch on next flush.
842  */
843 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
844                         unsigned vmid)
845 {
846         struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
847         struct amdgpu_vm_id *id = &id_mgr->ids[vmid];
848
849         atomic64_set(&id->owner, 0);
850         id->gds_base = 0;
851         id->gds_size = 0;
852         id->gws_base = 0;
853         id->gws_size = 0;
854         id->oa_base = 0;
855         id->oa_size = 0;
856 }
857
858 /**
859  * amdgpu_vm_reset_all_ids - reset all VMIDs to zero
860  *
861  * @adev: amdgpu device structure
862  *
863  * Reset all VMIDs to force a flush on next use
864  */
865 void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev)
866 {
867         unsigned i, j;
868
869         for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
870                 struct amdgpu_vm_id_manager *id_mgr =
871                         &adev->vm_manager.id_mgr[i];
872
873                 for (j = 1; j < id_mgr->num_ids; ++j)
874                         amdgpu_vm_reset_id(adev, i, j);
875         }
876 }
877
878 /**
879  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
880  *
881  * @vm: requested vm
882  * @bo: requested buffer object
883  *
884  * Find @bo inside the requested vm.
885  * Search inside the @bo's vm list for the requested vm
886  * Returns the found bo_va or NULL if none is found
887  *
888  * Object has to be reserved!
889  */
890 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
891                                        struct amdgpu_bo *bo)
892 {
893         struct amdgpu_bo_va *bo_va;
894
895         list_for_each_entry(bo_va, &bo->va, bo_list) {
896                 if (bo_va->vm == vm) {
897                         return bo_va;
898                 }
899         }
900         return NULL;
901 }
902
903 /**
904  * amdgpu_vm_do_set_ptes - helper to call the right asic function
905  *
906  * @params: see amdgpu_pte_update_params definition
907  * @pe: addr of the page entry
908  * @addr: dst addr to write into pe
909  * @count: number of page entries to update
910  * @incr: increase next addr by incr bytes
911  * @flags: hw access flags
912  *
913  * Traces the parameters and calls the right asic functions
914  * to setup the page table using the DMA.
915  */
916 static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
917                                   uint64_t pe, uint64_t addr,
918                                   unsigned count, uint32_t incr,
919                                   uint64_t flags)
920 {
921         trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
922
923         if (count < 3) {
924                 amdgpu_vm_write_pte(params->adev, params->ib, pe,
925                                     addr | flags, count, incr);
926
927         } else {
928                 amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
929                                       count, incr, flags);
930         }
931 }
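/*
 * A note on the count < 3 split above: for just one or two entries it is
 * presumably cheaper to write the PTE values straight into the IB than to
 * emit a full set_pte_pde command; longer runs use the dedicated packet.
 */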
932
933 /**
934  * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
935  *
936  * @params: see amdgpu_pte_update_params definition
937  * @pe: addr of the page entry
938  * @addr: dst addr to write into pe
939  * @count: number of page entries to update
940  * @incr: increase next addr by incr bytes
941  * @flags: hw access flags
942  *
943  * Traces the parameters and calls the DMA function to copy the PTEs.
944  */
945 static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
946                                    uint64_t pe, uint64_t addr,
947                                    unsigned count, uint32_t incr,
948                                    uint64_t flags)
949 {
950         uint64_t src = (params->src + (addr >> 12) * 8);
951
952
953         trace_amdgpu_vm_copy_ptes(pe, src, count);
954
955         amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
956 }
957
958 /**
959  * amdgpu_vm_map_gart - Resolve gart mapping of addr
960  *
961  * @pages_addr: optional DMA address to use for lookup
962  * @addr: the unmapped addr
963  *
964  * Look up the physical address of the page that the pte resolves
965  * to and return the pointer for the page table entry.
966  */
967 static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
968 {
969         uint64_t result;
970
971         /* page table offset */
972         result = pages_addr[addr >> PAGE_SHIFT];
973
974         /* in case cpu page size != gpu page size */
975         result |= addr & (~PAGE_MASK);
976
977         result &= 0xFFFFFFFFFFFFF000ULL;
978
979         return result;
980 }
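/*
 * Illustrative example (assuming 64KB CPU pages): for addr = 0x12345 the
 * lookup returns pages_addr[0x1] OR'ed with the in-page offset 0x2345; the
 * final mask then clears the low 12 bits so the result stays aligned to the
 * 4KB GPU page size.
 */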
981
982 /**
983  * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
984  *
985  * @params: see amdgpu_pte_update_params definition
986  * @pe: kmap addr of the page entry
987  * @addr: dst addr to write into pe
988  * @count: number of page entries to update
989  * @incr: increase next addr by incr bytes
990  * @flags: hw access flags
991  *
992  * Write count number of PT/PD entries directly.
993  */
994 static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
995                                    uint64_t pe, uint64_t addr,
996                                    unsigned count, uint32_t incr,
997                                    uint64_t flags)
998 {
999         unsigned int i;
1000         uint64_t value;
1001
1002         trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
1003
1004         for (i = 0; i < count; i++) {
1005                 value = params->pages_addr ?
1006                         amdgpu_vm_map_gart(params->pages_addr, addr) :
1007                         addr;
1008                 amdgpu_gart_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
1009                                         i, value, flags);
1010                 addr += incr;
1011         }
1012 }
1013
1014 static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1015                              void *owner)
1016 {
1017         struct amdgpu_sync sync;
1018         int r;
1019
1020         amdgpu_sync_create(&sync);
1021         amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.resv, owner);
1022         r = amdgpu_sync_wait(&sync, true);
1023         amdgpu_sync_free(&sync);
1024
1025         return r;
1026 }
1027
1028 /*
1029  * amdgpu_vm_update_level - update a single level in the hierarchy
1030  *
1031  * @adev: amdgpu_device pointer
1032  * @vm: requested vm
1033  * @parent: parent directory
1034  *
1035  * Makes sure all entries in @parent are up to date.
1036  * Returns 0 for success, error for failure.
1037  */
1038 static int amdgpu_vm_update_level(struct amdgpu_device *adev,
1039                                   struct amdgpu_vm *vm,
1040                                   struct amdgpu_vm_pt *parent,
1041                                   unsigned level)
1042 {
1043         struct amdgpu_bo *shadow;
1044         struct amdgpu_ring *ring = NULL;
1045         uint64_t pd_addr, shadow_addr = 0;
1046         uint32_t incr = amdgpu_vm_bo_size(adev, level + 1);
1047         uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
1048         unsigned count = 0, pt_idx, ndw = 0;
1049         struct amdgpu_job *job;
1050         struct amdgpu_pte_update_params params;
1051         struct dma_fence *fence = NULL;
1052
1053         int r;
1054
1055         if (!parent->entries)
1056                 return 0;
1057
1058         memset(&params, 0, sizeof(params));
1059         params.adev = adev;
1060         shadow = parent->bo->shadow;
1061
1062         if (vm->use_cpu_for_update) {
1063                 pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo);
1064                 r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
1065                 if (unlikely(r))
1066                         return r;
1067
1068                 params.func = amdgpu_vm_cpu_set_ptes;
1069         } else {
1070                 if (shadow) {
1071                         r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
1072                         if (r)
1073                                 return r;
1074                 }
1075                 ring = container_of(vm->entity.sched, struct amdgpu_ring,
1076                                     sched);
1077
1078                 /* padding, etc. */
1079                 ndw = 64;
1080
1081                 /* assume the worst case */
1082                 ndw += parent->last_entry_used * 6;
1083
1084                 pd_addr = amdgpu_bo_gpu_offset(parent->bo);
1085
1086                 if (shadow) {
1087                         shadow_addr = amdgpu_bo_gpu_offset(shadow);
1088                         ndw *= 2;
1089                 } else {
1090                         shadow_addr = 0;
1091                 }
1092
1093                 r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1094                 if (r)
1095                         return r;
1096
1097                 params.ib = &job->ibs[0];
1098                 params.func = amdgpu_vm_do_set_ptes;
1099         }
1100
1101
1102         /* walk over the address space and update the directory */
1103         for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
1104                 struct amdgpu_bo *bo = parent->entries[pt_idx].bo;
1105                 uint64_t pde, pt;
1106
1107                 if (bo == NULL)
1108                         continue;
1109
1110                 if (bo->shadow) {
1111                         struct amdgpu_bo *pt_shadow = bo->shadow;
1112
1113                         r = amdgpu_ttm_bind(&pt_shadow->tbo,
1114                                             &pt_shadow->tbo.mem);
1115                         if (r)
1116                                 return r;
1117                 }
1118
1119                 pt = amdgpu_bo_gpu_offset(bo);
1120                 pt = amdgpu_gart_get_vm_pde(adev, pt);
1121                 if (parent->entries[pt_idx].addr == pt ||
1122                     parent->entries[pt_idx].huge_page)
1123                         continue;
1124
1125                 parent->entries[pt_idx].addr = pt;
1126
1127                 pde = pd_addr + pt_idx * 8;
1128                 if (((last_pde + 8 * count) != pde) ||
1129                     ((last_pt + incr * count) != pt) ||
1130                     (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
1131
1132                         if (count) {
1133                                 if (shadow)
1134                                         params.func(&params,
1135                                                     last_shadow,
1136                                                     last_pt, count,
1137                                                     incr,
1138                                                     AMDGPU_PTE_VALID);
1139
1140                                 params.func(&params, last_pde,
1141                                             last_pt, count, incr,
1142                                             AMDGPU_PTE_VALID);
1143                         }
1144
1145                         count = 1;
1146                         last_pde = pde;
1147                         last_shadow = shadow_addr + pt_idx * 8;
1148                         last_pt = pt;
1149                 } else {
1150                         ++count;
1151                 }
1152         }
1153
1154         if (count) {
1155                 if (vm->root.bo->shadow)
1156                         params.func(&params, last_shadow, last_pt,
1157                                     count, incr, AMDGPU_PTE_VALID);
1158
1159                 params.func(&params, last_pde, last_pt,
1160                             count, incr, AMDGPU_PTE_VALID);
1161         }
1162
1163         if (!vm->use_cpu_for_update) {
1164                 if (params.ib->length_dw == 0) {
1165                         amdgpu_job_free(job);
1166                 } else {
1167                         amdgpu_ring_pad_ib(ring, params.ib);
1168                         amdgpu_sync_resv(adev, &job->sync, parent->bo->tbo.resv,
1169                                          AMDGPU_FENCE_OWNER_VM);
1170                         if (shadow)
1171                                 amdgpu_sync_resv(adev, &job->sync,
1172                                                  shadow->tbo.resv,
1173                                                  AMDGPU_FENCE_OWNER_VM);
1174
1175                         WARN_ON(params.ib->length_dw > ndw);
1176                         r = amdgpu_job_submit(job, ring, &vm->entity,
1177                                         AMDGPU_FENCE_OWNER_VM, &fence);
1178                         if (r)
1179                                 goto error_free;
1180
1181                         amdgpu_bo_fence(parent->bo, fence, true);
1182                         dma_fence_put(vm->last_dir_update);
1183                         vm->last_dir_update = dma_fence_get(fence);
1184                         dma_fence_put(fence);
1185                 }
1186         }
1187         /*
1188          * Recurse into the subdirectories. This recursion is harmless because
1189          * we only have a maximum of 5 layers.
1190          */
1191         for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
1192                 struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
1193
1194                 if (!entry->bo)
1195                         continue;
1196
1197                 r = amdgpu_vm_update_level(adev, vm, entry, level + 1);
1198                 if (r)
1199                         return r;
1200         }
1201
1202         return 0;
1203
1204 error_free:
1205         amdgpu_job_free(job);
1206         return r;
1207 }
1208
1209 /*
1210  * amdgpu_vm_invalidate_level - mark all PD levels as invalid
1211  *
1212  * @parent: parent PD
1213  *
1214  * Mark all PD levels as invalid after an error.
1215  */
1216 static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt *parent)
1217 {
1218         unsigned pt_idx;
1219
1220         /*
1221          * Recurse into the subdirectories. This recursion is harmless because
1222          * we only have a maximum of 5 layers.
1223          */
1224         for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
1225                 struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
1226
1227                 if (!entry->bo)
1228                         continue;
1229
1230                 entry->addr = ~0ULL;
1231                 amdgpu_vm_invalidate_level(entry);
1232         }
1233 }
1234
1235 /*
1236  * amdgpu_vm_update_directories - make sure that all directories are valid
1237  *
1238  * @adev: amdgpu_device pointer
1239  * @vm: requested vm
1240  *
1241  * Makes sure all directories are up to date.
1242  * Returns 0 for success, error for failure.
1243  */
1244 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
1245                                  struct amdgpu_vm *vm)
1246 {
1247         int r;
1248
1249         r = amdgpu_vm_update_level(adev, vm, &vm->root, 0);
1250         if (r)
1251                 amdgpu_vm_invalidate_level(&vm->root);
1252
1253         if (vm->use_cpu_for_update) {
1254                 /* Flush HDP */
1255                 mb();
1256                 amdgpu_gart_flush_gpu_tlb(adev, 0);
1257         }
1258
1259         return r;
1260 }
1261
1262 /**
1263  * amdgpu_vm_get_entry - find the entry for an address
1264  *
1265  * @p: see amdgpu_pte_update_params definition
1266  * @addr: virtual address in question
1267  * @entry: resulting entry or NULL
1268  * @parent: parent entry
1269  *
1270  * Find the vm_pt entry and its parent for the given address.
1271  */
1272 void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
1273                          struct amdgpu_vm_pt **entry,
1274                          struct amdgpu_vm_pt **parent)
1275 {
1276         unsigned idx, level = p->adev->vm_manager.num_level;
1277
1278         *parent = NULL;
1279         *entry = &p->vm->root;
1280         while ((*entry)->entries) {
1281                 idx = addr >> (p->adev->vm_manager.block_size * level--);
1282                 idx %= amdgpu_bo_size((*entry)->bo) / 8;
1283                 *parent = *entry;
1284                 *entry = &(*entry)->entries[idx];
1285         }
1286
1287         if (level)
1288                 *entry = NULL;
1289 }
1290
1291 /**
1292  * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages
1293  *
1294  * @p: see amdgpu_pte_update_params definition
1295  * @entry: vm_pt entry to check
1296  * @parent: parent entry
1297  * @nptes: number of PTEs updated with this operation
1298  * @dst: destination address where the PTEs should point to
1299  * @flags: access flags for the PTEs
1300  *
1301  * Check if we can update the PD with a huge page.
1302  */
1303 static int amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
1304                                        struct amdgpu_vm_pt *entry,
1305                                        struct amdgpu_vm_pt *parent,
1306                                        unsigned nptes, uint64_t dst,
1307                                        uint64_t flags)
1308 {
1309         bool use_cpu_update = (p->func == amdgpu_vm_cpu_set_ptes);
1310         uint64_t pd_addr, pde;
1311         int r;
1312
1313         /* In the case of a mixed PT the PDE must point to it */
1314         if (p->adev->asic_type < CHIP_VEGA10 ||
1315             nptes != AMDGPU_VM_PTE_COUNT(p->adev) ||
1316             p->func == amdgpu_vm_do_copy_ptes ||
1317             !(flags & AMDGPU_PTE_VALID)) {
1318
1319                 dst = amdgpu_bo_gpu_offset(entry->bo);
1320                 dst = amdgpu_gart_get_vm_pde(p->adev, dst);
1321                 flags = AMDGPU_PTE_VALID;
1322         } else {
1323                 flags |= AMDGPU_PDE_PTE;
1324         }
1325
1326         if (entry->addr == dst &&
1327             entry->huge_page == !!(flags & AMDGPU_PDE_PTE))
1328                 return 0;
1329
1330         entry->addr = dst;
1331         entry->huge_page = !!(flags & AMDGPU_PDE_PTE);
1332
1333         if (use_cpu_update) {
1334                 r = amdgpu_bo_kmap(parent->bo, (void *)&pd_addr);
1335                 if (r)
1336                         return r;
1337
1338                 pde = pd_addr + (entry - parent->entries) * 8;
1339                 amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags);
1340         } else {
1341                 if (parent->bo->shadow) {
1342                         pd_addr = amdgpu_bo_gpu_offset(parent->bo->shadow);
1343                         pde = pd_addr + (entry - parent->entries) * 8;
1344                         amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
1345                 }
1346                 pd_addr = amdgpu_bo_gpu_offset(parent->bo);
1347                 pde = pd_addr + (entry - parent->entries) * 8;
1348                 amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
1349         }
1350
1351         return 0;
1352 }
1353
1354 /**
1355  * amdgpu_vm_update_ptes - make sure that page tables are valid
1356  *
1357  * @params: see amdgpu_pte_update_params definition
1358  * @vm: requested vm
1359  * @start: start of GPU address range
1360  * @end: end of GPU address range
1361  * @dst: destination address to map to, the next dst inside the function
1362  * @flags: mapping flags
1363  *
1364  * Update the page tables in the range @start - @end.
1365  * Returns 0 for success, -EINVAL for failure.
1366  */
1367 static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1368                                   uint64_t start, uint64_t end,
1369                                   uint64_t dst, uint64_t flags)
1370 {
1371         struct amdgpu_device *adev = params->adev;
1372         const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;
1373
1374         uint64_t addr, pe_start;
1375         struct amdgpu_bo *pt;
1376         unsigned nptes;
1377         bool use_cpu_update = (params->func == amdgpu_vm_cpu_set_ptes);
1378         int r;
1379
1380         /* walk over the address space and update the page tables */
1381         for (addr = start; addr < end; addr += nptes,
1382              dst += nptes * AMDGPU_GPU_PAGE_SIZE) {
1383                 struct amdgpu_vm_pt *entry, *parent;
1384
1385                 amdgpu_vm_get_entry(params, addr, &entry, &parent);
1386                 if (!entry)
1387                         return -ENOENT;
1388
1389                 if ((addr & ~mask) == (end & ~mask))
1390                         nptes = end - addr;
1391                 else
1392                         nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
1393
1394                 r = amdgpu_vm_handle_huge_pages(params, entry, parent,
1395                                                 nptes, dst, flags);
1396                 if (r)
1397                         return r;
1398
1399                 if (entry->huge_page)
1400                         continue;
1401
1402                 pt = entry->bo;
1403                 if (use_cpu_update) {
1404                         pe_start = (unsigned long)amdgpu_bo_kptr(pt);
1405                 } else {
1406                         if (pt->shadow) {
1407                                 pe_start = amdgpu_bo_gpu_offset(pt->shadow);
1408                                 pe_start += (addr & mask) * 8;
1409                                 params->func(params, pe_start, dst, nptes,
1410                                              AMDGPU_GPU_PAGE_SIZE, flags);
1411                         }
1412                         pe_start = amdgpu_bo_gpu_offset(pt);
1413                 }
1414
1415                 pe_start += (addr & mask) * 8;
1416                 params->func(params, pe_start, dst, nptes,
1417                              AMDGPU_GPU_PAGE_SIZE, flags);
1418         }
1419
1420         return 0;
1421 }
1422
1423 /*
1424  * amdgpu_vm_frag_ptes - add fragment information to PTEs
1425  *
1426  * @params: see amdgpu_pte_update_params definition
1427  * @vm: requested vm
1428  * @start: first PTE to handle
1429  * @end: last PTE to handle
1430  * @dst: addr those PTEs should point to
1431  * @flags: hw mapping flags
1432  * Returns 0 for success, -EINVAL for failure.
1433  */
1434 static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params  *params,
1435                                 uint64_t start, uint64_t end,
1436                                 uint64_t dst, uint64_t flags)
1437 {
1438         int r;
1439
1440         /**
1441          * The MC L1 TLB supports variable sized pages, based on a fragment
1442          * field in the PTE. When this field is set to a non-zero value, page
1443          * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
1444          * flags are considered valid for all PTEs within the fragment range
1445          * and corresponding mappings are assumed to be physically contiguous.
1446          *
1447          * The L1 TLB can store a single PTE for the whole fragment,
1448          * significantly increasing the space available for translation
1449          * caching. This leads to large improvements in throughput when the
1450          * TLB is under pressure.
1451          *
1452          * The L2 TLB distributes small and large fragments into two
1453          * asymmetric partitions. The large fragment cache is significantly
1454          * larger. Thus, we try to use large fragments wherever possible.
1455          * Userspace can support this by aligning virtual base address and
1456          * allocation size to the fragment size.
1457          */
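	/*
	 * Illustrative numbers: a fragment value of 4 gives a granularity of
	 * 1 << (12 + 4) = 64KB, which matches the 64KB optimization mentioned
	 * for SI and newer below.
	 */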
1458
1459         /* SI and newer are optimized for 64KB */
1460         unsigned pages_per_frag = AMDGPU_LOG2_PAGES_PER_FRAG(params->adev);
1461         uint64_t frag_flags = AMDGPU_PTE_FRAG(pages_per_frag);
1462         uint64_t frag_align = 1 << pages_per_frag;
1463
1464         uint64_t frag_start = ALIGN(start, frag_align);
1465         uint64_t frag_end = end & ~(frag_align - 1);
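        /*
         * Worked example (illustrative only, assuming pages_per_frag == 4,
         * i.e. 64KB fragments and frag_align == 16 GPU pages): for
         * start == 5 and end == 40 we get frag_start == 16 and
         * frag_end == 32, so [5..16) and [32..40) are written below without
         * the fragment flag, while [16..32) is written with
         * AMDGPU_PTE_FRAG(4) set and can be cached by the L1 TLB as a
         * single entry.
         */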
1466
1467         /* system pages are not physically contiguous */
1468         if (params->src || !(flags & AMDGPU_PTE_VALID) ||
1469             (frag_start >= frag_end))
1470                 return amdgpu_vm_update_ptes(params, start, end, dst, flags);
1471
1472         /* handle the 4K area at the beginning */
1473         if (start != frag_start) {
1474                 r = amdgpu_vm_update_ptes(params, start, frag_start,
1475                                           dst, flags);
1476                 if (r)
1477                         return r;
1478                 dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE;
1479         }
1480
1481         /* handle the area in the middle */
1482         r = amdgpu_vm_update_ptes(params, frag_start, frag_end, dst,
1483                                   flags | frag_flags);
1484         if (r)
1485                 return r;
1486
1487         /* handle the 4K area at the end */
1488         if (frag_end != end) {
1489                 dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE;
1490                 r = amdgpu_vm_update_ptes(params, frag_end, end, dst, flags);
1491         }
1492         return r;
1493 }
1494
1495 /**
1496  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
1497  *
1498  * @adev: amdgpu_device pointer
1499  * @exclusive: fence we need to sync to
1500  * @src: address where to copy page table entries from
1501  * @pages_addr: DMA addresses to use for mapping
1502  * @vm: requested vm
1503  * @start: start of mapped range
1504  * @last: last mapped entry
1505  * @flags: flags for the entries
1506  * @addr: addr to set the area to
1507  * @fence: optional resulting fence
1508  *
1509  * Fill in the page table entries between @start and @last.
1510  * Returns 0 for success, -EINVAL for failure.
1511  */
1512 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
1513                                        struct dma_fence *exclusive,
1514                                        uint64_t src,
1515                                        dma_addr_t *pages_addr,
1516                                        struct amdgpu_vm *vm,
1517                                        uint64_t start, uint64_t last,
1518                                        uint64_t flags, uint64_t addr,
1519                                        struct dma_fence **fence)
1520 {
1521         struct amdgpu_ring *ring;
1522         void *owner = AMDGPU_FENCE_OWNER_VM;
1523         unsigned nptes, ncmds, ndw;
1524         struct amdgpu_job *job;
1525         struct amdgpu_pte_update_params params;
1526         struct dma_fence *f = NULL;
1527         int r;
1528
1529         memset(&params, 0, sizeof(params));
1530         params.adev = adev;
1531         params.vm = vm;
1532         params.src = src;
1533
1534         /* sync to everything on unmapping */
1535         if (!(flags & AMDGPU_PTE_VALID))
1536                 owner = AMDGPU_FENCE_OWNER_UNDEFINED;
1537
1538         if (vm->use_cpu_for_update) {
1539                 /* params.src is used as a flag to indicate system memory */
1540                 if (pages_addr)
1541                         params.src = ~0;
1542
1543                 /* Wait for PT BOs to be free. PTs share the same resv. object
1544                  * as the root PD BO
1545                  */
1546                 r = amdgpu_vm_wait_pd(adev, vm, owner);
1547                 if (unlikely(r))
1548                         return r;
1549
1550                 params.func = amdgpu_vm_cpu_set_ptes;
1551                 params.pages_addr = pages_addr;
1552                 return amdgpu_vm_frag_ptes(&params, start, last + 1,
1553                                            addr, flags);
1554         }
1555
1556         ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
1557
1558         nptes = last - start + 1;
1559
1560         /*
1561          * reserve space for one command every (1 << BLOCK_SIZE)
1562          *  entries or 2k dwords (whatever is smaller)
1563          */
1564         ncmds = (nptes >> min(adev->vm_manager.block_size, 11u)) + 1;
1565
1566         /* padding, etc. */
1567         ndw = 64;
1568
1569         /* one PDE write for each huge page */
1570         ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6;
1571
1572         if (src) {
1573                 /* only copy commands needed */
1574                 ndw += ncmds * 7;
1575
1576                 params.func = amdgpu_vm_do_copy_ptes;
1577
1578         } else if (pages_addr) {
1579                 /* copy commands needed */
1580                 ndw += ncmds * 7;
1581
1582                 /* and also PTEs */
1583                 ndw += nptes * 2;
1584
1585                 params.func = amdgpu_vm_do_copy_ptes;
1586
1587         } else {
1588                 /* set page commands needed */
1589                 ndw += ncmds * 10;
1590
1591                 /* two extra commands for begin/end of fragment */
1592                 ndw += 2 * 10;
1593
1594                 params.func = amdgpu_vm_do_set_ptes;
1595         }
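        /*
         * Sizing example (illustrative only, assuming a block_size of 9 and
         * the set-pages path above): updating nptes == 1024 entries gives
         * ncmds == (1024 >> 9) + 1 == 3, so
         * ndw == 64 + 3 * 6 + 3 * 10 + 2 * 10 == 132 dwords and the IB
         * below is allocated with 132 * 4 == 528 bytes.
         */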
1596
1597         r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1598         if (r)
1599                 return r;
1600
1601         params.ib = &job->ibs[0];
1602
1603         if (!src && pages_addr) {
1604                 uint64_t *pte;
1605                 unsigned i;
1606
1607                 /* Put the PTEs at the end of the IB. */
1608                 i = ndw - nptes * 2;
1609                 pte = (uint64_t *)&(job->ibs->ptr[i]);
1610                 params.src = job->ibs->gpu_addr + i * 4;
1611
1612                 for (i = 0; i < nptes; ++i) {
1613                         pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
1614                                                     AMDGPU_GPU_PAGE_SIZE);
1615                         pte[i] |= flags;
1616                 }
1617                 addr = 0;
1618         }
1619
1620         r = amdgpu_sync_fence(adev, &job->sync, exclusive);
1621         if (r)
1622                 goto error_free;
1623
1624         r = amdgpu_sync_resv(adev, &job->sync, vm->root.bo->tbo.resv,
1625                              owner);
1626         if (r)
1627                 goto error_free;
1628
1629         r = reservation_object_reserve_shared(vm->root.bo->tbo.resv);
1630         if (r)
1631                 goto error_free;
1632
1633         r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
1634         if (r)
1635                 goto error_free;
1636
1637         amdgpu_ring_pad_ib(ring, params.ib);
1638         WARN_ON(params.ib->length_dw > ndw);
1639         r = amdgpu_job_submit(job, ring, &vm->entity,
1640                               AMDGPU_FENCE_OWNER_VM, &f);
1641         if (r)
1642                 goto error_free;
1643
1644         amdgpu_bo_fence(vm->root.bo, f, true);
1645         dma_fence_put(*fence);
1646         *fence = f;
1647         return 0;
1648
1649 error_free:
1650         amdgpu_job_free(job);
1651         amdgpu_vm_invalidate_level(&vm->root);
1652         return r;
1653 }
1654
1655 /**
1656  * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
1657  *
1658  * @adev: amdgpu_device pointer
1659  * @exclusive: fence we need to sync to
1660  * @gtt_flags: flags as they are used for GTT
1661  * @pages_addr: DMA addresses to use for mapping
1662  * @vm: requested vm
1663  * @mapping: mapped range and flags to use for the update
1664  * @flags: HW flags for the mapping
1665  * @nodes: array of drm_mm_nodes with the MC addresses
1666  * @fence: optional resulting fence
1667  *
1668  * Split the mapping into smaller chunks so that each update fits
1669  * into a SDMA IB.
1670  * Returns 0 for success, -EINVAL for failure.
1671  */
1672 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1673                                       struct dma_fence *exclusive,
1674                                       uint64_t gtt_flags,
1675                                       dma_addr_t *pages_addr,
1676                                       struct amdgpu_vm *vm,
1677                                       struct amdgpu_bo_va_mapping *mapping,
1678                                       uint64_t flags,
1679                                       struct drm_mm_node *nodes,
1680                                       struct dma_fence **fence)
1681 {
1682         uint64_t pfn, src = 0, start = mapping->start;
1683         int r;
1684
1685         /* Normally bo_va->flags only contains the READABLE and WRITEABLE bits,
1686          * but just in case we filter the flags here first.
1687          */
1688         if (!(mapping->flags & AMDGPU_PTE_READABLE))
1689                 flags &= ~AMDGPU_PTE_READABLE;
1690         if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1691                 flags &= ~AMDGPU_PTE_WRITEABLE;
1692
1693         flags &= ~AMDGPU_PTE_EXECUTABLE;
1694         flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
1695
1696         flags &= ~AMDGPU_PTE_MTYPE_MASK;
1697         flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);
1698
1699         if ((mapping->flags & AMDGPU_PTE_PRT) &&
1700             (adev->asic_type >= CHIP_VEGA10)) {
1701                 flags |= AMDGPU_PTE_PRT;
1702                 flags &= ~AMDGPU_PTE_VALID;
1703         }
1704
1705         trace_amdgpu_vm_bo_update(mapping);
1706
1707         pfn = mapping->offset >> PAGE_SHIFT;
1708         if (nodes) {
1709                 while (pfn >= nodes->size) {
1710                         pfn -= nodes->size;
1711                         ++nodes;
1712                 }
1713         }
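        /*
         * Example (illustrative only): for an offset of three pages into a
         * BO backed by drm_mm nodes of two and six pages, the loop above
         * skips the first node and leaves pfn == 1, i.e. the mapping starts
         * one page into the second node.
         */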
1714
1715         do {
1716                 uint64_t max_entries;
1717                 uint64_t addr, last;
1718
1719                 if (nodes) {
1720                         addr = nodes->start << PAGE_SHIFT;
1721                         max_entries = (nodes->size - pfn) *
1722                                 (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
1723                 } else {
1724                         addr = 0;
1725                         max_entries = S64_MAX;
1726                 }
1727
1728                 if (pages_addr) {
1729                         if (flags == gtt_flags)
1730                                 src = adev->gart.table_addr +
1731                                         (addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
1732                         else
1733                                 max_entries = min(max_entries, 16ull * 1024ull);
1734                         addr = 0;
1735                 } else if (flags & AMDGPU_PTE_VALID) {
1736                         addr += adev->vm_manager.vram_base_offset;
1737                 }
1738                 addr += pfn << PAGE_SHIFT;
1739
1740                 last = min((uint64_t)mapping->last, start + max_entries - 1);
1741                 r = amdgpu_vm_bo_update_mapping(adev, exclusive,
1742                                                 src, pages_addr, vm,
1743                                                 start, last, flags, addr,
1744                                                 fence);
1745                 if (r)
1746                         return r;
1747
1748                 pfn += last - start + 1;
1749                 if (nodes && nodes->size == pfn) {
1750                         pfn = 0;
1751                         ++nodes;
1752                 }
1753                 start = last + 1;
1754
1755         } while (unlikely(start != mapping->last + 1));
1756
1757         return 0;
1758 }
1759
1760 /**
1761  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1762  *
1763  * @adev: amdgpu_device pointer
1764  * @bo_va: requested BO and VM object
1765  * @clear: if true clear the entries
1766  *
1767  * Fill in the page table entries for @bo_va.
1768  * Returns 0 for success, -EINVAL for failure.
1769  */
1770 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1771                         struct amdgpu_bo_va *bo_va,
1772                         bool clear)
1773 {
1774         struct amdgpu_vm *vm = bo_va->vm;
1775         struct amdgpu_bo_va_mapping *mapping;
1776         dma_addr_t *pages_addr = NULL;
1777         uint64_t gtt_flags, flags;
1778         struct ttm_mem_reg *mem;
1779         struct drm_mm_node *nodes;
1780         struct dma_fence *exclusive;
1781         int r;
1782
1783         if (clear || !bo_va->bo) {
1784                 mem = NULL;
1785                 nodes = NULL;
1786                 exclusive = NULL;
1787         } else {
1788                 struct ttm_dma_tt *ttm;
1789
1790                 mem = &bo_va->bo->tbo.mem;
1791                 nodes = mem->mm_node;
1792                 if (mem->mem_type == TTM_PL_TT) {
1793                         ttm = container_of(bo_va->bo->tbo.ttm, struct
1794                                            ttm_dma_tt, ttm);
1795                         pages_addr = ttm->dma_address;
1796                 }
1797                 exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
1798         }
1799
1800         if (bo_va->bo) {
1801                 flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
1802                 gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
1803                         adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ?
1804                         flags : 0;
1805         } else {
1806                 flags = 0x0;
1807                 gtt_flags = ~0x0;
1808         }
1809
1810         spin_lock(&vm->status_lock);
1811         if (!list_empty(&bo_va->vm_status))
1812                 list_splice_init(&bo_va->valids, &bo_va->invalids);
1813         spin_unlock(&vm->status_lock);
1814
1815         list_for_each_entry(mapping, &bo_va->invalids, list) {
1816                 r = amdgpu_vm_bo_split_mapping(adev, exclusive,
1817                                                gtt_flags, pages_addr, vm,
1818                                                mapping, flags, nodes,
1819                                                &bo_va->last_pt_update);
1820                 if (r)
1821                         return r;
1822         }
1823
1824         if (trace_amdgpu_vm_bo_mapping_enabled()) {
1825                 list_for_each_entry(mapping, &bo_va->valids, list)
1826                         trace_amdgpu_vm_bo_mapping(mapping);
1827
1828                 list_for_each_entry(mapping, &bo_va->invalids, list)
1829                         trace_amdgpu_vm_bo_mapping(mapping);
1830         }
1831
1832         spin_lock(&vm->status_lock);
1833         list_splice_init(&bo_va->invalids, &bo_va->valids);
1834         list_del_init(&bo_va->vm_status);
1835         if (clear)
1836                 list_add(&bo_va->vm_status, &vm->cleared);
1837         spin_unlock(&vm->status_lock);
1838
1839         if (vm->use_cpu_for_update) {
1840                 /* Flush HDP */
1841                 mb();
1842                 amdgpu_gart_flush_gpu_tlb(adev, 0);
1843         }
1844
1845         return 0;
1846 }
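#if 0
/*
 * Illustrative sketch only (not part of the driver): hypothetical callers of
 * amdgpu_vm_bo_update(), assuming the BO and the VM page tables are already
 * reserved.  clear == false rewrites the PTEs for the BO's current placement,
 * clear == true clears them instead (e.g. while the BO is not validated).
 */
static int example_bo_moved(struct amdgpu_device *adev,
                            struct amdgpu_bo_va *bo_va)
{
        return amdgpu_vm_bo_update(adev, bo_va, false);
}

static int example_bo_not_validated(struct amdgpu_device *adev,
                                    struct amdgpu_bo_va *bo_va)
{
        return amdgpu_vm_bo_update(adev, bo_va, true);
}
#endif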
1847
1848 /**
1849  * amdgpu_vm_update_prt_state - update the global PRT state
1850  */
1851 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1852 {
1853         unsigned long flags;
1854         bool enable;
1855
1856         spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1857         enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1858         adev->gart.gart_funcs->set_prt(adev, enable);
1859         spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1860 }
1861
1862 /**
1863  * amdgpu_vm_prt_get - add a PRT user
1864  */
1865 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1866 {
1867         if (!adev->gart.gart_funcs->set_prt)
1868                 return;
1869
1870         if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1871                 amdgpu_vm_update_prt_state(adev);
1872 }
1873
1874 /**
1875  * amdgpu_vm_prt_put - drop a PRT user
1876  */
1877 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1878 {
1879         if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1880                 amdgpu_vm_update_prt_state(adev);
1881 }
1882
1883 /**
1884  * amdgpu_vm_prt_cb - callback for updating the PRT status
1885  */
1886 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1887 {
1888         struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1889
1890         amdgpu_vm_prt_put(cb->adev);
1891         kfree(cb);
1892 }
1893
1894 /**
1895  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1896  */
1897 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1898                                  struct dma_fence *fence)
1899 {
1900         struct amdgpu_prt_cb *cb;
1901
1902         if (!adev->gart.gart_funcs->set_prt)
1903                 return;
1904
1905         cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1906         if (!cb) {
1907                 /* Last resort when we are OOM */
1908                 if (fence)
1909                         dma_fence_wait(fence, false);
1910
1911                 amdgpu_vm_prt_put(adev);
1912         } else {
1913                 cb->adev = adev;
1914                 if (!fence || dma_fence_add_callback(fence, &cb->cb,
1915                                                      amdgpu_vm_prt_cb))
1916                         amdgpu_vm_prt_cb(fence, &cb->cb);
1917         }
1918 }
1919
1920 /**
1921  * amdgpu_vm_free_mapping - free a mapping
1922  *
1923  * @adev: amdgpu_device pointer
1924  * @vm: requested vm
1925  * @mapping: mapping to be freed
1926  * @fence: fence of the unmap operation
1927  *
1928  * Free a mapping and make sure we decrease the PRT usage count if applicable.
1929  */
1930 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1931                                    struct amdgpu_vm *vm,
1932                                    struct amdgpu_bo_va_mapping *mapping,
1933                                    struct dma_fence *fence)
1934 {
1935         if (mapping->flags & AMDGPU_PTE_PRT)
1936                 amdgpu_vm_add_prt_cb(adev, fence);
1937         kfree(mapping);
1938 }
1939
1940 /**
1941  * amdgpu_vm_prt_fini - finish all prt mappings
1942  *
1943  * @adev: amdgpu_device pointer
1944  * @vm: requested vm
1945  *
1946  * Register a cleanup callback to disable PRT support after VM dies.
1947  */
1948 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1949 {
1950         struct reservation_object *resv = vm->root.bo->tbo.resv;
1951         struct dma_fence *excl, **shared;
1952         unsigned i, shared_count;
1953         int r;
1954
1955         r = reservation_object_get_fences_rcu(resv, &excl,
1956                                               &shared_count, &shared);
1957         if (r) {
1958                 /* Not enough memory to grab the fence list; as a last resort
1959                  * block for all the fences to complete.
1960                  */
1961                 reservation_object_wait_timeout_rcu(resv, true, false,
1962                                                     MAX_SCHEDULE_TIMEOUT);
1963                 return;
1964         }
1965
1966         /* Add a callback for each fence in the reservation object */
1967         amdgpu_vm_prt_get(adev);
1968         amdgpu_vm_add_prt_cb(adev, excl);
1969
1970         for (i = 0; i < shared_count; ++i) {
1971                 amdgpu_vm_prt_get(adev);
1972                 amdgpu_vm_add_prt_cb(adev, shared[i]);
1973         }
1974
1975         kfree(shared);
1976 }
1977
1978 /**
1979  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1980  *
1981  * @adev: amdgpu_device pointer
1982  * @vm: requested vm
1983  * @fence: optional resulting fence (unchanged if no work needed to be done
1984  * or if an error occurred)
1985  *
1986  * Make sure all freed BOs are cleared in the PT.
1987  * Returns 0 for success.
1988  *
1989  * PTs have to be reserved and mutex must be locked!
1990  */
1991 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1992                           struct amdgpu_vm *vm,
1993                           struct dma_fence **fence)
1994 {
1995         struct amdgpu_bo_va_mapping *mapping;
1996         struct dma_fence *f = NULL;
1997         int r;
1998
1999         while (!list_empty(&vm->freed)) {
2000                 mapping = list_first_entry(&vm->freed,
2001                         struct amdgpu_bo_va_mapping, list);
2002                 list_del(&mapping->list);
2003
2004                 r = amdgpu_vm_bo_update_mapping(adev, NULL, 0, NULL, vm,
2005                                                 mapping->start, mapping->last,
2006                                                 0, 0, &f);
2007                 amdgpu_vm_free_mapping(adev, vm, mapping, f);
2008                 if (r) {
2009                         dma_fence_put(f);
2010                         return r;
2011                 }
2012         }
2013
2014         if (fence && f) {
2015                 dma_fence_put(*fence);
2016                 *fence = f;
2017         } else {
2018                 dma_fence_put(f);
2019         }
2020
2021         return 0;
2022
2023 }
2024
2025 /**
2026  * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
2027  *
2028  * @adev: amdgpu_device pointer
2029  * @vm: requested vm
2030  *
2031  * Make sure all invalidated BOs are cleared in the PT.
2032  * Returns 0 for success.
2033  *
2034  * PTs have to be reserved and mutex must be locked!
2035  */
2036 int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
2037                              struct amdgpu_vm *vm, struct amdgpu_sync *sync)
2038 {
2039         struct amdgpu_bo_va *bo_va = NULL;
2040         int r = 0;
2041
2042         spin_lock(&vm->status_lock);
2043         while (!list_empty(&vm->invalidated)) {
2044                 bo_va = list_first_entry(&vm->invalidated,
2045                         struct amdgpu_bo_va, vm_status);
2046                 spin_unlock(&vm->status_lock);
2047
2048                 r = amdgpu_vm_bo_update(adev, bo_va, true);
2049                 if (r)
2050                         return r;
2051
2052                 spin_lock(&vm->status_lock);
2053         }
2054         spin_unlock(&vm->status_lock);
2055
2056         if (bo_va)
2057                 r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
2058
2059         return r;
2060 }
2061
2062 /**
2063  * amdgpu_vm_bo_add - add a bo to a specific vm
2064  *
2065  * @adev: amdgpu_device pointer
2066  * @vm: requested vm
2067  * @bo: amdgpu buffer object
2068  *
2069  * Add @bo into the requested vm.
2070  * Add @bo to the list of bos associated with the vm
2071  * Returns newly added bo_va or NULL for failure
2072  *
2073  * Object has to be reserved!
2074  */
2075 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
2076                                       struct amdgpu_vm *vm,
2077                                       struct amdgpu_bo *bo)
2078 {
2079         struct amdgpu_bo_va *bo_va;
2080
2081         bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
2082         if (bo_va == NULL) {
2083                 return NULL;
2084         }
2085         bo_va->vm = vm;
2086         bo_va->bo = bo;
2087         bo_va->ref_count = 1;
2088         INIT_LIST_HEAD(&bo_va->bo_list);
2089         INIT_LIST_HEAD(&bo_va->valids);
2090         INIT_LIST_HEAD(&bo_va->invalids);
2091         INIT_LIST_HEAD(&bo_va->vm_status);
2092
2093         if (bo)
2094                 list_add_tail(&bo_va->bo_list, &bo->va);
2095
2096         return bo_va;
2097 }
2098
2099 /**
2100  * amdgpu_vm_bo_map - map bo inside a vm
2101  *
2102  * @adev: amdgpu_device pointer
2103  * @bo_va: bo_va to store the address
2104  * @saddr: where to map the BO
2105  * @offset: requested offset in the BO
2106  * @flags: attributes of pages (read/write/valid/etc.)
2107  *
2108  * Add a mapping of the BO at the specified addr into the VM.
2109  * Returns 0 for success, error for failure.
2110  *
2111  * Object has to be reserved and unreserved outside!
2112  */
2113 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2114                      struct amdgpu_bo_va *bo_va,
2115                      uint64_t saddr, uint64_t offset,
2116                      uint64_t size, uint64_t flags)
2117 {
2118         struct amdgpu_bo_va_mapping *mapping, *tmp;
2119         struct amdgpu_vm *vm = bo_va->vm;
2120         uint64_t eaddr;
2121
2122         /* validate the parameters */
2123         if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2124             size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2125                 return -EINVAL;
2126
2127         /* make sure object fit at this offset */
2128         eaddr = saddr + size - 1;
2129         if (saddr >= eaddr ||
2130             (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
2131                 return -EINVAL;
2132
2133         saddr /= AMDGPU_GPU_PAGE_SIZE;
2134         eaddr /= AMDGPU_GPU_PAGE_SIZE;
2135
2136         tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2137         if (tmp) {
2138                 /* bo and tmp overlap, invalid addr */
2139                 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
2140                         "0x%010Lx-0x%010Lx\n", bo_va->bo, saddr, eaddr,
2141                         tmp->start, tmp->last + 1);
2142                 return -EINVAL;
2143         }
2144
2145         mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2146         if (!mapping)
2147                 return -ENOMEM;
2148
2149         INIT_LIST_HEAD(&mapping->list);
2150         mapping->start = saddr;
2151         mapping->last = eaddr;
2152         mapping->offset = offset;
2153         mapping->flags = flags;
2154
2155         list_add(&mapping->list, &bo_va->invalids);
2156         amdgpu_vm_it_insert(mapping, &vm->va);
2157
2158         if (flags & AMDGPU_PTE_PRT)
2159                 amdgpu_vm_prt_get(adev);
2160
2161         return 0;
2162 }
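#if 0
/*
 * Illustrative sketch only (not part of the driver): a hypothetical helper
 * that maps a whole BO read/write at a caller-chosen virtual address.  It
 * assumes the BO is already reserved, its size is GPU page aligned and
 * va_addr is GPU page aligned and does not overlap an existing mapping.
 */
static int example_map_whole_bo(struct amdgpu_device *adev,
                                struct amdgpu_vm *vm,
                                struct amdgpu_bo *bo,
                                uint64_t va_addr)
{
        struct amdgpu_bo_va *bo_va;
        int r;

        bo_va = amdgpu_vm_bo_add(adev, vm, bo);
        if (!bo_va)
                return -ENOMEM;

        r = amdgpu_vm_bo_map(adev, bo_va, va_addr, 0, amdgpu_bo_size(bo),
                             AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
        if (r)
                amdgpu_vm_bo_rmv(adev, bo_va);

        return r;
}
#endif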
2163
2164 /**
2165  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
2166  *
2167  * @adev: amdgpu_device pointer
2168  * @bo_va: bo_va to store the address
2169  * @saddr: where to map the BO
2170  * @offset: requested offset in the BO
2171  * @flags: attributes of pages (read/write/valid/etc.)
2172  *
2173  * Add a mapping of the BO at the specified addr into the VM. Replace existing
2174  * mappings as we do so.
2175  * Returns 0 for success, error for failure.
2176  *
2177  * Object has to be reserved and unreserved outside!
2178  */
2179 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
2180                              struct amdgpu_bo_va *bo_va,
2181                              uint64_t saddr, uint64_t offset,
2182                              uint64_t size, uint64_t flags)
2183 {
2184         struct amdgpu_bo_va_mapping *mapping;
2185         struct amdgpu_vm *vm = bo_va->vm;
2186         uint64_t eaddr;
2187         int r;
2188
2189         /* validate the parameters */
2190         if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2191             size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2192                 return -EINVAL;
2193
2194         /* make sure object fit at this offset */
2195         eaddr = saddr + size - 1;
2196         if (saddr >= eaddr ||
2197             (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
2198                 return -EINVAL;
2199
2200         /* Allocate all the needed memory */
2201         mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2202         if (!mapping)
2203                 return -ENOMEM;
2204
2205         r = amdgpu_vm_bo_clear_mappings(adev, bo_va->vm, saddr, size);
2206         if (r) {
2207                 kfree(mapping);
2208                 return r;
2209         }
2210
2211         saddr /= AMDGPU_GPU_PAGE_SIZE;
2212         eaddr /= AMDGPU_GPU_PAGE_SIZE;
2213
2214         mapping->start = saddr;
2215         mapping->last = eaddr;
2216         mapping->offset = offset;
2217         mapping->flags = flags;
2218
2219         list_add(&mapping->list, &bo_va->invalids);
2220         amdgpu_vm_it_insert(mapping, &vm->va);
2221
2222         if (flags & AMDGPU_PTE_PRT)
2223                 amdgpu_vm_prt_get(adev);
2224
2225         return 0;
2226 }
2227
2228 /**
2229  * amdgpu_vm_bo_unmap - remove bo mapping from vm
2230  *
2231  * @adev: amdgpu_device pointer
2232  * @bo_va: bo_va to remove the address from
2233  * @saddr: where the BO is mapped
2234  *
2235  * Remove a mapping of the BO at the specified addr from the VM.
2236  * Returns 0 for success, error for failure.
2237  *
2238  * Object has to be reserved and unreserved outside!
2239  */
2240 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
2241                        struct amdgpu_bo_va *bo_va,
2242                        uint64_t saddr)
2243 {
2244         struct amdgpu_bo_va_mapping *mapping;
2245         struct amdgpu_vm *vm = bo_va->vm;
2246         bool valid = true;
2247
2248         saddr /= AMDGPU_GPU_PAGE_SIZE;
2249
2250         list_for_each_entry(mapping, &bo_va->valids, list) {
2251                 if (mapping->start == saddr)
2252                         break;
2253         }
2254
2255         if (&mapping->list == &bo_va->valids) {
2256                 valid = false;
2257
2258                 list_for_each_entry(mapping, &bo_va->invalids, list) {
2259                         if (mapping->start == saddr)
2260                                 break;
2261                 }
2262
2263                 if (&mapping->list == &bo_va->invalids)
2264                         return -ENOENT;
2265         }
2266
2267         list_del(&mapping->list);
2268         amdgpu_vm_it_remove(mapping, &vm->va);
2269         trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2270
2271         if (valid)
2272                 list_add(&mapping->list, &vm->freed);
2273         else
2274                 amdgpu_vm_free_mapping(adev, vm, mapping,
2275                                        bo_va->last_pt_update);
2276
2277         return 0;
2278 }
2279
2280 /**
2281  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2282  *
2283  * @adev: amdgpu_device pointer
2284  * @vm: VM structure to use
2285  * @saddr: start of the range
2286  * @size: size of the range
2287  *
2288  * Remove all mappings in a range, split them as appropriate.
2289  * Returns 0 for success, error for failure.
2290  */
2291 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
2292                                 struct amdgpu_vm *vm,
2293                                 uint64_t saddr, uint64_t size)
2294 {
2295         struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
2296         LIST_HEAD(removed);
2297         uint64_t eaddr;
2298
2299         eaddr = saddr + size - 1;
2300         saddr /= AMDGPU_GPU_PAGE_SIZE;
2301         eaddr /= AMDGPU_GPU_PAGE_SIZE;
2302
2303         /* Allocate all the needed memory */
2304         before = kzalloc(sizeof(*before), GFP_KERNEL);
2305         if (!before)
2306                 return -ENOMEM;
2307         INIT_LIST_HEAD(&before->list);
2308
2309         after = kzalloc(sizeof(*after), GFP_KERNEL);
2310         if (!after) {
2311                 kfree(before);
2312                 return -ENOMEM;
2313         }
2314         INIT_LIST_HEAD(&after->list);
2315
2316         /* Now gather all removed mappings */
2317         tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2318         while (tmp) {
2319                 /* Remember mapping split at the start */
2320                 if (tmp->start < saddr) {
2321                         before->start = tmp->start;
2322                         before->last = saddr - 1;
2323                         before->offset = tmp->offset;
2324                         before->flags = tmp->flags;
2325                         list_add(&before->list, &tmp->list);
2326                 }
2327
2328                 /* Remember mapping split at the end */
2329                 if (tmp->last > eaddr) {
2330                         after->start = eaddr + 1;
2331                         after->last = tmp->last;
2332                         after->offset = tmp->offset;
2333                         after->offset += after->start - tmp->start;
2334                         after->flags = tmp->flags;
2335                         list_add(&after->list, &tmp->list);
2336                 }
2337
2338                 list_del(&tmp->list);
2339                 list_add(&tmp->list, &removed);
2340
2341                 tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2342         }
2343
2344         /* And free them up */
2345         list_for_each_entry_safe(tmp, next, &removed, list) {
2346                 amdgpu_vm_it_remove(tmp, &vm->va);
2347                 list_del(&tmp->list);
2348
2349                 if (tmp->start < saddr)
2350                     tmp->start = saddr;
2351                 if (tmp->last > eaddr)
2352                     tmp->last = eaddr;
2353
2354                 list_add(&tmp->list, &vm->freed);
2355                 trace_amdgpu_vm_bo_unmap(NULL, tmp);
2356         }
2357
2358         /* Insert partial mapping before the range */
2359         if (!list_empty(&before->list)) {
2360                 amdgpu_vm_it_insert(before, &vm->va);
2361                 if (before->flags & AMDGPU_PTE_PRT)
2362                         amdgpu_vm_prt_get(adev);
2363         } else {
2364                 kfree(before);
2365         }
2366
2367         /* Insert partial mapping after the range */
2368         if (!list_empty(&after->list)) {
2369                 amdgpu_vm_it_insert(after, &vm->va);
2370                 if (after->flags & AMDGPU_PTE_PRT)
2371                         amdgpu_vm_prt_get(adev);
2372         } else {
2373                 kfree(after);
2374         }
2375
2376         return 0;
2377 }
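/*
 * Worked example (illustrative only): with an existing mapping covering GPU
 * pages [0x100, 0x1ff], clearing the range [0x140, 0x17f] leaves a "before"
 * remainder [0x100, 0x13f] and an "after" remainder [0x180, 0x1ff] in the
 * interval tree, while the overlapped piece is trimmed to [0x140, 0x17f] and
 * queued on vm->freed until amdgpu_vm_clear_freed() invalidates its PTEs.
 */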
2378
2379 /**
2380  * amdgpu_vm_bo_rmv - remove a bo from a specific vm
2381  *
2382  * @adev: amdgpu_device pointer
2383  * @bo_va: requested bo_va
2384  *
2385  * Remove @bo_va->bo from the requested vm.
2386  *
2387  * Object has to be reserved!
2388  */
2389 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2390                       struct amdgpu_bo_va *bo_va)
2391 {
2392         struct amdgpu_bo_va_mapping *mapping, *next;
2393         struct amdgpu_vm *vm = bo_va->vm;
2394
2395         list_del(&bo_va->bo_list);
2396
2397         spin_lock(&vm->status_lock);
2398         list_del(&bo_va->vm_status);
2399         spin_unlock(&vm->status_lock);
2400
2401         list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2402                 list_del(&mapping->list);
2403                 amdgpu_vm_it_remove(mapping, &vm->va);
2404                 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2405                 list_add(&mapping->list, &vm->freed);
2406         }
2407         list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2408                 list_del(&mapping->list);
2409                 amdgpu_vm_it_remove(mapping, &vm->va);
2410                 amdgpu_vm_free_mapping(adev, vm, mapping,
2411                                        bo_va->last_pt_update);
2412         }
2413
2414         dma_fence_put(bo_va->last_pt_update);
2415         kfree(bo_va);
2416 }
2417
2418 /**
2419  * amdgpu_vm_bo_invalidate - mark the bo as invalid
2420  *
2421  * @adev: amdgpu_device pointer
2422  * @bo: amdgpu buffer object
2423  *
2424  * Mark @bo as invalid and move all of its bo_vas to the invalidated
2425  * list of their respective VMs.
2426  */
2427 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2428                              struct amdgpu_bo *bo)
2429 {
2430         struct amdgpu_bo_va *bo_va;
2431
2432         list_for_each_entry(bo_va, &bo->va, bo_list) {
2433                 spin_lock(&bo_va->vm->status_lock);
2434                 if (list_empty(&bo_va->vm_status))
2435                         list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
2436                 spin_unlock(&bo_va->vm->status_lock);
2437         }
2438 }
2439
2440 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2441 {
2442         /* Total bits covered by PD + PTs */
2443         unsigned bits = ilog2(vm_size) + 18;
2444
2445         /* Make sure the PD is 4K in size up to 8GB address space.
2446          * Above that split equally between PD and PTs */
2447         if (vm_size <= 8)
2448                 return (bits - 9);
2449         else
2450                 return ((bits + 3) / 2);
2451 }
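/*
 * Worked examples (illustrative only): for vm_size == 8 (GB) the page index
 * needs bits == ilog2(8) + 18 == 21 bits, so the block size is 21 - 9 == 12
 * and the PD keeps 512 entries (4K).  For vm_size == 64 the index needs
 * 24 bits and the split is (24 + 3) / 2 == 13 bits per page table.
 */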
2452
2453 /**
2454  * amdgpu_vm_adjust_size - adjust vm size and block size
2455  *
2456  * @adev: amdgpu_device pointer
2457  * @vm_size: the default vm size to use if it is set to auto
2458  */
2459 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size)
2460 {
2461         /* adjust vm size first */
2462         if (amdgpu_vm_size == -1)
2463                 adev->vm_manager.vm_size = vm_size;
2464         else
2465                 adev->vm_manager.vm_size = amdgpu_vm_size;
2466
2467         /* block size depends on vm size */
2468         if (amdgpu_vm_block_size == -1)
2469                 adev->vm_manager.block_size =
2470                         amdgpu_vm_get_block_size(adev->vm_manager.vm_size);
2471         else
2472                 adev->vm_manager.block_size = amdgpu_vm_block_size;
2473
2474         DRM_INFO("vm size is %llu GB, block size is %u-bit\n",
2475                 adev->vm_manager.vm_size, adev->vm_manager.block_size);
2476 }
2477
2478 /**
2479  * amdgpu_vm_init - initialize a vm instance
2480  *
2481  * @adev: amdgpu_device pointer
2482  * @vm: requested vm
2483  * @vm_context: Indicates if it is a GFX or Compute context
2484  *
2485  * Init @vm fields.
2486  */
2487 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2488                    int vm_context)
2489 {
2490         const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
2491                 AMDGPU_VM_PTE_COUNT(adev) * 8);
2492         unsigned ring_instance;
2493         struct amdgpu_ring *ring;
2494         struct amd_sched_rq *rq;
2495         int r, i;
2496         u64 flags;
2497
2498         vm->va = RB_ROOT;
2499         vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
2500         for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2501                 vm->reserved_vmid[i] = NULL;
2502         spin_lock_init(&vm->status_lock);
2503         INIT_LIST_HEAD(&vm->invalidated);
2504         INIT_LIST_HEAD(&vm->cleared);
2505         INIT_LIST_HEAD(&vm->freed);
2506
2507         /* create scheduler entity for page table updates */
2508
2509         ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
2510         ring_instance %= adev->vm_manager.vm_pte_num_rings;
2511         ring = adev->vm_manager.vm_pte_rings[ring_instance];
2512         rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
2513         r = amd_sched_entity_init(&ring->sched, &vm->entity,
2514                                   rq, amdgpu_sched_jobs);
2515         if (r)
2516                 return r;
2517
2518         if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
2519                 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2520                                                 AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2521         else
2522                 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2523                                                 AMDGPU_VM_USE_CPU_FOR_GFX);
2524         DRM_DEBUG_DRIVER("VM update mode is %s\n",
2525                          vm->use_cpu_for_update ? "CPU" : "SDMA");
2526         WARN_ONCE((vm->use_cpu_for_update && !amdgpu_vm_is_large_bar(adev)),
2527                   "CPU update of VM recommended only for large BAR system\n");
2528         vm->last_dir_update = NULL;
2529
2530         flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
2531                         AMDGPU_GEM_CREATE_VRAM_CLEARED;
2532         if (vm->use_cpu_for_update)
2533                 flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
2534         else
2535                 flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
2536                                 AMDGPU_GEM_CREATE_SHADOW);
2537
2538         r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true,
2539                              AMDGPU_GEM_DOMAIN_VRAM,
2540                              flags,
2541                              NULL, NULL, &vm->root.bo);
2542         if (r)
2543                 goto error_free_sched_entity;
2544
2545         r = amdgpu_bo_reserve(vm->root.bo, false);
2546         if (r)
2547                 goto error_free_root;
2548
2549         vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
2550
2551         if (vm->use_cpu_for_update) {
2552                 r = amdgpu_bo_kmap(vm->root.bo, NULL);
2553                 if (r)
2554                         goto error_free_root;
2555         }
2556
2557         amdgpu_bo_unreserve(vm->root.bo);
2558
2559         return 0;
2560
2561 error_free_root:
2562         amdgpu_bo_unref(&vm->root.bo->shadow);
2563         amdgpu_bo_unref(&vm->root.bo);
2564         vm->root.bo = NULL;
2565
2566 error_free_sched_entity:
2567         amd_sched_entity_fini(&ring->sched, &vm->entity);
2568
2569         return r;
2570 }
2571
2572 /**
2573  * amdgpu_vm_free_levels - free PD/PT levels
2574  *
2575  * @level: PD/PT starting level to free
2576  *
2577  * Free the page directory or page table level and all sub levels.
2578  */
2579 static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
2580 {
2581         unsigned i;
2582
2583         if (level->bo) {
2584                 amdgpu_bo_unref(&level->bo->shadow);
2585                 amdgpu_bo_unref(&level->bo);
2586         }
2587
2588         if (level->entries)
2589                 for (i = 0; i <= level->last_entry_used; i++)
2590                         amdgpu_vm_free_levels(&level->entries[i]);
2591
2592         kvfree(level->entries);
2593 }
2594
2595 /**
2596  * amdgpu_vm_fini - tear down a vm instance
2597  *
2598  * @adev: amdgpu_device pointer
2599  * @vm: requested vm
2600  *
2601  * Tear down @vm.
2602  * Unbind the VM and remove all bos from the vm bo list
2603  */
2604 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2605 {
2606         struct amdgpu_bo_va_mapping *mapping, *tmp;
2607         bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
2608         int i;
2609
2610         amd_sched_entity_fini(vm->entity.sched, &vm->entity);
2611
2612         if (!RB_EMPTY_ROOT(&vm->va)) {
2613                 dev_err(adev->dev, "still active bo inside vm\n");
2614         }
2615         rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, rb) {
2616                 list_del(&mapping->list);
2617                 amdgpu_vm_it_remove(mapping, &vm->va);
2618                 kfree(mapping);
2619         }
2620         list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2621                 if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
2622                         amdgpu_vm_prt_fini(adev, vm);
2623                         prt_fini_needed = false;
2624                 }
2625
2626                 list_del(&mapping->list);
2627                 amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2628         }
2629
2630         amdgpu_vm_free_levels(&vm->root);
2631         dma_fence_put(vm->last_dir_update);
2632         for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2633                 amdgpu_vm_free_reserved_vmid(adev, vm, i);
2634 }
2635
2636 /**
2637  * amdgpu_vm_manager_init - init the VM manager
2638  *
2639  * @adev: amdgpu_device pointer
2640  *
2641  * Initialize the VM manager structures
2642  */
2643 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2644 {
2645         unsigned i, j;
2646
2647         for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
2648                 struct amdgpu_vm_id_manager *id_mgr =
2649                         &adev->vm_manager.id_mgr[i];
2650
2651                 mutex_init(&id_mgr->lock);
2652                 INIT_LIST_HEAD(&id_mgr->ids_lru);
2653                 atomic_set(&id_mgr->reserved_vmid_num, 0);
2654
2655                 /* skip over VMID 0, since it is the system VM */
2656                 for (j = 1; j < id_mgr->num_ids; ++j) {
2657                         amdgpu_vm_reset_id(adev, i, j);
2658                         amdgpu_sync_create(&id_mgr->ids[j].active);
2659                         list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
2660                 }
2661         }
2662
2663         adev->vm_manager.fence_context =
2664                 dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2665         for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2666                 adev->vm_manager.seqno[i] = 0;
2667
2668         atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
2669         atomic64_set(&adev->vm_manager.client_counter, 0);
2670         spin_lock_init(&adev->vm_manager.prt_lock);
2671         atomic_set(&adev->vm_manager.num_prt_users, 0);
2672
2673         /* If not overridden by the user, compute VM tables are updated by the
2674          * CPU only on large BAR systems by default.
2675          */
2676 #ifdef CONFIG_X86_64
2677         if (amdgpu_vm_update_mode == -1) {
2678                 if (amdgpu_vm_is_large_bar(adev))
2679                         adev->vm_manager.vm_update_mode =
2680                                 AMDGPU_VM_USE_CPU_FOR_COMPUTE;
2681                 else
2682                         adev->vm_manager.vm_update_mode = 0;
2683         } else
2684                 adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
2685 #else
2686         adev->vm_manager.vm_update_mode = 0;
2687 #endif
2688
2689 }
2690
2691 /**
2692  * amdgpu_vm_manager_fini - cleanup VM manager
2693  *
2694  * @adev: amdgpu_device pointer
2695  *
2696  * Cleanup the VM manager and free resources.
2697  */
2698 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2699 {
2700         unsigned i, j;
2701
2702         for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
2703                 struct amdgpu_vm_id_manager *id_mgr =
2704                         &adev->vm_manager.id_mgr[i];
2705
2706                 mutex_destroy(&id_mgr->lock);
2707                 for (j = 0; j < AMDGPU_NUM_VM; ++j) {
2708                         struct amdgpu_vm_id *id = &id_mgr->ids[j];
2709
2710                         amdgpu_sync_free(&id->active);
2711                         dma_fence_put(id->flushed_updates);
2712                         dma_fence_put(id->last_flush);
2713                 }
2714         }
2715 }
2716
2717 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2718 {
2719         union drm_amdgpu_vm *args = data;
2720         struct amdgpu_device *adev = dev->dev_private;
2721         struct amdgpu_fpriv *fpriv = filp->driver_priv;
2722         int r;
2723
2724         switch (args->in.op) {
2725         case AMDGPU_VM_OP_RESERVE_VMID:
2726                 /* currently we only need to reserve a VMID from the gfxhub */
2727                 r = amdgpu_vm_alloc_reserved_vmid(adev, &fpriv->vm,
2728                                                   AMDGPU_GFXHUB);
2729                 if (r)
2730                         return r;
2731                 break;
2732         case AMDGPU_VM_OP_UNRESERVE_VMID:
2733                 amdgpu_vm_free_reserved_vmid(adev, &fpriv->vm, AMDGPU_GFXHUB);
2734                 break;
2735         default:
2736                 return -EINVAL;
2737         }
2738
2739         return 0;
2740 }
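#if 0
/*
 * Illustrative userspace-side sketch only (not part of this file): reserving
 * and releasing a dedicated GFX hub VMID through the ioctl handled above.
 * It assumes an open DRM file descriptor and the amdgpu_drm.h uapi header
 * (which provides DRM_IOCTL_AMDGPU_VM); error handling is left to the caller.
 */
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

static int example_set_reserved_vmid(int drm_fd, int reserve)
{
        union drm_amdgpu_vm args = {};

        args.in.op = reserve ? AMDGPU_VM_OP_RESERVE_VMID :
                               AMDGPU_VM_OP_UNRESERVE_VMID;

        return ioctl(drm_fd, DRM_IOCTL_AMDGPU_VM, &args);
}
#endif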