drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1 /*
2  * Copyright 2014-2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 #include <linux/dma-buf.h>
23 #include <linux/list.h>
24 #include <linux/pagemap.h>
25 #include <linux/sched/mm.h>
26 #include <linux/sched/task.h>
27
28 #include "amdgpu_object.h"
29 #include "amdgpu_vm.h"
30 #include "amdgpu_amdkfd.h"
31 #include "amdgpu_dma_buf.h"
32 #include <uapi/linux/kfd_ioctl.h>
33
34 /* BO flag to indicate a KFD userptr BO */
35 #define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
36
37 /* Userptr restore delay, just long enough to allow consecutive VM
38  * changes to accumulate
39  */
40 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
41
42 /* Impose limit on how much memory KFD can use */
43 static struct {
44         uint64_t max_system_mem_limit;
45         uint64_t max_ttm_mem_limit;
46         int64_t system_mem_used;
47         int64_t ttm_mem_used;
48         spinlock_t mem_limit_lock;
49 } kfd_mem_limit;
50
51 /* Struct used for amdgpu_amdkfd_bo_validate */
52 struct amdgpu_vm_parser {
53         uint32_t        domain;
54         bool            wait;
55 };
56
57 static const char * const domain_bit_to_string[] = {
58                 "CPU",
59                 "GTT",
60                 "VRAM",
61                 "GDS",
62                 "GWS",
63                 "OA"
64 };
65
66 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
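/* Illustrative example (domain bit values as defined in amdgpu_drm.h): for
 * AMDGPU_GEM_DOMAIN_VRAM (0x4), ffs() returns 3, so domain_string() yields
 * "VRAM"; for AMDGPU_GEM_DOMAIN_GTT (0x2) it yields "GTT".
 */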
67
68 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
69
70
71 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
72 {
73         return (struct amdgpu_device *)kgd;
74 }
75
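/* Return true if the BO described by @mem has not yet been added to @avm,
 * i.e. no entry in mem->bo_va_list belongs to that VM.
 */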
76 static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
77                 struct kgd_mem *mem)
78 {
79         struct kfd_bo_va_list *entry;
80
81         list_for_each_entry(entry, &mem->bo_va_list, bo_list)
82                 if (entry->bo_va->base.vm == avm)
83                         return false;
84
85         return true;
86 }
87
88 /* Set memory usage limits. Currently, the limits are
89  *  System (TTM + userptr) memory - 15/16th System RAM
90  *  TTM memory - 3/8th System RAM
91  */
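/* Illustrative numbers (not from this file): with 64 GiB of usable RAM,
 * max_system_mem_limit = 64 GiB - (64 GiB >> 4) = 60 GiB and
 * max_ttm_mem_limit = (64 GiB >> 1) - (64 GiB >> 3) = 24 GiB.
 */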
92 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
93 {
94         struct sysinfo si;
95         uint64_t mem;
96
97         si_meminfo(&si);
98         mem = si.totalram - si.totalhigh;
99         mem *= si.mem_unit;
100
101         spin_lock_init(&kfd_mem_limit.mem_limit_lock);
102         kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
103         kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
104         pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
105                 (kfd_mem_limit.max_system_mem_limit >> 20),
106                 (kfd_mem_limit.max_ttm_mem_limit >> 20));
107 }
108
109 /* Estimate page table size needed to represent a given memory size
110  *
111  * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
112  * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
113  * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
114  * for 2MB pages for TLB efficiency. However, small allocations and
115  * fragmented system memory still need some 4KB pages. We choose a
116  * compromise that should work in most cases without reserving too
117  * much memory for page tables unnecessarily (factor 16K, >> 14).
118  */
119 #define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
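/* Illustrative example: ESTIMATE_PT_SIZE(64ULL << 30) evaluates to 4 MiB,
 * i.e. roughly 4 MiB is set aside for page tables per 64 GiB of managed
 * memory.
 */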
120
121 static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
122                 uint64_t size, u32 domain, bool sg)
123 {
124         uint64_t reserved_for_pt =
125                 ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
126         size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
127         int ret = 0;
128
129         acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
130                                        sizeof(struct amdgpu_bo));
131
132         vram_needed = 0;
133         if (domain == AMDGPU_GEM_DOMAIN_GTT) {
134                 /* TTM GTT memory */
135                 system_mem_needed = acc_size + size;
136                 ttm_mem_needed = acc_size + size;
137         } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
138                 /* Userptr */
139                 system_mem_needed = acc_size + size;
140                 ttm_mem_needed = acc_size;
141         } else {
142                 /* VRAM and SG */
143                 system_mem_needed = acc_size;
144                 ttm_mem_needed = acc_size;
145                 if (domain == AMDGPU_GEM_DOMAIN_VRAM)
146                         vram_needed = size;
147         }
148
149         spin_lock(&kfd_mem_limit.mem_limit_lock);
150
151         if (kfd_mem_limit.system_mem_used + system_mem_needed >
152             kfd_mem_limit.max_system_mem_limit)
153                 pr_debug("Set no_system_mem_limit=1 if using shared memory\n");
154
155         if ((kfd_mem_limit.system_mem_used + system_mem_needed >
156              kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
157             (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
158              kfd_mem_limit.max_ttm_mem_limit) ||
159             (adev->kfd.vram_used + vram_needed >
160              adev->gmc.real_vram_size - reserved_for_pt)) {
161                 ret = -ENOMEM;
162         } else {
163                 kfd_mem_limit.system_mem_used += system_mem_needed;
164                 kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
165                 adev->kfd.vram_used += vram_needed;
166         }
167
168         spin_unlock(&kfd_mem_limit.mem_limit_lock);
169         return ret;
170 }
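/* Accounting summary of the cases above (acc = TTM accounting overhead):
 *   GTT:                 system += size + acc, ttm += size + acc
 *   Userptr (CPU, !sg):  system += size + acc, ttm += acc
 *   VRAM:                system += acc,        ttm += acc, vram += size
 *   SG (doorbell/MMIO):  system += acc,        ttm += acc
 */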
171
172 static void unreserve_mem_limit(struct amdgpu_device *adev,
173                 uint64_t size, u32 domain, bool sg)
174 {
175         size_t acc_size;
176
177         acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
178                                        sizeof(struct amdgpu_bo));
179
180         spin_lock(&kfd_mem_limit.mem_limit_lock);
181         if (domain == AMDGPU_GEM_DOMAIN_GTT) {
182                 kfd_mem_limit.system_mem_used -= (acc_size + size);
183                 kfd_mem_limit.ttm_mem_used -= (acc_size + size);
184         } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
185                 kfd_mem_limit.system_mem_used -= (acc_size + size);
186                 kfd_mem_limit.ttm_mem_used -= acc_size;
187         } else {
188                 kfd_mem_limit.system_mem_used -= acc_size;
189                 kfd_mem_limit.ttm_mem_used -= acc_size;
190                 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
191                         adev->kfd.vram_used -= size;
192                         WARN_ONCE(adev->kfd.vram_used < 0,
193                                   "kfd VRAM memory accounting unbalanced");
194                 }
195         }
196         WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
197                   "kfd system memory accounting unbalanced");
198         WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
199                   "kfd TTM memory accounting unbalanced");
200
201         spin_unlock(&kfd_mem_limit.mem_limit_lock);
202 }
203
204 void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
205 {
206         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
207         u32 domain = bo->preferred_domains;
208         bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
209
210         if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
211                 domain = AMDGPU_GEM_DOMAIN_CPU;
212                 sg = false;
213         }
214
215         unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
216 }
217
218
219 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
220  *  reservation object.
221  *
222  * @bo: [IN] Remove eviction fence(s) from this BO
223  * @ef: [IN] This eviction fence is removed if it
224  *  is present in the shared list.
225  *
226  * NOTE: Must be called with the BO reserved, i.e. with bo->tbo.base.resv locked.
227  */
228 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
229                                         struct amdgpu_amdkfd_fence *ef)
230 {
231         struct dma_resv *resv = bo->tbo.base.resv;
232         struct dma_resv_list *old, *new;
233         unsigned int i, j, k;
234
235         if (!ef)
236                 return -EINVAL;
237
238         old = dma_resv_get_list(resv);
239         if (!old)
240                 return 0;
241
242         new = kmalloc(struct_size(new, shared, old->shared_max), GFP_KERNEL);
243         if (!new)
244                 return -ENOMEM;
245
246         /* Go through all the shared fences in the reservation object and sort
247          * the interesting ones to the end of the list.
248          */
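        /* Illustration with hypothetical fences A,B,C,D where B and C belong
         * to ef's context: new becomes [A, D | C, B] with shared_count = 2;
         * the trailing entries (C, B) have their references dropped below.
         */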
249         for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
250                 struct dma_fence *f;
251
252                 f = rcu_dereference_protected(old->shared[i],
253                                               dma_resv_held(resv));
254
255                 if (f->context == ef->base.context)
256                         RCU_INIT_POINTER(new->shared[--j], f);
257                 else
258                         RCU_INIT_POINTER(new->shared[k++], f);
259         }
260         new->shared_max = old->shared_max;
261         new->shared_count = k;
262
263         /* Install the new fence list, seqcount provides the barriers */
264         write_seqcount_begin(&resv->seq);
265         RCU_INIT_POINTER(resv->fence, new);
266         write_seqcount_end(&resv->seq);
267
268         /* Drop the references to the removed fences */
269         for (i = j, k = 0; i < old->shared_count; ++i) {
270                 struct dma_fence *f;
271
272                 f = rcu_dereference_protected(new->shared[i],
273                                               dma_resv_held(resv));
274                 dma_fence_put(f);
275         }
276         kfree_rcu(old, rcu);
277
278         return 0;
279 }
280
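/* Remove the KFD eviction fence from a page table/directory BO. The root PD
 * is found by walking bo->parent links; its vm_bo/process_info then provides
 * the eviction fence to strip from this BO's reservation object.
 */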
281 int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
282 {
283         struct amdgpu_bo *root = bo;
284         struct amdgpu_vm_bo_base *vm_bo;
285         struct amdgpu_vm *vm;
286         struct amdkfd_process_info *info;
287         struct amdgpu_amdkfd_fence *ef;
288         int ret;
289
290         /* We can always get vm_bo from the root PD BO. */
291         while (root->parent)
292                 root = root->parent;
293
294         vm_bo = root->vm_bo;
295         if (!vm_bo)
296                 return 0;
297
298         vm = vm_bo->vm;
299         if (!vm)
300                 return 0;
301
302         info = vm->process_info;
303         if (!info || !info->eviction_fence)
304                 return 0;
305
306         ef = container_of(dma_fence_get(&info->eviction_fence->base),
307                         struct amdgpu_amdkfd_fence, base);
308
309         BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
310         ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
311         dma_resv_unlock(bo->tbo.base.resv);
312
313         dma_fence_put(&ef->base);
314         return ret;
315 }
316
317 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
318                                      bool wait)
319 {
320         struct ttm_operation_ctx ctx = { false, false };
321         int ret;
322
323         if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
324                  "Called with userptr BO"))
325                 return -EINVAL;
326
327         amdgpu_bo_placement_from_domain(bo, domain);
328
329         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
330         if (ret)
331                 goto validate_fail;
332         if (wait)
333                 amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
334
335 validate_fail:
336         return ret;
337 }
338
339 static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
340 {
341         struct amdgpu_vm_parser *p = param;
342
343         return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
344 }
345
346 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
347  *
348  * Page directories are not updated here because huge page handling
349  * during page table updates can invalidate page directory entries
350  * again. Page directories are only updated after updating page
351  * tables.
352  */
353 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
354 {
355         struct amdgpu_bo *pd = vm->root.base.bo;
356         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
357         struct amdgpu_vm_parser param;
358         int ret;
359
360         param.domain = AMDGPU_GEM_DOMAIN_VRAM;
361         param.wait = false;
362
363         ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
364                                         &param);
365         if (ret) {
366                 pr_err("failed to validate PT BOs\n");
367                 return ret;
368         }
369
370         ret = amdgpu_amdkfd_validate(&param, pd);
371         if (ret) {
372                 pr_err("failed to validate PD\n");
373                 return ret;
374         }
375
376         vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
377
378         if (vm->use_cpu_for_update) {
379                 ret = amdgpu_bo_kmap(pd, NULL);
380                 if (ret) {
381                         pr_err("failed to kmap PD, ret=%d\n", ret);
382                         return ret;
383                 }
384         }
385
386         return 0;
387 }
388
389 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
390 {
391         struct amdgpu_bo *pd = vm->root.base.bo;
392         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
393         int ret;
394
395         ret = amdgpu_vm_update_pdes(adev, vm, false);
396         if (ret)
397                 return ret;
398
399         return amdgpu_sync_fence(sync, vm->last_update);
400 }
401
402 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
403 {
404         struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
405         bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
406         uint32_t mapping_flags;
407
408         mapping_flags = AMDGPU_VM_PAGE_READABLE;
409         if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
410                 mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
411         if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
412                 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
413
414         switch (adev->asic_type) {
415         case CHIP_ARCTURUS:
416                 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
417                         if (bo_adev == adev)
418                                 mapping_flags |= coherent ?
419                                         AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
420                         else
421                                 mapping_flags |= AMDGPU_VM_MTYPE_UC;
422                 } else {
423                         mapping_flags |= coherent ?
424                                 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
425                 }
426                 break;
427         default:
428                 mapping_flags |= coherent ?
429                         AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
430         }
431
432         return amdgpu_gem_va_map_flags(adev, mapping_flags);
433 }
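/* Illustrative example: a writable, non-coherent VRAM allocation mapped on
 * the Arcturus GPU that owns it ends up with READABLE | WRITEABLE and
 * MTYPE_RW before amdgpu_gem_va_map_flags() translates these into PTE bits.
 */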
434
435 /* add_bo_to_vm - Add a BO to a VM
436  *
437  * Everything that needs to be done only once when a BO is first added
438  * to a VM. It can later be mapped and unmapped many times without
439  * repeating these steps.
440  *
441  * 1. Allocate and initialize BO VA entry data structure
442  * 2. Add BO to the VM
443  * 3. Determine ASIC-specific PTE flags
444  * 4. Alloc page tables and directories if needed
445  * 4a.  Validate new page tables and directories
446  */
447 static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
448                 struct amdgpu_vm *vm, bool is_aql,
449                 struct kfd_bo_va_list **p_bo_va_entry)
450 {
451         int ret;
452         struct kfd_bo_va_list *bo_va_entry;
453         struct amdgpu_bo *bo = mem->bo;
454         uint64_t va = mem->va;
455         struct list_head *list_bo_va = &mem->bo_va_list;
456         unsigned long bo_size = bo->tbo.mem.size;
457
458         if (!va) {
459                 pr_err("Invalid VA when adding BO to VM\n");
460                 return -EINVAL;
461         }
462
463         if (is_aql)
464                 va += bo_size;
465
466         bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
467         if (!bo_va_entry)
468                 return -ENOMEM;
469
470         pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
471                         va + bo_size, vm);
472
473         /* Add BO to VM internal data structures */
474         bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
475         if (!bo_va_entry->bo_va) {
476                 ret = -EINVAL;
477                 pr_err("Failed to add BO object to VM. ret == %d\n",
478                                 ret);
479                 goto err_vmadd;
480         }
481
482         bo_va_entry->va = va;
483         bo_va_entry->pte_flags = get_pte_flags(adev, mem);
484         bo_va_entry->kgd_dev = (void *)adev;
485         list_add(&bo_va_entry->bo_list, list_bo_va);
486
487         if (p_bo_va_entry)
488                 *p_bo_va_entry = bo_va_entry;
489
490         /* Allocate and validate page tables if needed */
491         ret = vm_validate_pt_pd_bos(vm);
492         if (ret) {
493                 pr_err("validate_pt_pd_bos() failed\n");
494                 goto err_alloc_pts;
495         }
496
497         return 0;
498
499 err_alloc_pts:
500         amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
501         list_del(&bo_va_entry->bo_list);
502 err_vmadd:
503         kfree(bo_va_entry);
504         return ret;
505 }
506
507 static void remove_bo_from_vm(struct amdgpu_device *adev,
508                 struct kfd_bo_va_list *entry, unsigned long size)
509 {
510         pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
511                         entry->va,
512                         entry->va + size, entry);
513         amdgpu_vm_bo_rmv(adev, entry->bo_va);
514         list_del(&entry->bo_list);
515         kfree(entry);
516 }
517
518 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
519                                 struct amdkfd_process_info *process_info,
520                                 bool userptr)
521 {
522         struct ttm_validate_buffer *entry = &mem->validate_list;
523         struct amdgpu_bo *bo = mem->bo;
524
525         INIT_LIST_HEAD(&entry->head);
526         entry->num_shared = 1;
527         entry->bo = &bo->tbo;
528         mutex_lock(&process_info->lock);
529         if (userptr)
530                 list_add_tail(&entry->head, &process_info->userptr_valid_list);
531         else
532                 list_add_tail(&entry->head, &process_info->kfd_bo_list);
533         mutex_unlock(&process_info->lock);
534 }
535
536 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
537                 struct amdkfd_process_info *process_info)
538 {
539         struct ttm_validate_buffer *bo_list_entry;
540
541         bo_list_entry = &mem->validate_list;
542         mutex_lock(&process_info->lock);
543         list_del(&bo_list_entry->head);
544         mutex_unlock(&process_info->lock);
545 }
546
547 /* Initializes user pages. It registers the MMU notifier and validates
548  * the userptr BO in the GTT domain.
549  *
550  * The BO must already be on the userptr_valid_list. Otherwise an
551  * eviction and restore may happen that leaves the new BO unmapped
552  * with the user mode queues running.
553  *
554  * Takes the process_info->lock to protect against concurrent restore
555  * workers.
556  *
557  * Returns 0 for success, negative errno for errors.
558  */
559 static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
560 {
561         struct amdkfd_process_info *process_info = mem->process_info;
562         struct amdgpu_bo *bo = mem->bo;
563         struct ttm_operation_ctx ctx = { true, false };
564         int ret = 0;
565
566         mutex_lock(&process_info->lock);
567
568         ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
569         if (ret) {
570                 pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
571                 goto out;
572         }
573
574         ret = amdgpu_mn_register(bo, user_addr);
575         if (ret) {
576                 pr_err("%s: Failed to register MMU notifier: %d\n",
577                        __func__, ret);
578                 goto out;
579         }
580
581         ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
582         if (ret) {
583                 pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
584                 goto unregister_out;
585         }
586
587         ret = amdgpu_bo_reserve(bo, true);
588         if (ret) {
589                 pr_err("%s: Failed to reserve BO\n", __func__);
590                 goto release_out;
591         }
592         amdgpu_bo_placement_from_domain(bo, mem->domain);
593         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
594         if (ret)
595                 pr_err("%s: failed to validate BO\n", __func__);
596         amdgpu_bo_unreserve(bo);
597
598 release_out:
599         amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
600 unregister_out:
601         if (ret)
602                 amdgpu_mn_unregister(bo);
603 out:
604         mutex_unlock(&process_info->lock);
605         return ret;
606 }
607
608 /* Reserving a BO and its page table BOs must happen atomically to
609  * avoid deadlocks. Some operations update multiple VMs at once. Track
610  * all the reservation info in a context structure. Optionally a sync
611  * object can track VM updates.
612  */
613 struct bo_vm_reservation_context {
614         struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
615         unsigned int n_vms;                 /* Number of VMs reserved       */
616         struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
617         struct ww_acquire_ctx ticket;       /* Reservation ticket           */
618         struct list_head list, duplicates;  /* BO lists                     */
619         struct amdgpu_sync *sync;           /* Pointer to sync object       */
620         bool reserved;                      /* Whether BOs are reserved     */
621 };
622
623 enum bo_vm_match {
624         BO_VM_NOT_MAPPED = 0,   /* Match VMs where a BO is not mapped */
625         BO_VM_MAPPED,           /* Match VMs where a BO is mapped     */
626         BO_VM_ALL,              /* Match all VMs a BO was added to    */
627 };
628
629 /**
630  * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
631  * @mem: KFD BO structure.
632  * @vm: the VM to reserve.
633  * @ctx: the struct that will be used in unreserve_bo_and_vms().
634  */
635 static int reserve_bo_and_vm(struct kgd_mem *mem,
636                               struct amdgpu_vm *vm,
637                               struct bo_vm_reservation_context *ctx)
638 {
639         struct amdgpu_bo *bo = mem->bo;
640         int ret;
641
642         WARN_ON(!vm);
643
644         ctx->reserved = false;
645         ctx->n_vms = 1;
646         ctx->sync = &mem->sync;
647
648         INIT_LIST_HEAD(&ctx->list);
649         INIT_LIST_HEAD(&ctx->duplicates);
650
651         ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
652         if (!ctx->vm_pd)
653                 return -ENOMEM;
654
655         ctx->kfd_bo.priority = 0;
656         ctx->kfd_bo.tv.bo = &bo->tbo;
657         ctx->kfd_bo.tv.num_shared = 1;
658         list_add(&ctx->kfd_bo.tv.head, &ctx->list);
659
660         amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
661
662         ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
663                                      false, &ctx->duplicates);
664         if (ret) {
665                 pr_err("Failed to reserve buffers in ttm.\n");
666                 kfree(ctx->vm_pd);
667                 ctx->vm_pd = NULL;
668                 return ret;
669         }
670
671         ctx->reserved = true;
672         return 0;
673 }
674
675 /**
676  * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
677  * @mem: KFD BO structure.
678  * @vm: the VM to reserve. If NULL, all VMs the BO has been added to are
679  * considered. Otherwise, only the given VM is considered.
680  * @map_type: the mapping status that will be used to filter the VMs.
681  * @ctx: the struct that will be used in unreserve_bo_and_vms().
682  *
683  * Returns 0 for success, negative for failure.
684  */
685 static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
686                                 struct amdgpu_vm *vm, enum bo_vm_match map_type,
687                                 struct bo_vm_reservation_context *ctx)
688 {
689         struct amdgpu_bo *bo = mem->bo;
690         struct kfd_bo_va_list *entry;
691         unsigned int i;
692         int ret;
693
694         ctx->reserved = false;
695         ctx->n_vms = 0;
696         ctx->vm_pd = NULL;
697         ctx->sync = &mem->sync;
698
699         INIT_LIST_HEAD(&ctx->list);
700         INIT_LIST_HEAD(&ctx->duplicates);
701
702         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
703                 if ((vm && vm != entry->bo_va->base.vm) ||
704                         (entry->is_mapped != map_type
705                         && map_type != BO_VM_ALL))
706                         continue;
707
708                 ctx->n_vms++;
709         }
710
711         if (ctx->n_vms != 0) {
712                 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
713                                      GFP_KERNEL);
714                 if (!ctx->vm_pd)
715                         return -ENOMEM;
716         }
717
718         ctx->kfd_bo.priority = 0;
719         ctx->kfd_bo.tv.bo = &bo->tbo;
720         ctx->kfd_bo.tv.num_shared = 1;
721         list_add(&ctx->kfd_bo.tv.head, &ctx->list);
722
723         i = 0;
724         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
725                 if ((vm && vm != entry->bo_va->base.vm) ||
726                         (entry->is_mapped != map_type
727                         && map_type != BO_VM_ALL))
728                         continue;
729
730                 amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
731                                 &ctx->vm_pd[i]);
732                 i++;
733         }
734
735         ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
736                                      false, &ctx->duplicates);
737         if (ret) {
738                 pr_err("Failed to reserve buffers in ttm.\n");
739                 kfree(ctx->vm_pd);
740                 ctx->vm_pd = NULL;
741                 return ret;
742         }
743
744         ctx->reserved = true;
745         return 0;
746 }
747
748 /**
749  * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
750  * @ctx: Reservation context to unreserve
751  * @wait: Optionally wait for a sync object representing pending VM updates
752  * @intr: Whether the wait is interruptible
753  *
754  * Also frees any resources allocated in
755  * reserve_bo_and_(cond_)vm(s). Returns the status from
756  * amdgpu_sync_wait.
757  */
758 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
759                                  bool wait, bool intr)
760 {
761         int ret = 0;
762
763         if (wait)
764                 ret = amdgpu_sync_wait(ctx->sync, intr);
765
766         if (ctx->reserved)
767                 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
768         kfree(ctx->vm_pd);
769
770         ctx->sync = NULL;
771
772         ctx->reserved = false;
773         ctx->vm_pd = NULL;
774
775         return ret;
776 }
777
778 static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
779                                 struct kfd_bo_va_list *entry,
780                                 struct amdgpu_sync *sync)
781 {
782         struct amdgpu_bo_va *bo_va = entry->bo_va;
783         struct amdgpu_vm *vm = bo_va->base.vm;
784
785         amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
786
787         amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
788
789         amdgpu_sync_fence(sync, bo_va->last_pt_update);
790
791         return 0;
792 }
793
794 static int update_gpuvm_pte(struct amdgpu_device *adev,
795                 struct kfd_bo_va_list *entry,
796                 struct amdgpu_sync *sync)
797 {
798         int ret;
799         struct amdgpu_bo_va *bo_va = entry->bo_va;
800
801         /* Update the page tables  */
802         ret = amdgpu_vm_bo_update(adev, bo_va, false);
803         if (ret) {
804                 pr_err("amdgpu_vm_bo_update failed\n");
805                 return ret;
806         }
807
808         return amdgpu_sync_fence(sync, bo_va->last_pt_update);
809 }
810
811 static int map_bo_to_gpuvm(struct amdgpu_device *adev,
812                 struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
813                 bool no_update_pte)
814 {
815         int ret;
816
817         /* Set virtual address for the allocation */
818         ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
819                                amdgpu_bo_size(entry->bo_va->base.bo),
820                                entry->pte_flags);
821         if (ret) {
822                 pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
823                                 entry->va, ret);
824                 return ret;
825         }
826
827         if (no_update_pte)
828                 return 0;
829
830         ret = update_gpuvm_pte(adev, entry, sync);
831         if (ret) {
832                 pr_err("update_gpuvm_pte() failed\n");
833                 goto update_gpuvm_pte_failed;
834         }
835
836         return 0;
837
838 update_gpuvm_pte_failed:
839         unmap_bo_from_gpuvm(adev, entry, sync);
840         return ret;
841 }
842
843 static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
844 {
845         struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
846
847         if (!sg)
848                 return NULL;
849         if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
850                 kfree(sg);
851                 return NULL;
852         }
853         sg->sgl->dma_address = addr;
854         sg->sgl->length = size;
855 #ifdef CONFIG_NEED_SG_DMA_LENGTH
856         sg->sgl->dma_length = size;
857 #endif
858         return sg;
859 }
860
861 static int process_validate_vms(struct amdkfd_process_info *process_info)
862 {
863         struct amdgpu_vm *peer_vm;
864         int ret;
865
866         list_for_each_entry(peer_vm, &process_info->vm_list_head,
867                             vm_list_node) {
868                 ret = vm_validate_pt_pd_bos(peer_vm);
869                 if (ret)
870                         return ret;
871         }
872
873         return 0;
874 }
875
876 static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
877                                  struct amdgpu_sync *sync)
878 {
879         struct amdgpu_vm *peer_vm;
880         int ret;
881
882         list_for_each_entry(peer_vm, &process_info->vm_list_head,
883                             vm_list_node) {
884                 struct amdgpu_bo *pd = peer_vm->root.base.bo;
885
886                 ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
887                                        AMDGPU_SYNC_NE_OWNER,
888                                        AMDGPU_FENCE_OWNER_KFD);
889                 if (ret)
890                         return ret;
891         }
892
893         return 0;
894 }
895
896 static int process_update_pds(struct amdkfd_process_info *process_info,
897                               struct amdgpu_sync *sync)
898 {
899         struct amdgpu_vm *peer_vm;
900         int ret;
901
902         list_for_each_entry(peer_vm, &process_info->vm_list_head,
903                             vm_list_node) {
904                 ret = vm_update_pds(peer_vm, sync);
905                 if (ret)
906                         return ret;
907         }
908
909         return 0;
910 }
911
912 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
913                        struct dma_fence **ef)
914 {
915         struct amdkfd_process_info *info = NULL;
916         int ret;
917
918         if (!*process_info) {
919                 info = kzalloc(sizeof(*info), GFP_KERNEL);
920                 if (!info)
921                         return -ENOMEM;
922
923                 mutex_init(&info->lock);
924                 INIT_LIST_HEAD(&info->vm_list_head);
925                 INIT_LIST_HEAD(&info->kfd_bo_list);
926                 INIT_LIST_HEAD(&info->userptr_valid_list);
927                 INIT_LIST_HEAD(&info->userptr_inval_list);
928
929                 info->eviction_fence =
930                         amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
931                                                    current->mm);
932                 if (!info->eviction_fence) {
933                         pr_err("Failed to create eviction fence\n");
934                         ret = -ENOMEM;
935                         goto create_evict_fence_fail;
936                 }
937
938                 info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
939                 atomic_set(&info->evicted_bos, 0);
940                 INIT_DELAYED_WORK(&info->restore_userptr_work,
941                                   amdgpu_amdkfd_restore_userptr_worker);
942
943                 *process_info = info;
944                 *ef = dma_fence_get(&info->eviction_fence->base);
945         }
946
947         vm->process_info = *process_info;
948
949         /* Validate page directory and attach eviction fence */
950         ret = amdgpu_bo_reserve(vm->root.base.bo, true);
951         if (ret)
952                 goto reserve_pd_fail;
953         ret = vm_validate_pt_pd_bos(vm);
954         if (ret) {
955                 pr_err("validate_pt_pd_bos() failed\n");
956                 goto validate_pd_fail;
957         }
958         ret = amdgpu_bo_sync_wait(vm->root.base.bo,
959                                   AMDGPU_FENCE_OWNER_KFD, false);
960         if (ret)
961                 goto wait_pd_fail;
962         ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
963         if (ret)
964                 goto reserve_shared_fail;
965         amdgpu_bo_fence(vm->root.base.bo,
966                         &vm->process_info->eviction_fence->base, true);
967         amdgpu_bo_unreserve(vm->root.base.bo);
968
969         /* Update process info */
970         mutex_lock(&vm->process_info->lock);
971         list_add_tail(&vm->vm_list_node,
972                         &(vm->process_info->vm_list_head));
973         vm->process_info->n_vms++;
974         mutex_unlock(&vm->process_info->lock);
975
976         return 0;
977
978 reserve_shared_fail:
979 wait_pd_fail:
980 validate_pd_fail:
981         amdgpu_bo_unreserve(vm->root.base.bo);
982 reserve_pd_fail:
983         vm->process_info = NULL;
984         if (info) {
985                 /* Two fence references: one in info and one in *ef */
986                 dma_fence_put(&info->eviction_fence->base);
987                 dma_fence_put(*ef);
988                 *ef = NULL;
989                 *process_info = NULL;
990                 put_pid(info->pid);
991 create_evict_fence_fail:
992                 mutex_destroy(&info->lock);
993                 kfree(info);
994         }
995         return ret;
996 }
997
998 int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, u32 pasid,
999                                           void **vm, void **process_info,
1000                                           struct dma_fence **ef)
1001 {
1002         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1003         struct amdgpu_vm *new_vm;
1004         int ret;
1005
1006         new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
1007         if (!new_vm)
1008                 return -ENOMEM;
1009
1010         /* Initialize AMDGPU part of the VM */
1011         ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid);
1012         if (ret) {
1013                 pr_err("Failed init vm ret %d\n", ret);
1014                 goto amdgpu_vm_init_fail;
1015         }
1016
1017         /* Initialize KFD part of the VM and process info */
1018         ret = init_kfd_vm(new_vm, process_info, ef);
1019         if (ret)
1020                 goto init_kfd_vm_fail;
1021
1022         *vm = (void *) new_vm;
1023
1024         return 0;
1025
1026 init_kfd_vm_fail:
1027         amdgpu_vm_fini(adev, new_vm);
1028 amdgpu_vm_init_fail:
1029         kfree(new_vm);
1030         return ret;
1031 }
1032
1033 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
1034                                            struct file *filp, u32 pasid,
1035                                            void **vm, void **process_info,
1036                                            struct dma_fence **ef)
1037 {
1038         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1039         struct drm_file *drm_priv = filp->private_data;
1040         struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
1041         struct amdgpu_vm *avm = &drv_priv->vm;
1042         int ret;
1043
1044         /* Already a compute VM? */
1045         if (avm->process_info)
1046                 return -EINVAL;
1047
1048         /* Convert VM into a compute VM */
1049         ret = amdgpu_vm_make_compute(adev, avm, pasid);
1050         if (ret)
1051                 return ret;
1052
1053         /* Initialize KFD part of the VM and process info */
1054         ret = init_kfd_vm(avm, process_info, ef);
1055         if (ret)
1056                 return ret;
1057
1058         *vm = (void *)avm;
1059
1060         return 0;
1061 }
1062
1063 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1064                                     struct amdgpu_vm *vm)
1065 {
1066         struct amdkfd_process_info *process_info = vm->process_info;
1067         struct amdgpu_bo *pd = vm->root.base.bo;
1068
1069         if (!process_info)
1070                 return;
1071
1072         /* Release eviction fence from PD */
1073         amdgpu_bo_reserve(pd, false);
1074         amdgpu_bo_fence(pd, NULL, false);
1075         amdgpu_bo_unreserve(pd);
1076
1077         /* Update process info */
1078         mutex_lock(&process_info->lock);
1079         process_info->n_vms--;
1080         list_del(&vm->vm_list_node);
1081         mutex_unlock(&process_info->lock);
1082
1083         vm->process_info = NULL;
1084
1085         /* Release per-process resources when last compute VM is destroyed */
1086         if (!process_info->n_vms) {
1087                 WARN_ON(!list_empty(&process_info->kfd_bo_list));
1088                 WARN_ON(!list_empty(&process_info->userptr_valid_list));
1089                 WARN_ON(!list_empty(&process_info->userptr_inval_list));
1090
1091                 dma_fence_put(&process_info->eviction_fence->base);
1092                 cancel_delayed_work_sync(&process_info->restore_userptr_work);
1093                 put_pid(process_info->pid);
1094                 mutex_destroy(&process_info->lock);
1095                 kfree(process_info);
1096         }
1097 }
1098
1099 void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
1100 {
1101         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1102         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1103
1104         if (WARN_ON(!kgd || !vm))
1105                 return;
1106
1107         pr_debug("Destroying process vm %p\n", vm);
1108
1109         /* Release the VM context */
1110         amdgpu_vm_fini(adev, avm);
1111         kfree(vm);
1112 }
1113
1114 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
1115 {
1116         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1117         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1118
1119         if (WARN_ON(!kgd || !vm))
1120                 return;
1121
1122         pr_debug("Releasing process vm %p\n", vm);
1123
1124         /* The original PASID of the amdgpu VM was already released
1125          * when the VM was converted into a compute VM. The current
1126          * PASID is managed by KFD and will be released when the KFD
1127          * process is destroyed. Set the amdgpu PASID to 0 to avoid a
1128          * duplicate release.
1129          */
1130         amdgpu_vm_release_compute(adev, avm);
1131 }
1132
1133 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
1134 {
1135         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1136         struct amdgpu_bo *pd = avm->root.base.bo;
1137         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
1138
1139         if (adev->asic_type < CHIP_VEGA10)
1140                 return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1141         return avm->pd_phys_addr;
1142 }
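/* Illustrative example: on ASICs older than Vega10 the PD address is
 * returned as a GPU page frame number, e.g. a PD at 0x200000 is reported
 * as 0x200 (AMDGPU_GPU_PAGE_SHIFT == 12); newer ASICs get the raw address.
 */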
1143
1144 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1145                 struct kgd_dev *kgd, uint64_t va, uint64_t size,
1146                 void *vm, struct kgd_mem **mem,
1147                 uint64_t *offset, uint32_t flags)
1148 {
1149         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1150         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1151         enum ttm_bo_type bo_type = ttm_bo_type_device;
1152         struct sg_table *sg = NULL;
1153         uint64_t user_addr = 0;
1154         struct amdgpu_bo *bo;
1155         struct amdgpu_bo_param bp;
1156         u32 domain, alloc_domain;
1157         u64 alloc_flags;
1158         int ret;
1159
1160         /*
1161          * Check on which domain to allocate BO
1162          */
1163         if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
1164                 domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1165                 alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
1166                 alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
1167                         AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
1168                         AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
1169         } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
1170                 domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1171                 alloc_flags = 0;
1172         } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
1173                 domain = AMDGPU_GEM_DOMAIN_GTT;
1174                 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1175                 alloc_flags = 0;
1176                 if (!offset || !*offset)
1177                         return -EINVAL;
1178                 user_addr = untagged_addr(*offset);
1179         } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1180                         KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1181                 domain = AMDGPU_GEM_DOMAIN_GTT;
1182                 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1183                 bo_type = ttm_bo_type_sg;
1184                 alloc_flags = 0;
1185                 if (size > UINT_MAX)
1186                         return -EINVAL;
1187                 sg = create_doorbell_sg(*offset, size);
1188                 if (!sg)
1189                         return -ENOMEM;
1190         } else {
1191                 return -EINVAL;
1192         }
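        /* Summary of the cases above: VRAM and GTT allocations use the same
         * domain for allocation and mapping; userptr BOs are allocated in the
         * CPU domain but mapped through GTT; doorbell/MMIO BOs are SG BOs in
         * the CPU domain mapped through GTT.
         */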
1193
1194         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1195         if (!*mem) {
1196                 ret = -ENOMEM;
1197                 goto err;
1198         }
1199         INIT_LIST_HEAD(&(*mem)->bo_va_list);
1200         mutex_init(&(*mem)->lock);
1201         (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1202
1203         /* Workaround for AQL queue wraparound bug. Map the same
1204          * memory twice. That means we only actually allocate half
1205          * the memory.
1206          */
1207         if ((*mem)->aql_queue)
1208                 size = size >> 1;
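        /* Illustrative example: a 4 MiB AQL queue allocation creates a 2 MiB
         * BO; add_bo_to_vm() later maps it twice, at va and at va + 2 MiB.
         */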
1209
1210         (*mem)->alloc_flags = flags;
1211
1212         amdgpu_sync_create(&(*mem)->sync);
1213
1214         ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
1215         if (ret) {
1216                 pr_debug("Insufficient memory\n");
1217                 goto err_reserve_limit;
1218         }
1219
1220         pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1221                         va, size, domain_string(alloc_domain));
1222
1223         memset(&bp, 0, sizeof(bp));
1224         bp.size = size;
1225         bp.byte_align = 1;
1226         bp.domain = alloc_domain;
1227         bp.flags = alloc_flags;
1228         bp.type = bo_type;
1229         bp.resv = NULL;
1230         ret = amdgpu_bo_create(adev, &bp, &bo);
1231         if (ret) {
1232                 pr_debug("Failed to create BO on domain %s. ret %d\n",
1233                                 domain_string(alloc_domain), ret);
1234                 goto err_bo_create;
1235         }
1236         if (bo_type == ttm_bo_type_sg) {
1237                 bo->tbo.sg = sg;
1238                 bo->tbo.ttm->sg = sg;
1239         }
1240         bo->kfd_bo = *mem;
1241         (*mem)->bo = bo;
1242         if (user_addr)
1243                 bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;
1244
1245         (*mem)->va = va;
1246         (*mem)->domain = domain;
1247         (*mem)->mapped_to_gpu_memory = 0;
1248         (*mem)->process_info = avm->process_info;
1249         add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1250
1251         if (user_addr) {
1252                 ret = init_user_pages(*mem, user_addr);
1253                 if (ret)
1254                         goto allocate_init_user_pages_failed;
1255         }
1256
1257         if (offset)
1258                 *offset = amdgpu_bo_mmap_offset(bo);
1259
1260         return 0;
1261
1262 allocate_init_user_pages_failed:
1263         remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
1264         amdgpu_bo_unref(&bo);
1265         /* Don't unreserve system mem limit twice */
1266         goto err_reserve_limit;
1267 err_bo_create:
1268         unreserve_mem_limit(adev, size, alloc_domain, !!sg);
1269 err_reserve_limit:
1270         mutex_destroy(&(*mem)->lock);
1271         kfree(*mem);
1272 err:
1273         if (sg) {
1274                 sg_free_table(sg);
1275                 kfree(sg);
1276         }
1277         return ret;
1278 }
1279
1280 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1281                 struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size)
1282 {
1283         struct amdkfd_process_info *process_info = mem->process_info;
1284         unsigned long bo_size = mem->bo->tbo.mem.size;
1285         struct kfd_bo_va_list *entry, *tmp;
1286         struct bo_vm_reservation_context ctx;
1287         struct ttm_validate_buffer *bo_list_entry;
1288         unsigned int mapped_to_gpu_memory;
1289         int ret;
1290         bool is_imported = false;
1291
1292         mutex_lock(&mem->lock);
1293         mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
1294         is_imported = mem->is_imported;
1295         mutex_unlock(&mem->lock);
1296         /* lock is not needed after this, since mem is unused and will
1297          * be freed anyway
1298          */
1299
1300         if (mapped_to_gpu_memory > 0) {
1301                 pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1302                                 mem->va, bo_size);
1303                 return -EBUSY;
1304         }
1305
1306         /* Make sure restore workers don't access the BO any more */
1307         bo_list_entry = &mem->validate_list;
1308         mutex_lock(&process_info->lock);
1309         list_del(&bo_list_entry->head);
1310         mutex_unlock(&process_info->lock);
1311
1312         /* No more MMU notifiers */
1313         amdgpu_mn_unregister(mem->bo);
1314
1315         ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1316         if (unlikely(ret))
1317                 return ret;
1318
1319         /* The eviction fence should be removed by the last unmap.
1320          * TODO: Log an error condition if the bo still has the eviction fence
1321          * attached
1322          */
1323         amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1324                                         process_info->eviction_fence);
1325         pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1326                 mem->va + bo_size * (1 + mem->aql_queue));
1327
1328         /* Remove from VM internal data structures */
1329         list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
1330                 remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
1331                                 entry, bo_size);
1332
1333         ret = unreserve_bo_and_vms(&ctx, false, false);
1334
1335         /* Free the sync object */
1336         amdgpu_sync_free(&mem->sync);
1337
1338         /* If the SG is not NULL, it's one we created for a doorbell or mmio
1339          * remap BO. We need to free it.
1340          */
1341         if (mem->bo->tbo.sg) {
1342                 sg_free_table(mem->bo->tbo.sg);
1343                 kfree(mem->bo->tbo.sg);
1344         }
1345
1346         /* Update the size of the BO being freed if it was allocated from
1347          * VRAM and is not imported.
1348          */
1349         if (size) {
1350                 if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
1351                     (!is_imported))
1352                         *size = bo_size;
1353                 else
1354                         *size = 0;
1355         }
1356
1357         /* Free the BO */
1358         drm_gem_object_put(&mem->bo->tbo.base);
1359         mutex_destroy(&mem->lock);
1360         kfree(mem);
1361
1362         return ret;
1363 }
1364
1365 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1366                 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1367 {
1368         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1369         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1370         int ret;
1371         struct amdgpu_bo *bo;
1372         uint32_t domain;
1373         struct kfd_bo_va_list *entry;
1374         struct bo_vm_reservation_context ctx;
1375         struct kfd_bo_va_list *bo_va_entry = NULL;
1376         struct kfd_bo_va_list *bo_va_entry_aql = NULL;
1377         unsigned long bo_size;
1378         bool is_invalid_userptr = false;
1379
1380         bo = mem->bo;
1381         if (!bo) {
1382                 pr_err("Invalid BO when mapping memory to GPU\n");
1383                 return -EINVAL;
1384         }
1385
1386         /* Make sure restore is not running concurrently. Since we
1387          * don't map invalid userptr BOs, we rely on the next restore
1388          * worker to do the mapping
1389          */
1390         mutex_lock(&mem->process_info->lock);
1391
1392         /* Take the mmap lock in write mode. If we find an invalid userptr
1393          * BO, we can be sure that the MMU notifier is no longer running
1394          * concurrently and that the queues are actually stopped.
1395          */
1396         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1397                 mmap_write_lock(current->mm);
1398                 is_invalid_userptr = atomic_read(&mem->invalid);
1399                 mmap_write_unlock(current->mm);
1400         }
1401
1402         mutex_lock(&mem->lock);
1403
1404         domain = mem->domain;
1405         bo_size = bo->tbo.mem.size;
1406
1407         pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1408                         mem->va,
1409                         mem->va + bo_size * (1 + mem->aql_queue),
1410                         vm, domain_string(domain));
1411
1412         ret = reserve_bo_and_vm(mem, vm, &ctx);
1413         if (unlikely(ret))
1414                 goto out;
1415
1416         /* Userptr can be marked as "not invalid", but not actually be
1417          * validated yet (still in the system domain). In that case
1418          * the queues are still stopped and we can leave mapping for
1419          * the next restore worker
1420          */
1421         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
1422             bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
1423                 is_invalid_userptr = true;
1424
1425         if (check_if_add_bo_to_vm(avm, mem)) {
1426                 ret = add_bo_to_vm(adev, mem, avm, false,
1427                                 &bo_va_entry);
1428                 if (ret)
1429                         goto add_bo_to_vm_failed;
1430                 if (mem->aql_queue) {
1431                         ret = add_bo_to_vm(adev, mem, avm,
1432                                         true, &bo_va_entry_aql);
1433                         if (ret)
1434                                 goto add_bo_to_vm_failed_aql;
1435                 }
1436         } else {
1437                 ret = vm_validate_pt_pd_bos(avm);
1438                 if (unlikely(ret))
1439                         goto add_bo_to_vm_failed;
1440         }
1441
1442         if (mem->mapped_to_gpu_memory == 0 &&
1443             !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1444                 /* Validate BO only once. The eviction fence gets added to BO
1445                  * the first time it is mapped. Validate will wait for all
1446                  * background evictions to complete.
1447                  */
1448                 ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1449                 if (ret) {
1450                         pr_debug("Validate failed\n");
1451                         goto map_bo_to_gpuvm_failed;
1452                 }
1453         }
1454
1455         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1456                 if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
1457                         pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1458                                         entry->va, entry->va + bo_size,
1459                                         entry);
1460
1461                         ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
1462                                               is_invalid_userptr);
1463                         if (ret) {
1464                                 pr_err("Failed to map bo to gpuvm\n");
1465                                 goto map_bo_to_gpuvm_failed;
1466                         }
1467
1468                         ret = vm_update_pds(vm, ctx.sync);
1469                         if (ret) {
1470                                 pr_err("Failed to update page directories\n");
1471                                 goto map_bo_to_gpuvm_failed;
1472                         }
1473
1474                         entry->is_mapped = true;
1475                         mem->mapped_to_gpu_memory++;
1476                         pr_debug("\t INC mapping count %d\n",
1477                                         mem->mapped_to_gpu_memory);
1478                 }
1479         }
1480
1481         if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
1482                 amdgpu_bo_fence(bo,
1483                                 &avm->process_info->eviction_fence->base,
1484                                 true);
1485         ret = unreserve_bo_and_vms(&ctx, false, false);
1486
1487         goto out;
1488
1489 map_bo_to_gpuvm_failed:
1490         if (bo_va_entry_aql)
1491                 remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
1492 add_bo_to_vm_failed_aql:
1493         if (bo_va_entry)
1494                 remove_bo_from_vm(adev, bo_va_entry, bo_size);
1495 add_bo_to_vm_failed:
1496         unreserve_bo_and_vms(&ctx, false, false);
1497 out:
1498         mutex_unlock(&mem->process_info->lock);
1499         mutex_unlock(&mem->lock);
1500         return ret;
1501 }
1502
1503 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1504                 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1505 {
1506         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1507         struct amdkfd_process_info *process_info =
1508                 ((struct amdgpu_vm *)vm)->process_info;
1509         unsigned long bo_size = mem->bo->tbo.mem.size;
1510         struct kfd_bo_va_list *entry;
1511         struct bo_vm_reservation_context ctx;
1512         int ret;
1513
1514         mutex_lock(&mem->lock);
1515
1516         ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
1517         if (unlikely(ret))
1518                 goto out;
1519         /* If no VMs were reserved, it means the BO wasn't actually mapped */
1520         if (ctx.n_vms == 0) {
1521                 ret = -EINVAL;
1522                 goto unreserve_out;
1523         }
1524
1525         ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
1526         if (unlikely(ret))
1527                 goto unreserve_out;
1528
1529         pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1530                 mem->va,
1531                 mem->va + bo_size * (1 + mem->aql_queue),
1532                 vm);
1533
1534         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1535                 if (entry->bo_va->base.vm == vm && entry->is_mapped) {
1536                         pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1537                                         entry->va,
1538                                         entry->va + bo_size,
1539                                         entry);
1540
1541                         ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
1542                         if (ret == 0) {
1543                                 entry->is_mapped = false;
1544                         } else {
1545                                 pr_err("failed to unmap VA 0x%llx\n",
1546                                                 mem->va);
1547                                 goto unreserve_out;
1548                         }
1549
1550                         mem->mapped_to_gpu_memory--;
1551                         pr_debug("\t DEC mapping count %d\n",
1552                                         mem->mapped_to_gpu_memory);
1553                 }
1554         }
1555
1556         /* If BO is unmapped from all VMs, unfence it. It can be evicted if
1557          * required.
1558          */
1559         if (mem->mapped_to_gpu_memory == 0 &&
1560             !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) &&
1561             !mem->bo->tbo.pin_count)
1562                 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1563                                                 process_info->eviction_fence);
1564
1565 unreserve_out:
1566         unreserve_bo_and_vms(&ctx, false, false);
1567 out:
1568         mutex_unlock(&mem->lock);
1569         return ret;
1570 }
1571
1572 int amdgpu_amdkfd_gpuvm_sync_memory(
1573                 struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1574 {
1575         struct amdgpu_sync sync;
1576         int ret;
1577
1578         amdgpu_sync_create(&sync);
1579
1580         mutex_lock(&mem->lock);
1581         amdgpu_sync_clone(&mem->sync, &sync);
1582         mutex_unlock(&mem->lock);
1583
1584         ret = amdgpu_sync_wait(&sync, intr);
1585         amdgpu_sync_free(&sync);
1586         return ret;
1587 }
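
/* Illustrative sketch, not driver code: amdgpu_amdkfd_gpuvm_sync_memory()
 * clones mem->sync under mem->lock and waits on the clone with the lock
 * dropped, so a long fence wait never blocks concurrent map/unmap calls on
 * the same BO. A hypothetical caller would typically pair it with a map
 * call, passing intr = true so the wait can be interrupted by a signal:
 *
 *      ret = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kgd, mem, vm);
 *      if (!ret)
 *              ret = amdgpu_amdkfd_gpuvm_sync_memory(kgd, mem, true);
 */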
1588
1589 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1590                 struct kgd_mem *mem, void **kptr, uint64_t *size)
1591 {
1592         int ret;
1593         struct amdgpu_bo *bo = mem->bo;
1594
1595         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1596                 pr_err("userptr can't be mapped to kernel\n");
1597                 return -EINVAL;
1598         }
1599
1600         /* Remove kgd_mem from kfd_bo_list so this BO is not re-validated
1601          * when BOs are restored after an eviction.
1602          */
1603         mutex_lock(&mem->process_info->lock);
1604
1605         ret = amdgpu_bo_reserve(bo, true);
1606         if (ret) {
1607                 pr_err("Failed to reserve bo. ret %d\n", ret);
1608                 goto bo_reserve_failed;
1609         }
1610
1611         ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
1612         if (ret) {
1613                 pr_err("Failed to pin bo. ret %d\n", ret);
1614                 goto pin_failed;
1615         }
1616
1617         ret = amdgpu_bo_kmap(bo, kptr);
1618         if (ret) {
1619                 pr_err("Failed to map bo to kernel. ret %d\n", ret);
1620                 goto kmap_failed;
1621         }
1622
1623         amdgpu_amdkfd_remove_eviction_fence(
1624                 bo, mem->process_info->eviction_fence);
1625         list_del_init(&mem->validate_list.head);
1626
1627         if (size)
1628                 *size = amdgpu_bo_size(bo);
1629
1630         amdgpu_bo_unreserve(bo);
1631
1632         mutex_unlock(&mem->process_info->lock);
1633         return 0;
1634
1635 kmap_failed:
1636         amdgpu_bo_unpin(bo);
1637 pin_failed:
1638         amdgpu_bo_unreserve(bo);
1639 bo_reserve_failed:
1640         mutex_unlock(&mem->process_info->lock);
1641
1642         return ret;
1643 }
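
/* Illustrative sketch, not driver code: the kernel mapping above is built as
 * reserve -> pin -> kmap, and its error paths unwind in exact reverse order.
 * A hypothetical teardown (no such helper exists in this file) would mirror
 * that ordering before handing the BO back to the restore worker:
 *
 *      amdgpu_bo_reserve(bo, true);
 *      amdgpu_bo_kunmap(bo);
 *      amdgpu_bo_unpin(bo);
 *      amdgpu_bo_unreserve(bo);
 *
 * followed by re-adding the kgd_mem to kfd_bo_list and re-attaching the
 * process eviction fence so the BO is evictable again.
 */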
1644
1645 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
1646                                               struct kfd_vm_fault_info *mem)
1647 {
1648         struct amdgpu_device *adev;
1649
1650         adev = (struct amdgpu_device *)kgd;
1651         if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
1652                 *mem = *adev->gmc.vm_fault_info;
1653                 mb();
1654                 atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1655         }
1656         return 0;
1657 }
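
/* Illustrative sketch, not driver code: the read-then-clear above assumes the
 * producer (the GMC fault interrupt path, outside this file) publishes the
 * fault record before raising the flag, mirroring the mb() on the consumer
 * side:
 *
 *      *adev->gmc.vm_fault_info = fault;
 *      mb();
 *      atomic_set(&adev->gmc.vm_fault_info_updated, 1);
 *
 * where "fault" stands in for whatever record the interrupt handler built.
 */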
1658
1659 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
1660                                       struct dma_buf *dma_buf,
1661                                       uint64_t va, void *vm,
1662                                       struct kgd_mem **mem, uint64_t *size,
1663                                       uint64_t *mmap_offset)
1664 {
1665         struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
1666         struct drm_gem_object *obj;
1667         struct amdgpu_bo *bo;
1668         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1669
1670         if (dma_buf->ops != &amdgpu_dmabuf_ops)
1671                 /* Can't handle non-graphics buffers */
1672                 return -EINVAL;
1673
1674         obj = dma_buf->priv;
1675         if (drm_to_adev(obj->dev) != adev)
1676                 /* Can't handle buffers from other devices */
1677                 return -EINVAL;
1678
1679         bo = gem_to_amdgpu_bo(obj);
1680         if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
1681                                     AMDGPU_GEM_DOMAIN_GTT)))
1682                 /* Only VRAM and GTT BOs are supported */
1683                 return -EINVAL;
1684
1685         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1686         if (!*mem)
1687                 return -ENOMEM;
1688
1689         if (size)
1690                 *size = amdgpu_bo_size(bo);
1691
1692         if (mmap_offset)
1693                 *mmap_offset = amdgpu_bo_mmap_offset(bo);
1694
1695         INIT_LIST_HEAD(&(*mem)->bo_va_list);
1696         mutex_init(&(*mem)->lock);
1697
1698         (*mem)->alloc_flags =
1699                 ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1700                 KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
1701                 | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
1702                 | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
1703
1704         drm_gem_object_get(&bo->tbo.base);
1705         (*mem)->bo = bo;
1706         (*mem)->va = va;
1707         (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1708                 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
1709         (*mem)->mapped_to_gpu_memory = 0;
1710         (*mem)->process_info = avm->process_info;
1711         add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
1712         amdgpu_sync_create(&(*mem)->sync);
1713         (*mem)->is_imported = true;
1714
1715         return 0;
1716 }
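
/* Illustrative sketch, not driver code: the import above only takes a GEM
 * reference on the existing BO; no new backing memory is allocated. A
 * hypothetical ioctl-level caller would resolve the fd itself and may drop
 * its dma_buf reference afterwards, since the BO stays alive through the GEM
 * reference taken here:
 *
 *      struct dma_buf *dmabuf = dma_buf_get(fd);
 *
 *      if (!IS_ERR(dmabuf)) {
 *              r = amdgpu_amdkfd_gpuvm_import_dmabuf(kgd, dmabuf, va, vm,
 *                                                    &mem, &size, &offset);
 *              dma_buf_put(dmabuf);
 *      }
 */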
1717
1718 /* Evict a userptr BO by stopping the queues if necessary
1719  *
1720  * Runs in an MMU notifier, possibly in RECLAIM_FS context. This means it
1721  * cannot do any memory allocations, and cannot take any locks that
1722  * are held elsewhere while allocating memory. Therefore this is as
1723  * simple as possible, using atomic counters.
1724  *
1725  * It doesn't do anything to the BO itself. The real work happens in
1726  * restore, where we get updated page addresses. This function only
1727  * ensures that GPU access to the BO is stopped.
1728  */
1729 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
1730                                 struct mm_struct *mm)
1731 {
1732         struct amdkfd_process_info *process_info = mem->process_info;
1733         int evicted_bos;
1734         int r = 0;
1735
1736         atomic_inc(&mem->invalid);
1737         evicted_bos = atomic_inc_return(&process_info->evicted_bos);
1738         if (evicted_bos == 1) {
1739                 /* First eviction, stop the queues */
1740                 r = kgd2kfd_quiesce_mm(mm);
1741                 if (r)
1742                         pr_err("Failed to quiesce KFD\n");
1743                 schedule_delayed_work(&process_info->restore_userptr_work,
1744                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1745         }
1746
1747         return r;
1748 }
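
/* Illustrative sketch, not driver code: the eviction counter above pairs with
 * the restore worker below. Queues are only quiesced on the 0 -> 1 transition
 * and only resumed when no new eviction raced with the restore, roughly:
 *
 *      if (atomic_inc_return(&evicted_bos) == 1)
 *              quiesce();                      // first eviction only
 *      ...
 *      if (atomic_cmpxchg(&evicted_bos, seen, 0) == seen)
 *              resume();                       // nothing raced in
 *
 * where quiesce()/resume() stand in for kgd2kfd_quiesce_mm() and
 * kgd2kfd_resume_mm().
 */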
1749
1750 /* Update invalid userptr BOs
1751  *
1752  * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
1753  * userptr_inval_list and updates user pages for all BOs that have
1754  * been invalidated since their last update.
1755  */
1756 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
1757                                      struct mm_struct *mm)
1758 {
1759         struct kgd_mem *mem, *tmp_mem;
1760         struct amdgpu_bo *bo;
1761         struct ttm_operation_ctx ctx = { false, false };
1762         int invalid, ret;
1763
1764         /* Move all invalidated BOs to the userptr_inval_list and
1765          * release their user pages by migration to the CPU domain
1766          */
1767         list_for_each_entry_safe(mem, tmp_mem,
1768                                  &process_info->userptr_valid_list,
1769                                  validate_list.head) {
1770                 if (!atomic_read(&mem->invalid))
1771                         continue; /* BO is still valid */
1772
1773                 bo = mem->bo;
1774
1775                 if (amdgpu_bo_reserve(bo, true))
1776                         return -EAGAIN;
1777                 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1778                 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1779                 amdgpu_bo_unreserve(bo);
1780                 if (ret) {
1781                         pr_err("%s: Failed to invalidate userptr BO\n",
1782                                __func__);
1783                         return -EAGAIN;
1784                 }
1785
1786                 list_move_tail(&mem->validate_list.head,
1787                                &process_info->userptr_inval_list);
1788         }
1789
1790         if (list_empty(&process_info->userptr_inval_list))
1791                 return 0; /* All evicted userptr BOs were freed */
1792
1793         /* Go through userptr_inval_list and update any invalid user_pages */
1794         list_for_each_entry(mem, &process_info->userptr_inval_list,
1795                             validate_list.head) {
1796                 invalid = atomic_read(&mem->invalid);
1797                 if (!invalid)
1798                         /* BO hasn't been invalidated since the last
1799                          * revalidation attempt. Keep its BO list.
1800                          */
1801                         continue;
1802
1803                 bo = mem->bo;
1804
1805                 /* Get updated user pages */
1806                 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
1807                 if (ret) {
1808                         pr_debug("%s: Failed to get user pages: %d\n",
1809                                 __func__, ret);
1810
1811                         /* Return error -EBUSY or -ENOMEM, retry restore */
1812                         return ret;
1813                 }
1814
1815                 /*
1816                  * FIXME: Cannot ignore the return code, must hold
1817                  * notifier_lock
1818                  */
1819                 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
1820
1821                 /* Mark the BO as valid unless it was invalidated
1822                  * again concurrently.
1823                  */
1824                 if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
1825                         return -EAGAIN;
1826         }
1827
1828         return 0;
1829 }
1830
1831 /* Validate invalid userptr BOs
1832  *
1833  * Validates BOs on the userptr_inval_list, and moves them back to the
1834  * userptr_valid_list. Also updates GPUVM page tables with new page
1835  * addresses and waits for the page table updates to complete.
1836  */
1837 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
1838 {
1839         struct amdgpu_bo_list_entry *pd_bo_list_entries;
1840         struct list_head resv_list, duplicates;
1841         struct ww_acquire_ctx ticket;
1842         struct amdgpu_sync sync;
1843
1844         struct amdgpu_vm *peer_vm;
1845         struct kgd_mem *mem, *tmp_mem;
1846         struct amdgpu_bo *bo;
1847         struct ttm_operation_ctx ctx = { false, false };
1848         int i, ret;
1849
1850         pd_bo_list_entries = kcalloc(process_info->n_vms,
1851                                      sizeof(struct amdgpu_bo_list_entry),
1852                                      GFP_KERNEL);
1853         if (!pd_bo_list_entries) {
1854                 pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
1855                 ret = -ENOMEM;
1856                 goto out_no_mem;
1857         }
1858
1859         INIT_LIST_HEAD(&resv_list);
1860         INIT_LIST_HEAD(&duplicates);
1861
1862         /* Get all the page directory BOs that need to be reserved */
1863         i = 0;
1864         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1865                             vm_list_node)
1866                 amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
1867                                     &pd_bo_list_entries[i++]);
1868         /* Add the userptr_inval_list entries to resv_list */
1869         list_for_each_entry(mem, &process_info->userptr_inval_list,
1870                             validate_list.head) {
1871                 list_add_tail(&mem->resv_list.head, &resv_list);
1872                 mem->resv_list.bo = mem->validate_list.bo;
1873                 mem->resv_list.num_shared = mem->validate_list.num_shared;
1874         }
1875
1876         /* Reserve all BOs and page tables for validation */
1877         ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
1878         WARN(!list_empty(&duplicates), "Duplicates should be empty");
1879         if (ret)
1880                 goto out_free;
1881
1882         amdgpu_sync_create(&sync);
1883
1884         ret = process_validate_vms(process_info);
1885         if (ret)
1886                 goto unreserve_out;
1887
1888         /* Validate BOs and update GPUVM page tables */
1889         list_for_each_entry_safe(mem, tmp_mem,
1890                                  &process_info->userptr_inval_list,
1891                                  validate_list.head) {
1892                 struct kfd_bo_va_list *bo_va_entry;
1893
1894                 bo = mem->bo;
1895
1896                 /* Validate the BO if we got user pages */
1897                 if (bo->tbo.ttm->pages[0]) {
1898                         amdgpu_bo_placement_from_domain(bo, mem->domain);
1899                         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1900                         if (ret) {
1901                                 pr_err("%s: failed to validate BO\n", __func__);
1902                                 goto unreserve_out;
1903                         }
1904                 }
1905
1906                 list_move_tail(&mem->validate_list.head,
1907                                &process_info->userptr_valid_list);
1908
1909                 /* Update mapping. If the BO was not validated
1910                  * (because we couldn't get user pages), this will
1911                  * clear the page table entries, which will result in
1912                  * VM faults if the GPU tries to access the invalid
1913                  * memory.
1914                  */
1915                 list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
1916                         if (!bo_va_entry->is_mapped)
1917                                 continue;
1918
1919                         ret = update_gpuvm_pte((struct amdgpu_device *)
1920                                                bo_va_entry->kgd_dev,
1921                                                bo_va_entry, &sync);
1922                         if (ret) {
1923                                 pr_err("%s: update PTE failed\n", __func__);
1924                                 /* make sure this gets validated again */
1925                                 atomic_inc(&mem->invalid);
1926                                 goto unreserve_out;
1927                         }
1928                 }
1929         }
1930
1931         /* Update page directories */
1932         ret = process_update_pds(process_info, &sync);
1933
1934 unreserve_out:
1935         ttm_eu_backoff_reservation(&ticket, &resv_list);
1936         amdgpu_sync_wait(&sync, false);
1937         amdgpu_sync_free(&sync);
1938 out_free:
1939         kfree(pd_bo_list_entries);
1940 out_no_mem:
1941
1942         return ret;
1943 }
1944
1945 /* Worker callback to restore evicted userptr BOs
1946  *
1947  * Tries to update and validate all userptr BOs. If successful and no
1948  * concurrent evictions happened, the queues are restarted. Otherwise,
1949  * reschedule for another attempt later.
1950  */
1951 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
1952 {
1953         struct delayed_work *dwork = to_delayed_work(work);
1954         struct amdkfd_process_info *process_info =
1955                 container_of(dwork, struct amdkfd_process_info,
1956                              restore_userptr_work);
1957         struct task_struct *usertask;
1958         struct mm_struct *mm;
1959         int evicted_bos;
1960
1961         evicted_bos = atomic_read(&process_info->evicted_bos);
1962         if (!evicted_bos)
1963                 return;
1964
1965         /* Reference task and mm in case of concurrent process termination */
1966         usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
1967         if (!usertask)
1968                 return;
1969         mm = get_task_mm(usertask);
1970         if (!mm) {
1971                 put_task_struct(usertask);
1972                 return;
1973         }
1974
1975         mutex_lock(&process_info->lock);
1976
1977         if (update_invalid_user_pages(process_info, mm))
1978                 goto unlock_out;
1979         /* userptr_inval_list can be empty if all evicted userptr BOs
1980          * have been freed. In that case there is nothing to validate
1981          * and we can just restart the queues.
1982          */
1983         if (!list_empty(&process_info->userptr_inval_list)) {
1984                 if (atomic_read(&process_info->evicted_bos) != evicted_bos)
1985                         goto unlock_out; /* Concurrent eviction, try again */
1986
1987                 if (validate_invalid_user_pages(process_info))
1988                         goto unlock_out;
1989         }
1990         /* Final check for concurrent eviction and atomic update. If
1991          * another eviction happens after successful update, it will
1992          * be a first eviction that calls quiesce_mm. The eviction
1993          * reference counting inside KFD will handle this case.
1994          */
1995         if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
1996             evicted_bos)
1997                 goto unlock_out;
1998         evicted_bos = 0;
1999         if (kgd2kfd_resume_mm(mm)) {
2000                 pr_err("%s: Failed to resume KFD\n", __func__);
2001                 /* No recovery from this failure. Probably the CP is
2002                  * hanging. No point trying again.
2003                  */
2004         }
2005
2006 unlock_out:
2007         mutex_unlock(&process_info->lock);
2008         mmput(mm);
2009         put_task_struct(usertask);
2010
2011         /* If validation failed, reschedule another attempt */
2012         if (evicted_bos)
2013                 schedule_delayed_work(&process_info->restore_userptr_work,
2014                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
2015 }
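
/* Illustrative sketch, not driver code: AMDGPU_USERPTR_RESTORE_DELAY_MS keeps
 * the worker above from running once per invalidation when a whole range of
 * userptr BOs is torn down. A burst of notifier callbacks roughly behaves as:
 *
 *      amdgpu_amdkfd_evict_userptr();  // quiesce, schedule worker at +1 ms
 *      amdgpu_amdkfd_evict_userptr();  // only bumps the counters
 *      ...
 *      worker runs;                    // one page update, one resume
 *
 * and any failure or concurrent eviction simply reschedules the worker with
 * the same delay, as the tail of the function above does.
 */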
2016
2017 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
2018  *   KFD process identified by process_info
2019  *
2020  * @process_info: amdkfd_process_info of the KFD process
2021  *
2022  * After memory eviction, the restore thread calls this function. The function
2023  * should be called while the process is still valid. BO restore involves:
2024  *
2025  * 1.  Release the old eviction fence and create a new one
2026  * 2.  Get two copies of the PD BO list from all the VMs. Keep one copy as pd_list.
2027  * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
2028  *     BOs that need to be reserved.
2029  * 4.  Reserve all the BOs
2030  * 5.  Validate PD and PT BOs.
2031  * 6.  Validate all KFD BOs using kfd_bo_list, map them, and add the new fence
2032  * 7.  Add the fence to all PD and PT BOs.
2033  * 8.  Unreserve all BOs
2034  */
2035 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
2036 {
2037         struct amdgpu_bo_list_entry *pd_bo_list;
2038         struct amdkfd_process_info *process_info = info;
2039         struct amdgpu_vm *peer_vm;
2040         struct kgd_mem *mem;
2041         struct bo_vm_reservation_context ctx;
2042         struct amdgpu_amdkfd_fence *new_fence;
2043         int ret = 0, i;
2044         struct list_head duplicate_save;
2045         struct amdgpu_sync sync_obj;
2046         unsigned long failed_size = 0;
2047         unsigned long total_size = 0;
2048
2049         INIT_LIST_HEAD(&duplicate_save);
2050         INIT_LIST_HEAD(&ctx.list);
2051         INIT_LIST_HEAD(&ctx.duplicates);
2052
2053         pd_bo_list = kcalloc(process_info->n_vms,
2054                              sizeof(struct amdgpu_bo_list_entry),
2055                              GFP_KERNEL);
2056         if (!pd_bo_list)
2057                 return -ENOMEM;
2058
2059         i = 0;
2060         mutex_lock(&process_info->lock);
2061         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2062                         vm_list_node)
2063                 amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
2064
2065         /* Reserve all BOs and page tables/directory. Add all BOs from
2066          * kfd_bo_list to ctx.list
2067          */
2068         list_for_each_entry(mem, &process_info->kfd_bo_list,
2069                             validate_list.head) {
2070
2071                 list_add_tail(&mem->resv_list.head, &ctx.list);
2072                 mem->resv_list.bo = mem->validate_list.bo;
2073                 mem->resv_list.num_shared = mem->validate_list.num_shared;
2074         }
2075
2076         ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
2077                                      false, &duplicate_save);
2078         if (ret) {
2079                 pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2080                 goto ttm_reserve_fail;
2081         }
2082
2083         amdgpu_sync_create(&sync_obj);
2084
2085         /* Validate PDs and PTs */
2086         ret = process_validate_vms(process_info);
2087         if (ret)
2088                 goto validate_map_fail;
2089
2090         ret = process_sync_pds_resv(process_info, &sync_obj);
2091         if (ret) {
2092                 pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
2093                 goto validate_map_fail;
2094         }
2095
2096         /* Validate BOs and map them to GPUVM (update VM page tables). */
2097         list_for_each_entry(mem, &process_info->kfd_bo_list,
2098                             validate_list.head) {
2099
2100                 struct amdgpu_bo *bo = mem->bo;
2101                 uint32_t domain = mem->domain;
2102                 struct kfd_bo_va_list *bo_va_entry;
2103
2104                 total_size += amdgpu_bo_size(bo);
2105
2106                 ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2107                 if (ret) {
2108                         pr_debug("Memory eviction: Validate BOs failed\n");
2109                         failed_size += amdgpu_bo_size(bo);
2110                         ret = amdgpu_amdkfd_bo_validate(bo,
2111                                                 AMDGPU_GEM_DOMAIN_GTT, false);
2112                         if (ret) {
2113                                 pr_debug("Memory eviction: Try again\n");
2114                                 goto validate_map_fail;
2115                         }
2116                 }
2117                 ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving);
2118                 if (ret) {
2119                         pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2120                         goto validate_map_fail;
2121                 }
2122                 list_for_each_entry(bo_va_entry, &mem->bo_va_list,
2123                                     bo_list) {
2124                         ret = update_gpuvm_pte((struct amdgpu_device *)
2125                                               bo_va_entry->kgd_dev,
2126                                               bo_va_entry,
2127                                               &sync_obj);
2128                         if (ret) {
2129                                 pr_debug("Memory eviction: update PTE failed. Try again\n");
2130                                 goto validate_map_fail;
2131                         }
2132                 }
2133         }
2134
2135         if (failed_size)
2136                 pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
2137
2138         /* Update page directories */
2139         ret = process_update_pds(process_info, &sync_obj);
2140         if (ret) {
2141                 pr_debug("Memory eviction: update PDs failed. Try again\n");
2142                 goto validate_map_fail;
2143         }
2144
2145         /* Wait for validate and PT updates to finish */
2146         amdgpu_sync_wait(&sync_obj, false);
2147
2148         /* Release the old eviction fence and create a new one: because a
2149          * fence only goes from unsignaled to signaled, it cannot be reused.
2150          * Use the context and mm from the old fence.
2151          */
2152         new_fence = amdgpu_amdkfd_fence_create(
2153                                 process_info->eviction_fence->base.context,
2154                                 process_info->eviction_fence->mm);
2155         if (!new_fence) {
2156                 pr_err("Failed to create eviction fence\n");
2157                 ret = -ENOMEM;
2158                 goto validate_map_fail;
2159         }
2160         dma_fence_put(&process_info->eviction_fence->base);
2161         process_info->eviction_fence = new_fence;
2162         *ef = dma_fence_get(&new_fence->base);
2163
2164         /* Attach new eviction fence to all BOs */
2165         list_for_each_entry(mem, &process_info->kfd_bo_list,
2166                 validate_list.head)
2167                 amdgpu_bo_fence(mem->bo,
2168                         &process_info->eviction_fence->base, true);
2169
2170         /* Attach eviction fence to PD / PT BOs */
2171         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2172                             vm_list_node) {
2173                 struct amdgpu_bo *bo = peer_vm->root.base.bo;
2174
2175                 amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2176         }
2177
2178 validate_map_fail:
2179         ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2180         amdgpu_sync_free(&sync_obj);
2181 ttm_reserve_fail:
2182         mutex_unlock(&process_info->lock);
2183         kfree(pd_bo_list);
2184         return ret;
2185 }
2186
2187 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
2188 {
2189         struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2190         struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
2191         int ret;
2192
2193         if (!info || !gws)
2194                 return -EINVAL;
2195
2196         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2197         if (!*mem)
2198                 return -ENOMEM;
2199
2200         mutex_init(&(*mem)->lock);
2201         INIT_LIST_HEAD(&(*mem)->bo_va_list);
2202         (*mem)->bo = amdgpu_bo_ref(gws_bo);
2203         (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
2204         (*mem)->process_info = process_info;
2205         add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
2206         amdgpu_sync_create(&(*mem)->sync);
2207
2208
2209         /* Validate the GWS BO the first time it is added to the process */
2210         mutex_lock(&(*mem)->process_info->lock);
2211         ret = amdgpu_bo_reserve(gws_bo, false);
2212         if (unlikely(ret)) {
2213                 pr_err("Reserve gws bo failed %d\n", ret);
2214                 goto bo_reservation_failure;
2215         }
2216
2217         ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
2218         if (ret) {
2219                 pr_err("GWS BO validate failed %d\n", ret);
2220                 goto bo_validation_failure;
2221         }
2222         /* The GWS resource is shared between amdgpu and amdkfd.
2223          * Add the process eviction fence to the BO so they can
2224          * evict each other.
2225          */
2226         ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
2227         if (ret)
2228                 goto reserve_shared_fail;
2229         amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
2230         amdgpu_bo_unreserve(gws_bo);
2231         mutex_unlock(&(*mem)->process_info->lock);
2232
2233         return ret;
2234
2235 reserve_shared_fail:
2236 bo_validation_failure:
2237         amdgpu_bo_unreserve(gws_bo);
2238 bo_reservation_failure:
2239         mutex_unlock(&(*mem)->process_info->lock);
2240         amdgpu_sync_free(&(*mem)->sync);
2241         remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
2242         amdgpu_bo_unref(&gws_bo);
2243         mutex_destroy(&(*mem)->lock);
2244         kfree(*mem);
2245         *mem = NULL;
2246         return ret;
2247 }
2248
2249 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
2250 {
2251         int ret;
2252         struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2253         struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
2254         struct amdgpu_bo *gws_bo = kgd_mem->bo;
2255
2256         /* Remove the BO from the process's validate list so the restore
2257          * worker won't touch it anymore.
2258          */
2259         remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
2260
2261         ret = amdgpu_bo_reserve(gws_bo, false);
2262         if (unlikely(ret)) {
2263                 pr_err("Reserve gws bo failed %d\n", ret);
2264                 //TODO add BO back to validate_list?
2265                 return ret;
2266         }
2267         amdgpu_amdkfd_remove_eviction_fence(gws_bo,
2268                         process_info->eviction_fence);
2269         amdgpu_bo_unreserve(gws_bo);
2270         amdgpu_sync_free(&kgd_mem->sync);
2271         amdgpu_bo_unref(&gws_bo);
2272         mutex_destroy(&kgd_mem->lock);
2273         kfree(mem);
2274         return 0;
2275 }
2276
2277 /* Returns GPU-specific tiling mode information */
2278 int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
2279                                 struct tile_config *config)
2280 {
2281         struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
2282
2283         config->gb_addr_config = adev->gfx.config.gb_addr_config;
2284         config->tile_config_ptr = adev->gfx.config.tile_mode_array;
2285         config->num_tile_configs =
2286                         ARRAY_SIZE(adev->gfx.config.tile_mode_array);
2287         config->macro_tile_config_ptr =
2288                         adev->gfx.config.macrotile_mode_array;
2289         config->num_macro_tile_configs =
2290                         ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
2291
2292         /* Those values are not set from GFX9 onwards */
2293         config->num_banks = adev->gfx.config.num_banks;
2294         config->num_ranks = adev->gfx.config.num_ranks;
2295
2296         return 0;
2297 }