drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1 /*
2  * Copyright 2014-2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 #include <linux/dma-buf.h>
23 #include <linux/list.h>
24 #include <linux/pagemap.h>
25 #include <linux/sched/mm.h>
26 #include <linux/sched/task.h>
27
28 #include "amdgpu_object.h"
29 #include "amdgpu_gem.h"
30 #include "amdgpu_vm.h"
31 #include "amdgpu_amdkfd.h"
32 #include "amdgpu_dma_buf.h"
33 #include <uapi/linux/kfd_ioctl.h>
34 #include "amdgpu_xgmi.h"
35
36 /* BO flag to indicate a KFD userptr BO */
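/* Using the topmost bit keeps this kernel-internal flag clear of the
 * UAPI AMDGPU_GEM_CREATE_* flags kept in the low bits of bo->flags.
 */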
37 #define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
38
39 /* Userptr restore delay, just long enough to allow consecutive VM
40  * changes to accumulate
41  */
42 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
43
44 /* Impose limit on how much memory KFD can use */
45 static struct {
46         uint64_t max_system_mem_limit;
47         uint64_t max_ttm_mem_limit;
48         int64_t system_mem_used;
49         int64_t ttm_mem_used;
50         spinlock_t mem_limit_lock;
51 } kfd_mem_limit;
52
53 /* Struct used for amdgpu_amdkfd_bo_validate */
54 struct amdgpu_vm_parser {
55         uint32_t        domain;
56         bool            wait;
57 };
58
59 static const char * const domain_bit_to_string[] = {
60                 "CPU",
61                 "GTT",
62                 "VRAM",
63                 "GDS",
64                 "GWS",
65                 "OA"
66 };
67
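/* ffs() gives the 1-based index of the lowest set bit, so e.g.
 * AMDGPU_GEM_DOMAIN_VRAM (0x4) resolves to domain_bit_to_string[2] == "VRAM".
 */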
68 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
69
70 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
71
72
73 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
74 {
75         return (struct amdgpu_device *)kgd;
76 }
77
78 static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
79                 struct kgd_mem *mem)
80 {
81         struct kfd_bo_va_list *entry;
82
83         list_for_each_entry(entry, &mem->bo_va_list, bo_list)
84                 if (entry->bo_va->base.vm == avm)
85                         return false;
86
87         return true;
88 }
89
90 /* Set memory usage limits. Currently, the limits are:
91  *  System (TTM + userptr) memory - 15/16th System RAM
92  *  TTM memory - 3/8th System RAM
93  */
94 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
95 {
96         struct sysinfo si;
97         uint64_t mem;
98
99         si_meminfo(&si);
100         mem = si.freeram - si.freehigh;
101         mem *= si.mem_unit;
102
103         spin_lock_init(&kfd_mem_limit.mem_limit_lock);
104         kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
105         kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
106         pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
107                 (kfd_mem_limit.max_system_mem_limit >> 20),
108                 (kfd_mem_limit.max_ttm_mem_limit >> 20));
109 }
110
111 /* Estimate page table size needed to represent a given memory size
112  *
113  * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
114  * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
115  * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
116  * for 2MB pages for TLB efficiency. However, small allocations and
117  * fragmented system memory still need some 4KB pages. We choose a
118  * compromise that should work in most cases without reserving too
119  * much memory for page tables unnecessarily (factor 16K, >> 14).
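 *
 * For example, 64 GiB of memory would reserve 64 GiB >> 14 = 4 MiB
 * for page tables.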
120  */
121 #define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
122
123 static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
124                 uint64_t size, u32 domain, bool sg)
125 {
126         uint64_t reserved_for_pt =
127                 ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
128         size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
129         int ret = 0;
130
131         acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
132                                        sizeof(struct amdgpu_bo));
133
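        /* Accounting summary: GTT BOs charge size + acc_size against both the
         * system and TTM limits, userptrs charge size + acc_size against the
         * system limit but only acc_size against TTM, and VRAM/SG BOs charge
         * only acc_size here plus the per-device VRAM counter.
         */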
134         vram_needed = 0;
135         if (domain == AMDGPU_GEM_DOMAIN_GTT) {
136                 /* TTM GTT memory */
137                 system_mem_needed = acc_size + size;
138                 ttm_mem_needed = acc_size + size;
139         } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
140                 /* Userptr */
141                 system_mem_needed = acc_size + size;
142                 ttm_mem_needed = acc_size;
143         } else {
144                 /* VRAM and SG */
145                 system_mem_needed = acc_size;
146                 ttm_mem_needed = acc_size;
147                 if (domain == AMDGPU_GEM_DOMAIN_VRAM)
148                         vram_needed = size;
149         }
150
151         spin_lock(&kfd_mem_limit.mem_limit_lock);
152
153         if (kfd_mem_limit.system_mem_used + system_mem_needed >
154             kfd_mem_limit.max_system_mem_limit)
155                 pr_debug("Set no_system_mem_limit=1 if using shared memory\n");
156
157         if ((kfd_mem_limit.system_mem_used + system_mem_needed >
158              kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
159             (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
160              kfd_mem_limit.max_ttm_mem_limit) ||
161             (adev->kfd.vram_used + vram_needed >
162              adev->gmc.real_vram_size - reserved_for_pt)) {
163                 ret = -ENOMEM;
164         } else {
165                 kfd_mem_limit.system_mem_used += system_mem_needed;
166                 kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
167                 adev->kfd.vram_used += vram_needed;
168         }
169
170         spin_unlock(&kfd_mem_limit.mem_limit_lock);
171         return ret;
172 }
173
174 static void unreserve_mem_limit(struct amdgpu_device *adev,
175                 uint64_t size, u32 domain, bool sg)
176 {
177         size_t acc_size;
178
179         acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
180                                        sizeof(struct amdgpu_bo));
181
182         spin_lock(&kfd_mem_limit.mem_limit_lock);
183         if (domain == AMDGPU_GEM_DOMAIN_GTT) {
184                 kfd_mem_limit.system_mem_used -= (acc_size + size);
185                 kfd_mem_limit.ttm_mem_used -= (acc_size + size);
186         } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
187                 kfd_mem_limit.system_mem_used -= (acc_size + size);
188                 kfd_mem_limit.ttm_mem_used -= acc_size;
189         } else {
190                 kfd_mem_limit.system_mem_used -= acc_size;
191                 kfd_mem_limit.ttm_mem_used -= acc_size;
192                 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
193                         adev->kfd.vram_used -= size;
194                         WARN_ONCE(adev->kfd.vram_used < 0,
195                                   "kfd VRAM memory accounting unbalanced");
196                 }
197         }
198         WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
199                   "kfd system memory accounting unbalanced");
200         WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
201                   "kfd TTM memory accounting unbalanced");
202
203         spin_unlock(&kfd_mem_limit.mem_limit_lock);
204 }
205
206 void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
207 {
208         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
209         u32 domain = bo->preferred_domains;
210         bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
211
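        /* Both userptr and doorbell/MMIO BOs are created in the CPU domain;
         * the KFD userptr flag distinguishes them so userptrs are not
         * accounted as SG BOs.
         */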
212         if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
213                 domain = AMDGPU_GEM_DOMAIN_CPU;
214                 sg = false;
215         }
216
217         unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
218 }
219
220
221 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
222  *  reservation object.
223  *
224  * @bo: [IN] Remove eviction fence(s) from this BO
225  * @ef: [IN] This eviction fence is removed if it
226  *  is present in the shared list.
227  *
228  * NOTE: Must be called with BO reserved, i.e. bo->tbo.base.resv locked.
229  */
230 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
231                                         struct amdgpu_amdkfd_fence *ef)
232 {
233         struct dma_resv *resv = bo->tbo.base.resv;
234         struct dma_resv_list *old, *new;
235         unsigned int i, j, k;
236
237         if (!ef)
238                 return -EINVAL;
239
240         old = dma_resv_get_list(resv);
241         if (!old)
242                 return 0;
243
244         new = kmalloc(struct_size(new, shared, old->shared_max), GFP_KERNEL);
245         if (!new)
246                 return -ENOMEM;
247
248         /* Go through all the shared fences in the reservation object and sort
249          * the interesting ones to the end of the list.
250          */
251         for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
252                 struct dma_fence *f;
253
254                 f = rcu_dereference_protected(old->shared[i],
255                                               dma_resv_held(resv));
256
257                 if (f->context == ef->base.context)
258                         RCU_INIT_POINTER(new->shared[--j], f);
259                 else
260                         RCU_INIT_POINTER(new->shared[k++], f);
261         }
262         new->shared_max = old->shared_max;
263         new->shared_count = k;
264
265         /* Install the new fence list, seqcount provides the barriers */
266         write_seqcount_begin(&resv->seq);
267         RCU_INIT_POINTER(resv->fence, new);
268         write_seqcount_end(&resv->seq);
269
270         /* Drop the references to the removed fences */
271         for (i = j, k = 0; i < old->shared_count; ++i) {
272                 struct dma_fence *f;
273
274                 f = rcu_dereference_protected(new->shared[i],
275                                               dma_resv_held(resv));
276                 dma_fence_put(f);
277         }
278         kfree_rcu(old, rcu);
279
280         return 0;
281 }
282
283 int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
284 {
285         struct amdgpu_bo *root = bo;
286         struct amdgpu_vm_bo_base *vm_bo;
287         struct amdgpu_vm *vm;
288         struct amdkfd_process_info *info;
289         struct amdgpu_amdkfd_fence *ef;
290         int ret;
291
292         /* We can always get the vm_bo from the root PD BO. */
293         while (root->parent)
294                 root = root->parent;
295
296         vm_bo = root->vm_bo;
297         if (!vm_bo)
298                 return 0;
299
300         vm = vm_bo->vm;
301         if (!vm)
302                 return 0;
303
304         info = vm->process_info;
305         if (!info || !info->eviction_fence)
306                 return 0;
307
308         ef = container_of(dma_fence_get(&info->eviction_fence->base),
309                         struct amdgpu_amdkfd_fence, base);
310
311         BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
312         ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
313         dma_resv_unlock(bo->tbo.base.resv);
314
315         dma_fence_put(&ef->base);
316         return ret;
317 }
318
319 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
320                                      bool wait)
321 {
322         struct ttm_operation_ctx ctx = { false, false };
323         int ret;
324
325         if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
326                  "Called with userptr BO"))
327                 return -EINVAL;
328
329         amdgpu_bo_placement_from_domain(bo, domain);
330
331         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
332         if (ret)
333                 goto validate_fail;
334         if (wait)
335                 amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
336
337 validate_fail:
338         return ret;
339 }
340
341 static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
342 {
343         struct amdgpu_vm_parser *p = param;
344
345         return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
346 }
347
348 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
349  *
350  * Page directories are not updated here because huge page handling
351  * during page table updates can invalidate page directory entries
352  * again. Page directories are only updated after updating page
353  * tables.
354  */
355 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
356 {
357         struct amdgpu_bo *pd = vm->root.base.bo;
358         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
359         struct amdgpu_vm_parser param;
360         int ret;
361
362         param.domain = AMDGPU_GEM_DOMAIN_VRAM;
363         param.wait = false;
364
365         ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
366                                         &param);
367         if (ret) {
368                 pr_err("failed to validate PT BOs\n");
369                 return ret;
370         }
371
372         ret = amdgpu_amdkfd_validate(&param, pd);
373         if (ret) {
374                 pr_err("failed to validate PD\n");
375                 return ret;
376         }
377
378         vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
379
380         if (vm->use_cpu_for_update) {
381                 ret = amdgpu_bo_kmap(pd, NULL);
382                 if (ret) {
383                         pr_err("failed to kmap PD, ret=%d\n", ret);
384                         return ret;
385                 }
386         }
387
388         return 0;
389 }
390
391 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
392 {
393         struct amdgpu_bo *pd = vm->root.base.bo;
394         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
395         int ret;
396
397         ret = amdgpu_vm_update_pdes(adev, vm, false);
398         if (ret)
399                 return ret;
400
401         return amdgpu_sync_fence(sync, vm->last_update);
402 }
403
404 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
405 {
406         struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
407         bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
408         bool uncached = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED;
409         uint32_t mapping_flags;
410         uint64_t pte_flags;
411         bool snoop = false;
412
413         mapping_flags = AMDGPU_VM_PAGE_READABLE;
414         if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
415                 mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
416         if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
417                 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
418
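        /* The memory type (MTYPE) depends on the ASIC and on whether the BO
         * lives in this GPU's VRAM, in a peer GPU's VRAM, or in system memory.
         */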
419         switch (adev->asic_type) {
420         case CHIP_ARCTURUS:
421                 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
422                         if (bo_adev == adev)
423                                 mapping_flags |= coherent ?
424                                         AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
425                         else
426                                 mapping_flags |= AMDGPU_VM_MTYPE_UC;
427                 } else {
428                         mapping_flags |= coherent ?
429                                 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
430                 }
431                 break;
432         case CHIP_ALDEBARAN:
433                 if (coherent && uncached) {
434                         if (adev->gmc.xgmi.connected_to_cpu ||
435                                 !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM))
436                                 snoop = true;
437                         mapping_flags |= AMDGPU_VM_MTYPE_UC;
438                 } else if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
439                         if (bo_adev == adev) {
440                                 mapping_flags |= AMDGPU_VM_MTYPE_RW;
441                                 if (adev->gmc.xgmi.connected_to_cpu)
442                                         snoop = true;
443                         } else {
444                                 mapping_flags |= AMDGPU_VM_MTYPE_NC;
445                                 if (amdgpu_xgmi_same_hive(adev, bo_adev))
446                                         snoop = true;
447                         }
448                 } else {
449                         snoop = true;
450                         if (adev->gmc.xgmi.connected_to_cpu)
451                                 /* system memory uses NC on A+A */
452                                 mapping_flags |= AMDGPU_VM_MTYPE_NC;
453                         else
454                                 mapping_flags |= coherent ?
455                                         AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
456                 }
457                 break;
458         default:
459                 mapping_flags |= coherent ?
460                         AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
461         }
462
463         pte_flags = amdgpu_gem_va_map_flags(adev, mapping_flags);
464         pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
465
466         return pte_flags;
467 }
468
469 /* add_bo_to_vm - Add a BO to a VM
470  *
471  * Everything that needs to be done only once when a BO is first added
472  * to a VM. It can later be mapped and unmapped many times without
473  * repeating these steps.
474  *
475  * 1. Allocate and initialize BO VA entry data structure
476  * 2. Add BO to the VM
477  * 3. Determine ASIC-specific PTE flags
478  * 4. Alloc page tables and directories if needed
479  * 4a.  Validate new page tables and directories
480  */
481 static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
482                 struct amdgpu_vm *vm, bool is_aql,
483                 struct kfd_bo_va_list **p_bo_va_entry)
484 {
485         int ret;
486         struct kfd_bo_va_list *bo_va_entry;
487         struct amdgpu_bo *bo = mem->bo;
488         uint64_t va = mem->va;
489         struct list_head *list_bo_va = &mem->bo_va_list;
490         unsigned long bo_size = bo->tbo.base.size;
491
492         if (!va) {
493                 pr_err("Invalid VA when adding BO to VM\n");
494                 return -EINVAL;
495         }
496
497         if (is_aql)
498                 va += bo_size;
499
500         bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
501         if (!bo_va_entry)
502                 return -ENOMEM;
503
504         pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
505                         va + bo_size, vm);
506
507         /* Add BO to VM internal data structures */
508         bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
509         if (!bo_va_entry->bo_va) {
510                 ret = -EINVAL;
511                 pr_err("Failed to add BO object to VM. ret == %d\n",
512                                 ret);
513                 goto err_vmadd;
514         }
515
516         bo_va_entry->va = va;
517         bo_va_entry->pte_flags = get_pte_flags(adev, mem);
518         bo_va_entry->kgd_dev = (void *)adev;
519         list_add(&bo_va_entry->bo_list, list_bo_va);
520
521         if (p_bo_va_entry)
522                 *p_bo_va_entry = bo_va_entry;
523
524         /* Allocate and validate page tables if needed */
525         ret = vm_validate_pt_pd_bos(vm);
526         if (ret) {
527                 pr_err("validate_pt_pd_bos() failed\n");
528                 goto err_alloc_pts;
529         }
530
531         return 0;
532
533 err_alloc_pts:
534         amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
535         list_del(&bo_va_entry->bo_list);
536 err_vmadd:
537         kfree(bo_va_entry);
538         return ret;
539 }
540
541 static void remove_bo_from_vm(struct amdgpu_device *adev,
542                 struct kfd_bo_va_list *entry, unsigned long size)
543 {
544         pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
545                         entry->va,
546                         entry->va + size, entry);
547         amdgpu_vm_bo_rmv(adev, entry->bo_va);
548         list_del(&entry->bo_list);
549         kfree(entry);
550 }
551
552 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
553                                 struct amdkfd_process_info *process_info,
554                                 bool userptr)
555 {
556         struct ttm_validate_buffer *entry = &mem->validate_list;
557         struct amdgpu_bo *bo = mem->bo;
558
559         INIT_LIST_HEAD(&entry->head);
560         entry->num_shared = 1;
561         entry->bo = &bo->tbo;
562         mutex_lock(&process_info->lock);
563         if (userptr)
564                 list_add_tail(&entry->head, &process_info->userptr_valid_list);
565         else
566                 list_add_tail(&entry->head, &process_info->kfd_bo_list);
567         mutex_unlock(&process_info->lock);
568 }
569
570 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
571                 struct amdkfd_process_info *process_info)
572 {
573         struct ttm_validate_buffer *bo_list_entry;
574
575         bo_list_entry = &mem->validate_list;
576         mutex_lock(&process_info->lock);
577         list_del(&bo_list_entry->head);
578         mutex_unlock(&process_info->lock);
579 }
580
581 /* Initializes user pages. It registers the MMU notifier and validates
582  * the userptr BO in the GTT domain.
583  *
584  * The BO must already be on the userptr_valid_list. Otherwise an
585  * eviction and restore may happen that leaves the new BO unmapped
586  * with the user mode queues running.
587  *
588  * Takes the process_info->lock to protect against concurrent restore
589  * workers.
590  *
591  * Returns 0 for success, negative errno for errors.
592  */
593 static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
594 {
595         struct amdkfd_process_info *process_info = mem->process_info;
596         struct amdgpu_bo *bo = mem->bo;
597         struct ttm_operation_ctx ctx = { true, false };
598         int ret = 0;
599
600         mutex_lock(&process_info->lock);
601
602         ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
603         if (ret) {
604                 pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
605                 goto out;
606         }
607
608         ret = amdgpu_mn_register(bo, user_addr);
609         if (ret) {
610                 pr_err("%s: Failed to register MMU notifier: %d\n",
611                        __func__, ret);
612                 goto out;
613         }
614
615         ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
616         if (ret) {
617                 pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
618                 goto unregister_out;
619         }
620
621         ret = amdgpu_bo_reserve(bo, true);
622         if (ret) {
623                 pr_err("%s: Failed to reserve BO\n", __func__);
624                 goto release_out;
625         }
626         amdgpu_bo_placement_from_domain(bo, mem->domain);
627         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
628         if (ret)
629                 pr_err("%s: failed to validate BO\n", __func__);
630         amdgpu_bo_unreserve(bo);
631
632 release_out:
633         amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
634 unregister_out:
635         if (ret)
636                 amdgpu_mn_unregister(bo);
637 out:
638         mutex_unlock(&process_info->lock);
639         return ret;
640 }
641
642 /* Reserving a BO and its page table BOs must happen atomically to
643  * avoid deadlocks. Some operations update multiple VMs at once. Track
644  * all the reservation info in a context structure. Optionally a sync
645  * object can track VM updates.
646  */
647 struct bo_vm_reservation_context {
648         struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
649         unsigned int n_vms;                 /* Number of VMs reserved       */
650         struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
651         struct ww_acquire_ctx ticket;       /* Reservation ticket           */
652         struct list_head list, duplicates;  /* BO lists                     */
653         struct amdgpu_sync *sync;           /* Pointer to sync object       */
654         bool reserved;                      /* Whether BOs are reserved     */
655 };
656
657 enum bo_vm_match {
658         BO_VM_NOT_MAPPED = 0,   /* Match VMs where a BO is not mapped */
659         BO_VM_MAPPED,           /* Match VMs where a BO is mapped     */
660         BO_VM_ALL,              /* Match all VMs a BO was added to    */
661 };
662
663 /**
664  * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
665  * @mem: KFD BO structure.
666  * @vm: the VM to reserve.
667  * @ctx: the struct that will be used in unreserve_bo_and_vms().
668  */
669 static int reserve_bo_and_vm(struct kgd_mem *mem,
670                               struct amdgpu_vm *vm,
671                               struct bo_vm_reservation_context *ctx)
672 {
673         struct amdgpu_bo *bo = mem->bo;
674         int ret;
675
676         WARN_ON(!vm);
677
678         ctx->reserved = false;
679         ctx->n_vms = 1;
680         ctx->sync = &mem->sync;
681
682         INIT_LIST_HEAD(&ctx->list);
683         INIT_LIST_HEAD(&ctx->duplicates);
684
685         ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
686         if (!ctx->vm_pd)
687                 return -ENOMEM;
688
689         ctx->kfd_bo.priority = 0;
690         ctx->kfd_bo.tv.bo = &bo->tbo;
691         ctx->kfd_bo.tv.num_shared = 1;
692         list_add(&ctx->kfd_bo.tv.head, &ctx->list);
693
694         amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
695
696         ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
697                                      false, &ctx->duplicates);
698         if (ret) {
699                 pr_err("Failed to reserve buffers in ttm.\n");
700                 kfree(ctx->vm_pd);
701                 ctx->vm_pd = NULL;
702                 return ret;
703         }
704
705         ctx->reserved = true;
706         return 0;
707 }
708
709 /**
710  * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
711  * @mem: KFD BO structure.
712  * @vm: the VM to reserve. If NULL, all VMs associated with the BO
713  * are reserved. Otherwise, only the given VM is reserved.
714  * @map_type: the mapping status that will be used to filter the VMs.
715  * @ctx: the struct that will be used in unreserve_bo_and_vms().
716  *
717  * Returns 0 for success, negative for failure.
718  */
719 static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
720                                 struct amdgpu_vm *vm, enum bo_vm_match map_type,
721                                 struct bo_vm_reservation_context *ctx)
722 {
723         struct amdgpu_bo *bo = mem->bo;
724         struct kfd_bo_va_list *entry;
725         unsigned int i;
726         int ret;
727
728         ctx->reserved = false;
729         ctx->n_vms = 0;
730         ctx->vm_pd = NULL;
731         ctx->sync = &mem->sync;
732
733         INIT_LIST_HEAD(&ctx->list);
734         INIT_LIST_HEAD(&ctx->duplicates);
735
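        /* First pass: count the VMs that match so the PD array can be sized */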
736         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
737                 if ((vm && vm != entry->bo_va->base.vm) ||
738                         (entry->is_mapped != map_type
739                         && map_type != BO_VM_ALL))
740                         continue;
741
742                 ctx->n_vms++;
743         }
744
745         if (ctx->n_vms != 0) {
746                 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
747                                      GFP_KERNEL);
748                 if (!ctx->vm_pd)
749                         return -ENOMEM;
750         }
751
752         ctx->kfd_bo.priority = 0;
753         ctx->kfd_bo.tv.bo = &bo->tbo;
754         ctx->kfd_bo.tv.num_shared = 1;
755         list_add(&ctx->kfd_bo.tv.head, &ctx->list);
756
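        /* Second pass: add the page directory BO of each matching VM to the
         * reservation list.
         */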
757         i = 0;
758         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
759                 if ((vm && vm != entry->bo_va->base.vm) ||
760                         (entry->is_mapped != map_type
761                         && map_type != BO_VM_ALL))
762                         continue;
763
764                 amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
765                                 &ctx->vm_pd[i]);
766                 i++;
767         }
768
769         ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
770                                      false, &ctx->duplicates);
771         if (ret) {
772                 pr_err("Failed to reserve buffers in ttm.\n");
773                 kfree(ctx->vm_pd);
774                 ctx->vm_pd = NULL;
775                 return ret;
776         }
777
778         ctx->reserved = true;
779         return 0;
780 }
781
782 /**
783  * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
784  * @ctx: Reservation context to unreserve
785  * @wait: Optionally wait for a sync object representing pending VM updates
786  * @intr: Whether the wait is interruptible
787  *
788  * Also frees any resources allocated in
789  * reserve_bo_and_(cond_)vm(s). Returns the status from
790  * amdgpu_sync_wait.
791  */
792 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
793                                  bool wait, bool intr)
794 {
795         int ret = 0;
796
797         if (wait)
798                 ret = amdgpu_sync_wait(ctx->sync, intr);
799
800         if (ctx->reserved)
801                 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
802         kfree(ctx->vm_pd);
803
804         ctx->sync = NULL;
805
806         ctx->reserved = false;
807         ctx->vm_pd = NULL;
808
809         return ret;
810 }
811
812 static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
813                                 struct kfd_bo_va_list *entry,
814                                 struct amdgpu_sync *sync)
815 {
816         struct amdgpu_bo_va *bo_va = entry->bo_va;
817         struct amdgpu_vm *vm = bo_va->base.vm;
818
819         amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
820
821         amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
822
823         amdgpu_sync_fence(sync, bo_va->last_pt_update);
824
825         return 0;
826 }
827
828 static int update_gpuvm_pte(struct amdgpu_device *adev,
829                 struct kfd_bo_va_list *entry,
830                 struct amdgpu_sync *sync)
831 {
832         int ret;
833         struct amdgpu_bo_va *bo_va = entry->bo_va;
834
835         /* Update the page tables  */
836         ret = amdgpu_vm_bo_update(adev, bo_va, false);
837         if (ret) {
838                 pr_err("amdgpu_vm_bo_update failed\n");
839                 return ret;
840         }
841
842         return amdgpu_sync_fence(sync, bo_va->last_pt_update);
843 }
844
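/* Map a BO at entry->va in one GPU VM. With no_update_pte set (invalid
 * userptr) only the mapping is created; the PTEs are filled in later by the
 * restore worker once the user pages are valid again.
 */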
845 static int map_bo_to_gpuvm(struct amdgpu_device *adev,
846                 struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
847                 bool no_update_pte)
848 {
849         int ret;
850
851         /* Set virtual address for the allocation */
852         ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
853                                amdgpu_bo_size(entry->bo_va->base.bo),
854                                entry->pte_flags);
855         if (ret) {
856                 pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
857                                 entry->va, ret);
858                 return ret;
859         }
860
861         if (no_update_pte)
862                 return 0;
863
864         ret = update_gpuvm_pte(adev, entry, sync);
865         if (ret) {
866                 pr_err("update_gpuvm_pte() failed\n");
867                 goto update_gpuvm_pte_failed;
868         }
869
870         return 0;
871
872 update_gpuvm_pte_failed:
873         unmap_bo_from_gpuvm(adev, entry, sync);
874         return ret;
875 }
876
877 static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
878 {
879         struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
880
881         if (!sg)
882                 return NULL;
883         if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
884                 kfree(sg);
885                 return NULL;
886         }
887         sg->sgl->dma_address = addr;
888         sg->sgl->length = size;
889 #ifdef CONFIG_NEED_SG_DMA_LENGTH
890         sg->sgl->dma_length = size;
891 #endif
892         return sg;
893 }
894
895 static int process_validate_vms(struct amdkfd_process_info *process_info)
896 {
897         struct amdgpu_vm *peer_vm;
898         int ret;
899
900         list_for_each_entry(peer_vm, &process_info->vm_list_head,
901                             vm_list_node) {
902                 ret = vm_validate_pt_pd_bos(peer_vm);
903                 if (ret)
904                         return ret;
905         }
906
907         return 0;
908 }
909
910 static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
911                                  struct amdgpu_sync *sync)
912 {
913         struct amdgpu_vm *peer_vm;
914         int ret;
915
916         list_for_each_entry(peer_vm, &process_info->vm_list_head,
917                             vm_list_node) {
918                 struct amdgpu_bo *pd = peer_vm->root.base.bo;
919
920                 ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
921                                        AMDGPU_SYNC_NE_OWNER,
922                                        AMDGPU_FENCE_OWNER_KFD);
923                 if (ret)
924                         return ret;
925         }
926
927         return 0;
928 }
929
930 static int process_update_pds(struct amdkfd_process_info *process_info,
931                               struct amdgpu_sync *sync)
932 {
933         struct amdgpu_vm *peer_vm;
934         int ret;
935
936         list_for_each_entry(peer_vm, &process_info->vm_list_head,
937                             vm_list_node) {
938                 ret = vm_update_pds(peer_vm, sync);
939                 if (ret)
940                         return ret;
941         }
942
943         return 0;
944 }
945
946 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
947                        struct dma_fence **ef)
948 {
949         struct amdkfd_process_info *info = NULL;
950         int ret;
951
952         if (!*process_info) {
953                 info = kzalloc(sizeof(*info), GFP_KERNEL);
954                 if (!info)
955                         return -ENOMEM;
956
957                 mutex_init(&info->lock);
958                 INIT_LIST_HEAD(&info->vm_list_head);
959                 INIT_LIST_HEAD(&info->kfd_bo_list);
960                 INIT_LIST_HEAD(&info->userptr_valid_list);
961                 INIT_LIST_HEAD(&info->userptr_inval_list);
962
963                 info->eviction_fence =
964                         amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
965                                                    current->mm);
966                 if (!info->eviction_fence) {
967                         pr_err("Failed to create eviction fence\n");
968                         ret = -ENOMEM;
969                         goto create_evict_fence_fail;
970                 }
971
972                 info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
973                 atomic_set(&info->evicted_bos, 0);
974                 INIT_DELAYED_WORK(&info->restore_userptr_work,
975                                   amdgpu_amdkfd_restore_userptr_worker);
976
977                 *process_info = info;
978                 *ef = dma_fence_get(&info->eviction_fence->base);
979         }
980
981         vm->process_info = *process_info;
982
983         /* Validate page directory and attach eviction fence */
984         ret = amdgpu_bo_reserve(vm->root.base.bo, true);
985         if (ret)
986                 goto reserve_pd_fail;
987         ret = vm_validate_pt_pd_bos(vm);
988         if (ret) {
989                 pr_err("validate_pt_pd_bos() failed\n");
990                 goto validate_pd_fail;
991         }
992         ret = amdgpu_bo_sync_wait(vm->root.base.bo,
993                                   AMDGPU_FENCE_OWNER_KFD, false);
994         if (ret)
995                 goto wait_pd_fail;
996         ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
997         if (ret)
998                 goto reserve_shared_fail;
999         amdgpu_bo_fence(vm->root.base.bo,
1000                         &vm->process_info->eviction_fence->base, true);
1001         amdgpu_bo_unreserve(vm->root.base.bo);
1002
1003         /* Update process info */
1004         mutex_lock(&vm->process_info->lock);
1005         list_add_tail(&vm->vm_list_node,
1006                         &(vm->process_info->vm_list_head));
1007         vm->process_info->n_vms++;
1008         mutex_unlock(&vm->process_info->lock);
1009
1010         return 0;
1011
1012 reserve_shared_fail:
1013 wait_pd_fail:
1014 validate_pd_fail:
1015         amdgpu_bo_unreserve(vm->root.base.bo);
1016 reserve_pd_fail:
1017         vm->process_info = NULL;
1018         if (info) {
1019                 /* Two fence references: one in info and one in *ef */
1020                 dma_fence_put(&info->eviction_fence->base);
1021                 dma_fence_put(*ef);
1022                 *ef = NULL;
1023                 *process_info = NULL;
1024                 put_pid(info->pid);
1025 create_evict_fence_fail:
1026                 mutex_destroy(&info->lock);
1027                 kfree(info);
1028         }
1029         return ret;
1030 }
1031
1032 int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, u32 pasid,
1033                                           void **vm, void **process_info,
1034                                           struct dma_fence **ef)
1035 {
1036         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1037         struct amdgpu_vm *new_vm;
1038         int ret;
1039
1040         new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
1041         if (!new_vm)
1042                 return -ENOMEM;
1043
1044         /* Initialize AMDGPU part of the VM */
1045         ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid);
1046         if (ret) {
1047                 pr_err("Failed init vm ret %d\n", ret);
1048                 goto amdgpu_vm_init_fail;
1049         }
1050
1051         /* Initialize KFD part of the VM and process info */
1052         ret = init_kfd_vm(new_vm, process_info, ef);
1053         if (ret)
1054                 goto init_kfd_vm_fail;
1055
1056         *vm = (void *) new_vm;
1057
1058         return 0;
1059
1060 init_kfd_vm_fail:
1061         amdgpu_vm_fini(adev, new_vm);
1062 amdgpu_vm_init_fail:
1063         kfree(new_vm);
1064         return ret;
1065 }
1066
1067 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
1068                                            struct file *filp, u32 pasid,
1069                                            void **vm, void **process_info,
1070                                            struct dma_fence **ef)
1071 {
1072         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1073         struct drm_file *drm_priv = filp->private_data;
1074         struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
1075         struct amdgpu_vm *avm = &drv_priv->vm;
1076         int ret;
1077
1078         /* Already a compute VM? */
1079         if (avm->process_info)
1080                 return -EINVAL;
1081
1082         /* Convert VM into a compute VM */
1083         ret = amdgpu_vm_make_compute(adev, avm, pasid);
1084         if (ret)
1085                 return ret;
1086
1087         /* Initialize KFD part of the VM and process info */
1088         ret = init_kfd_vm(avm, process_info, ef);
1089         if (ret)
1090                 return ret;
1091
1092         *vm = (void *)avm;
1093
1094         return 0;
1095 }
1096
1097 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1098                                     struct amdgpu_vm *vm)
1099 {
1100         struct amdkfd_process_info *process_info = vm->process_info;
1101         struct amdgpu_bo *pd = vm->root.base.bo;
1102
1103         if (!process_info)
1104                 return;
1105
1106         /* Release eviction fence from PD */
1107         amdgpu_bo_reserve(pd, false);
1108         amdgpu_bo_fence(pd, NULL, false);
1109         amdgpu_bo_unreserve(pd);
1110
1111         /* Update process info */
1112         mutex_lock(&process_info->lock);
1113         process_info->n_vms--;
1114         list_del(&vm->vm_list_node);
1115         mutex_unlock(&process_info->lock);
1116
1117         vm->process_info = NULL;
1118
1119         /* Release per-process resources when last compute VM is destroyed */
1120         if (!process_info->n_vms) {
1121                 WARN_ON(!list_empty(&process_info->kfd_bo_list));
1122                 WARN_ON(!list_empty(&process_info->userptr_valid_list));
1123                 WARN_ON(!list_empty(&process_info->userptr_inval_list));
1124
1125                 dma_fence_put(&process_info->eviction_fence->base);
1126                 cancel_delayed_work_sync(&process_info->restore_userptr_work);
1127                 put_pid(process_info->pid);
1128                 mutex_destroy(&process_info->lock);
1129                 kfree(process_info);
1130         }
1131 }
1132
1133 void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
1134 {
1135         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1136         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1137
1138         if (WARN_ON(!kgd || !vm))
1139                 return;
1140
1141         pr_debug("Destroying process vm %p\n", vm);
1142
1143         /* Release the VM context */
1144         amdgpu_vm_fini(adev, avm);
1145         kfree(vm);
1146 }
1147
1148 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
1149 {
1150         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1151         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1152
1153         if (WARN_ON(!kgd || !vm))
1154                 return;
1155
1156         pr_debug("Releasing process vm %p\n", vm);
1157
1158         /* The original pasid of the amdgpu vm has already been
1159          * released when the vm was converted to a compute vm.
1160          * The current pasid is managed by KFD and will be
1161          * released on KFD process destroy. Set the amdgpu pasid
1162          * to 0 to avoid a duplicate release.
1163          */
1164         amdgpu_vm_release_compute(adev, avm);
1165 }
1166
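/* Return the page directory base address that KFD programs for this VM.
 * ASICs older than Vega10 expect it as a GPU page frame number; newer ones
 * use the address returned by amdgpu_gmc_pd_addr() directly.
 */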
1167 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
1168 {
1169         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1170         struct amdgpu_bo *pd = avm->root.base.bo;
1171         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
1172
1173         if (adev->asic_type < CHIP_VEGA10)
1174                 return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1175         return avm->pd_phys_addr;
1176 }
1177
1178 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1179                 struct kgd_dev *kgd, uint64_t va, uint64_t size,
1180                 void *vm, struct kgd_mem **mem,
1181                 uint64_t *offset, uint32_t flags)
1182 {
1183         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1184         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1185         enum ttm_bo_type bo_type = ttm_bo_type_device;
1186         struct sg_table *sg = NULL;
1187         uint64_t user_addr = 0;
1188         struct amdgpu_bo *bo;
1189         struct drm_gem_object *gobj;
1190         u32 domain, alloc_domain;
1191         u64 alloc_flags;
1192         int ret;
1193
1194         /*
1195          * Check on which domain to allocate BO
1196          */
1197         if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
1198                 domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1199                 alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
1200                 alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
1201                         AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
1202                         AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
1203         } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
1204                 domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1205                 alloc_flags = 0;
1206         } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
1207                 domain = AMDGPU_GEM_DOMAIN_GTT;
1208                 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1209                 alloc_flags = 0;
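                /* For userptr BOs, *offset carries the user virtual address
                 * on input; the BO is created in the CPU domain and migrated
                 * to GTT in init_user_pages().
                 */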
1210                 if (!offset || !*offset)
1211                         return -EINVAL;
1212                 user_addr = untagged_addr(*offset);
1213         } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1214                         KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1215                 domain = AMDGPU_GEM_DOMAIN_GTT;
1216                 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1217                 bo_type = ttm_bo_type_sg;
1218                 alloc_flags = 0;
1219                 if (size > UINT_MAX)
1220                         return -EINVAL;
1221                 sg = create_doorbell_sg(*offset, size);
1222                 if (!sg)
1223                         return -ENOMEM;
1224         } else {
1225                 return -EINVAL;
1226         }
1227
1228         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1229         if (!*mem) {
1230                 ret = -ENOMEM;
1231                 goto err;
1232         }
1233         INIT_LIST_HEAD(&(*mem)->bo_va_list);
1234         mutex_init(&(*mem)->lock);
1235         (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1236
1237         /* Workaround for AQL queue wraparound bug. Map the same
1238          * memory twice. That means we only actually allocate half
1239          * the memory.
1240          */
1241         if ((*mem)->aql_queue)
1242                 size = size >> 1;
1243
1244         (*mem)->alloc_flags = flags;
1245
1246         amdgpu_sync_create(&(*mem)->sync);
1247
1248         ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
1249         if (ret) {
1250                 pr_debug("Insufficient memory\n");
1251                 goto err_reserve_limit;
1252         }
1253
1254         pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1255                         va, size, domain_string(alloc_domain));
1256
1257         ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags,
1258                                        bo_type, NULL, &gobj);
1259         if (ret) {
1260                 pr_debug("Failed to create BO on domain %s. ret %d\n",
1261                          domain_string(alloc_domain), ret);
1262                 goto err_bo_create;
1263         }
1264         bo = gem_to_amdgpu_bo(gobj);
1265         if (bo_type == ttm_bo_type_sg) {
1266                 bo->tbo.sg = sg;
1267                 bo->tbo.ttm->sg = sg;
1268         }
1269         bo->kfd_bo = *mem;
1270         (*mem)->bo = bo;
1271         if (user_addr)
1272                 bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;
1273
1274         (*mem)->va = va;
1275         (*mem)->domain = domain;
1276         (*mem)->mapped_to_gpu_memory = 0;
1277         (*mem)->process_info = avm->process_info;
1278         add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1279
1280         if (user_addr) {
1281                 ret = init_user_pages(*mem, user_addr);
1282                 if (ret)
1283                         goto allocate_init_user_pages_failed;
1284         }
1285
1286         if (offset)
1287                 *offset = amdgpu_bo_mmap_offset(bo);
1288
1289         return 0;
1290
1291 allocate_init_user_pages_failed:
1292         remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
1293         amdgpu_bo_unref(&bo);
1294         /* Don't unreserve system mem limit twice */
1295         goto err_reserve_limit;
1296 err_bo_create:
1297         unreserve_mem_limit(adev, size, alloc_domain, !!sg);
1298 err_reserve_limit:
1299         mutex_destroy(&(*mem)->lock);
1300         kfree(*mem);
1301 err:
1302         if (sg) {
1303                 sg_free_table(sg);
1304                 kfree(sg);
1305         }
1306         return ret;
1307 }
1308
1309 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1310                 struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size)
1311 {
1312         struct amdkfd_process_info *process_info = mem->process_info;
1313         unsigned long bo_size = mem->bo->tbo.base.size;
1314         struct kfd_bo_va_list *entry, *tmp;
1315         struct bo_vm_reservation_context ctx;
1316         struct ttm_validate_buffer *bo_list_entry;
1317         unsigned int mapped_to_gpu_memory;
1318         int ret;
1319         bool is_imported = false;
1320
1321         mutex_lock(&mem->lock);
1322         mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
1323         is_imported = mem->is_imported;
1324         mutex_unlock(&mem->lock);
1325         /* lock is not needed after this, since mem is unused and will
1326          * be freed anyway
1327          */
1328
1329         if (mapped_to_gpu_memory > 0) {
1330                 pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1331                                 mem->va, bo_size);
1332                 return -EBUSY;
1333         }
1334
1335         /* Make sure restore workers don't access the BO any more */
1336         bo_list_entry = &mem->validate_list;
1337         mutex_lock(&process_info->lock);
1338         list_del(&bo_list_entry->head);
1339         mutex_unlock(&process_info->lock);
1340
1341         /* No more MMU notifiers */
1342         amdgpu_mn_unregister(mem->bo);
1343
1344         ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1345         if (unlikely(ret))
1346                 return ret;
1347
1348         /* The eviction fence should be removed by the last unmap.
1349          * TODO: Log an error condition if the bo still has the eviction fence
1350          * attached
1351          */
1352         amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1353                                         process_info->eviction_fence);
1354         pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1355                 mem->va + bo_size * (1 + mem->aql_queue));
1356
1357         /* Remove from VM internal data structures */
1358         list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
1359                 remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
1360                                 entry, bo_size);
1361
1362         ret = unreserve_bo_and_vms(&ctx, false, false);
1363
1364         /* Free the sync object */
1365         amdgpu_sync_free(&mem->sync);
1366
1367         /* If the SG is not NULL, it's one we created for a doorbell or mmio
1368          * remap BO. We need to free it.
1369          */
1370         if (mem->bo->tbo.sg) {
1371                 sg_free_table(mem->bo->tbo.sg);
1372                 kfree(mem->bo->tbo.sg);
1373         }
1374
1375         /* Update the size of the BO being freed if it was allocated from
1376          * VRAM and is not imported.
1377          */
1378         if (size) {
1379                 if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
1380                     (!is_imported))
1381                         *size = bo_size;
1382                 else
1383                         *size = 0;
1384         }
1385
1386         /* Free the BO */
1387         drm_gem_object_put(&mem->bo->tbo.base);
1388         mutex_destroy(&mem->lock);
1389         kfree(mem);
1390
1391         return ret;
1392 }
1393
1394 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1395                 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1396 {
1397         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1398         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1399         int ret;
1400         struct amdgpu_bo *bo;
1401         uint32_t domain;
1402         struct kfd_bo_va_list *entry;
1403         struct bo_vm_reservation_context ctx;
1404         struct kfd_bo_va_list *bo_va_entry = NULL;
1405         struct kfd_bo_va_list *bo_va_entry_aql = NULL;
1406         unsigned long bo_size;
1407         bool is_invalid_userptr = false;
1408
1409         bo = mem->bo;
1410         if (!bo) {
1411                 pr_err("Invalid BO when mapping memory to GPU\n");
1412                 return -EINVAL;
1413         }
1414
1415         /* Make sure restore is not running concurrently. Since we
1416          * don't map invalid userptr BOs, we rely on the next restore
1417          * worker to do the mapping
1418          */
1419         mutex_lock(&mem->process_info->lock);
1420
1421         /* Lock the mmap lock. If we find an invalid userptr BO, we can be
1422          * sure that the MMU notifier is no longer running
1423          * concurrently and the queues are actually stopped
1424          */
1425         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1426                 mmap_write_lock(current->mm);
1427                 is_invalid_userptr = atomic_read(&mem->invalid);
1428                 mmap_write_unlock(current->mm);
1429         }
1430
1431         mutex_lock(&mem->lock);
1432
1433         domain = mem->domain;
1434         bo_size = bo->tbo.base.size;
1435
1436         pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1437                         mem->va,
1438                         mem->va + bo_size * (1 + mem->aql_queue),
1439                         vm, domain_string(domain));
1440
1441         ret = reserve_bo_and_vm(mem, vm, &ctx);
1442         if (unlikely(ret))
1443                 goto out;
1444
1445         /* Userptr can be marked as "not invalid", but not actually be
1446          * validated yet (still in the system domain). In that case
1447          * the queues are still stopped and we can leave mapping for
1448          * the next restore worker
1449          */
1450         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
1451             bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
1452                 is_invalid_userptr = true;
1453
1454         if (check_if_add_bo_to_vm(avm, mem)) {
1455                 ret = add_bo_to_vm(adev, mem, avm, false,
1456                                 &bo_va_entry);
1457                 if (ret)
1458                         goto add_bo_to_vm_failed;
1459                 if (mem->aql_queue) {
1460                         ret = add_bo_to_vm(adev, mem, avm,
1461                                         true, &bo_va_entry_aql);
1462                         if (ret)
1463                                 goto add_bo_to_vm_failed_aql;
1464                 }
1465         } else {
1466                 ret = vm_validate_pt_pd_bos(avm);
1467                 if (unlikely(ret))
1468                         goto add_bo_to_vm_failed;
1469         }
1470
1471         if (mem->mapped_to_gpu_memory == 0 &&
1472             !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1473                 /* Validate BO only once. The eviction fence gets added to BO
1474                  * the first time it is mapped. Validate will wait for all
1475                  * background evictions to complete.
1476                  */
1477                 ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1478                 if (ret) {
1479                         pr_debug("Validate failed\n");
1480                         goto map_bo_to_gpuvm_failed;
1481                 }
1482         }
1483
1484         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1485                 if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
1486                         pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1487                                         entry->va, entry->va + bo_size,
1488                                         entry);
1489
1490                         ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
1491                                               is_invalid_userptr);
1492                         if (ret) {
1493                                 pr_err("Failed to map bo to gpuvm\n");
1494                                 goto map_bo_to_gpuvm_failed;
1495                         }
1496
1497                         ret = vm_update_pds(vm, ctx.sync);
1498                         if (ret) {
1499                                 pr_err("Failed to update page directories\n");
1500                                 goto map_bo_to_gpuvm_failed;
1501                         }
1502
1503                         entry->is_mapped = true;
1504                         mem->mapped_to_gpu_memory++;
1505                         pr_debug("\t INC mapping count %d\n",
1506                                         mem->mapped_to_gpu_memory);
1507                 }
1508         }
1509
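        /* Attach the eviction fence only to BOs that are neither userptr
         * nor pinned. Userptr BOs are instead evicted through the MMU
         * notifier path (amdgpu_amdkfd_evict_userptr), and pinned BOs
         * cannot be evicted anyway.
         */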
1510         if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
1511                 amdgpu_bo_fence(bo,
1512                                 &avm->process_info->eviction_fence->base,
1513                                 true);
1514         ret = unreserve_bo_and_vms(&ctx, false, false);
1515
1516         goto out;
1517
1518 map_bo_to_gpuvm_failed:
1519         if (bo_va_entry_aql)
1520                 remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
1521 add_bo_to_vm_failed_aql:
1522         if (bo_va_entry)
1523                 remove_bo_from_vm(adev, bo_va_entry, bo_size);
1524 add_bo_to_vm_failed:
1525         unreserve_bo_and_vms(&ctx, false, false);
1526 out:
1527         mutex_unlock(&mem->process_info->lock);
1528         mutex_unlock(&mem->lock);
1529         return ret;
1530 }
1531
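/* Unmap a KFD BO from a GPUVM. Every unmapped kfd_bo_va_list entry
 * decrements mem->mapped_to_gpu_memory; once the BO is no longer mapped
 * in any VM (and is neither a userptr nor pinned), the eviction fence is
 * removed so TTM may evict the BO again.
 */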
1532 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1533                 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1534 {
1535         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1536         struct amdkfd_process_info *process_info =
1537                 ((struct amdgpu_vm *)vm)->process_info;
1538         unsigned long bo_size = mem->bo->tbo.base.size;
1539         struct kfd_bo_va_list *entry;
1540         struct bo_vm_reservation_context ctx;
1541         int ret;
1542
1543         mutex_lock(&mem->lock);
1544
1545         ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
1546         if (unlikely(ret))
1547                 goto out;
1548         /* If no VMs were reserved, it means the BO wasn't actually mapped */
1549         if (ctx.n_vms == 0) {
1550                 ret = -EINVAL;
1551                 goto unreserve_out;
1552         }
1553
1554         ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
1555         if (unlikely(ret))
1556                 goto unreserve_out;
1557
1558         pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1559                 mem->va,
1560                 mem->va + bo_size * (1 + mem->aql_queue),
1561                 vm);
1562
1563         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1564                 if (entry->bo_va->base.vm == vm && entry->is_mapped) {
1565                         pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1566                                         entry->va,
1567                                         entry->va + bo_size,
1568                                         entry);
1569
1570                         ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
1571                         if (ret == 0) {
1572                                 entry->is_mapped = false;
1573                         } else {
1574                                 pr_err("failed to unmap VA 0x%llx\n",
1575                                                 mem->va);
1576                                 goto unreserve_out;
1577                         }
1578
1579                         mem->mapped_to_gpu_memory--;
1580                         pr_debug("\t DEC mapping count %d\n",
1581                                         mem->mapped_to_gpu_memory);
1582                 }
1583         }
1584
1585         /* If BO is unmapped from all VMs, unfence it. It can be evicted if
1586          * required.
1587          */
1588         if (mem->mapped_to_gpu_memory == 0 &&
1589             !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) &&
1590             !mem->bo->tbo.pin_count)
1591                 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1592                                                 process_info->eviction_fence);
1593
1594 unreserve_out:
1595         unreserve_bo_and_vms(&ctx, false, false);
1596 out:
1597         mutex_unlock(&mem->lock);
1598         return ret;
1599 }
1600
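/* Wait for all pending map/unmap operations on a KFD BO to complete.
 * The BO's sync object is cloned under mem->lock so the actual wait can
 * happen without holding the lock.
 */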
1601 int amdgpu_amdkfd_gpuvm_sync_memory(
1602                 struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1603 {
1604         struct amdgpu_sync sync;
1605         int ret;
1606
1607         amdgpu_sync_create(&sync);
1608
1609         mutex_lock(&mem->lock);
1610         amdgpu_sync_clone(&mem->sync, &sync);
1611         mutex_unlock(&mem->lock);
1612
1613         ret = amdgpu_sync_wait(&sync, intr);
1614         amdgpu_sync_free(&sync);
1615         return ret;
1616 }
1617
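/* Pin a BO in GTT and map it into the kernel address space. The BO is
 * removed from the KFD BO list and loses its eviction fence, so the
 * restore worker never re-validates it and it cannot be evicted while
 * kmapped. Userptr BOs are rejected since they cannot be kmapped.
 */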
1618 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1619                 struct kgd_mem *mem, void **kptr, uint64_t *size)
1620 {
1621         int ret;
1622         struct amdgpu_bo *bo = mem->bo;
1623
1624         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1625                 pr_err("userptr can't be mapped to kernel\n");
1626                 return -EINVAL;
1627         }
1628
1629         /* Remove kgd_mem from kfd_bo_list so that this BO is not
1630          * re-validated by the restore worker after an eviction.
1631          */
1632         mutex_lock(&mem->process_info->lock);
1633
1634         ret = amdgpu_bo_reserve(bo, true);
1635         if (ret) {
1636                 pr_err("Failed to reserve bo. ret %d\n", ret);
1637                 goto bo_reserve_failed;
1638         }
1639
1640         ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
1641         if (ret) {
1642                 pr_err("Failed to pin bo. ret %d\n", ret);
1643                 goto pin_failed;
1644         }
1645
1646         ret = amdgpu_bo_kmap(bo, kptr);
1647         if (ret) {
1648                 pr_err("Failed to map bo to kernel. ret %d\n", ret);
1649                 goto kmap_failed;
1650         }
1651
1652         amdgpu_amdkfd_remove_eviction_fence(
1653                 bo, mem->process_info->eviction_fence);
1654         list_del_init(&mem->validate_list.head);
1655
1656         if (size)
1657                 *size = amdgpu_bo_size(bo);
1658
1659         amdgpu_bo_unreserve(bo);
1660
1661         mutex_unlock(&mem->process_info->lock);
1662         return 0;
1663
1664 kmap_failed:
1665         amdgpu_bo_unpin(bo);
1666 pin_failed:
1667         amdgpu_bo_unreserve(bo);
1668 bo_reserve_failed:
1669         mutex_unlock(&mem->process_info->lock);
1670
1671         return ret;
1672 }
1673
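/* Copy the most recent GPUVM fault information to the caller if it has
 * been updated since the last read, then clear the updated flag. The
 * memory barrier ensures the copy completes before the flag is cleared.
 */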
1674 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
1675                                               struct kfd_vm_fault_info *mem)
1676 {
1677         struct amdgpu_device *adev;
1678
1679         adev = (struct amdgpu_device *)kgd;
1680         if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
1681                 *mem = *adev->gmc.vm_fault_info;
1682                 mb();
1683                 atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1684         }
1685         return 0;
1686 }
1687
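/* Import a DMA-buf exported by amdgpu on the same device as a kgd_mem
 * so KFD can map it. Only VRAM and GTT BOs are accepted. No copy is
 * made; the underlying GEM object just gains a reference and the new
 * kgd_mem is added to the process's KFD BO list as an import.
 */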
1688 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
1689                                       struct dma_buf *dma_buf,
1690                                       uint64_t va, void *vm,
1691                                       struct kgd_mem **mem, uint64_t *size,
1692                                       uint64_t *mmap_offset)
1693 {
1694         struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
1695         struct drm_gem_object *obj;
1696         struct amdgpu_bo *bo;
1697         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1698
1699         if (dma_buf->ops != &amdgpu_dmabuf_ops)
1700                 /* Can't handle non-graphics buffers */
1701                 return -EINVAL;
1702
1703         obj = dma_buf->priv;
1704         if (drm_to_adev(obj->dev) != adev)
1705                 /* Can't handle buffers from other devices */
1706                 return -EINVAL;
1707
1708         bo = gem_to_amdgpu_bo(obj);
1709         if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
1710                                     AMDGPU_GEM_DOMAIN_GTT)))
1711                 /* Only VRAM and GTT BOs are supported */
1712                 return -EINVAL;
1713
1714         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1715         if (!*mem)
1716                 return -ENOMEM;
1717
1718         if (size)
1719                 *size = amdgpu_bo_size(bo);
1720
1721         if (mmap_offset)
1722                 *mmap_offset = amdgpu_bo_mmap_offset(bo);
1723
1724         INIT_LIST_HEAD(&(*mem)->bo_va_list);
1725         mutex_init(&(*mem)->lock);
1726
1727         (*mem)->alloc_flags =
1728                 ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1729                 KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
1730                 | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
1731                 | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
1732
1733         drm_gem_object_get(&bo->tbo.base);
1734         (*mem)->bo = bo;
1735         (*mem)->va = va;
1736         (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1737                 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
1738         (*mem)->mapped_to_gpu_memory = 0;
1739         (*mem)->process_info = avm->process_info;
1740         add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
1741         amdgpu_sync_create(&(*mem)->sync);
1742         (*mem)->is_imported = true;
1743
1744         return 0;
1745 }
1746
1747 /* Evict a userptr BO by stopping the queues if necessary
1748  *
1749  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
1750  * cannot do any memory allocations, and cannot take any locks that
1751  * are held elsewhere while allocating memory. Therefore this is as
1752  * simple as possible, using atomic counters.
1753  *
1754  * It doesn't do anything to the BO itself. The real work happens in
1755  * restore, where we get updated page addresses. This function only
1756  * ensures that GPU access to the BO is stopped.
1757  */
1758 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
1759                                 struct mm_struct *mm)
1760 {
1761         struct amdkfd_process_info *process_info = mem->process_info;
1762         int evicted_bos;
1763         int r = 0;
1764
1765         atomic_inc(&mem->invalid);
1766         evicted_bos = atomic_inc_return(&process_info->evicted_bos);
1767         if (evicted_bos == 1) {
1768                 /* First eviction, stop the queues */
1769                 r = kgd2kfd_quiesce_mm(mm);
1770                 if (r)
1771                         pr_err("Failed to quiesce KFD\n");
1772                 schedule_delayed_work(&process_info->restore_userptr_work,
1773                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1774         }
1775
1776         return r;
1777 }
1778
1779 /* Update invalid userptr BOs
1780  *
1781  * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
1782  * userptr_inval_list and updates user pages for all BOs that have
1783  * been invalidated since their last update.
1784  */
1785 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
1786                                      struct mm_struct *mm)
1787 {
1788         struct kgd_mem *mem, *tmp_mem;
1789         struct amdgpu_bo *bo;
1790         struct ttm_operation_ctx ctx = { false, false };
1791         int invalid, ret;
1792
1793         /* Move all invalidated BOs to the userptr_inval_list and
1794          * release their user pages by migrating them to the CPU domain.
1795          */
1796         list_for_each_entry_safe(mem, tmp_mem,
1797                                  &process_info->userptr_valid_list,
1798                                  validate_list.head) {
1799                 if (!atomic_read(&mem->invalid))
1800                         continue; /* BO is still valid */
1801
1802                 bo = mem->bo;
1803
1804                 if (amdgpu_bo_reserve(bo, true))
1805                         return -EAGAIN;
1806                 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1807                 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1808                 amdgpu_bo_unreserve(bo);
1809                 if (ret) {
1810                         pr_err("%s: Failed to invalidate userptr BO\n",
1811                                __func__);
1812                         return -EAGAIN;
1813                 }
1814
1815                 list_move_tail(&mem->validate_list.head,
1816                                &process_info->userptr_inval_list);
1817         }
1818
1819         if (list_empty(&process_info->userptr_inval_list))
1820                 return 0; /* All evicted userptr BOs were freed */
1821
1822         /* Go through userptr_inval_list and update any invalid user_pages */
1823         list_for_each_entry(mem, &process_info->userptr_inval_list,
1824                             validate_list.head) {
1825                 invalid = atomic_read(&mem->invalid);
1826                 if (!invalid)
1827                         /* BO hasn't been invalidated since the last
1828                          * revalidation attempt. Keep its BO list.
1829                          */
1830                         continue;
1831
1832                 bo = mem->bo;
1833
1834                 /* Get updated user pages */
1835                 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
1836                 if (ret) {
1837                         pr_debug("%s: Failed to get user pages: %d\n",
1838                                 __func__, ret);
1839
1840                         /* Return error -EBUSY or -ENOMEM, retry restore */
1841                         return ret;
1842                 }
1843
1844                 /*
1845                  * FIXME: Cannot ignore the return code, must hold
1846                  * notifier_lock
1847                  */
1848                 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
1849
1850                 /* Mark the BO as valid unless it was invalidated
1851                  * again concurrently.
1852                  */
1853                 if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
1854                         return -EAGAIN;
1855         }
1856
1857         return 0;
1858 }
1859
1860 /* Validate invalid userptr BOs
1861  *
1862  * Validates BOs on the userptr_inval_list, and moves them back to the
1863  * userptr_valid_list. Also updates GPUVM page tables with new page
1864  * addresses and waits for the page table updates to complete.
1865  */
1866 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
1867 {
1868         struct amdgpu_bo_list_entry *pd_bo_list_entries;
1869         struct list_head resv_list, duplicates;
1870         struct ww_acquire_ctx ticket;
1871         struct amdgpu_sync sync;
1872
1873         struct amdgpu_vm *peer_vm;
1874         struct kgd_mem *mem, *tmp_mem;
1875         struct amdgpu_bo *bo;
1876         struct ttm_operation_ctx ctx = { false, false };
1877         int i, ret;
1878
1879         pd_bo_list_entries = kcalloc(process_info->n_vms,
1880                                      sizeof(struct amdgpu_bo_list_entry),
1881                                      GFP_KERNEL);
1882         if (!pd_bo_list_entries) {
1883                 pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
1884                 ret = -ENOMEM;
1885                 goto out_no_mem;
1886         }
1887
1888         INIT_LIST_HEAD(&resv_list);
1889         INIT_LIST_HEAD(&duplicates);
1890
1891         /* Get all the page directory BOs that need to be reserved */
1892         i = 0;
1893         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1894                             vm_list_node)
1895                 amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
1896                                     &pd_bo_list_entries[i++]);
1897         /* Add the userptr_inval_list entries to resv_list */
1898         list_for_each_entry(mem, &process_info->userptr_inval_list,
1899                             validate_list.head) {
1900                 list_add_tail(&mem->resv_list.head, &resv_list);
1901                 mem->resv_list.bo = mem->validate_list.bo;
1902                 mem->resv_list.num_shared = mem->validate_list.num_shared;
1903         }
1904
1905         /* Reserve all BOs and page tables for validation */
1906         ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
1907         WARN(!list_empty(&duplicates), "Duplicates should be empty");
1908         if (ret)
1909                 goto out_free;
1910
1911         amdgpu_sync_create(&sync);
1912
1913         ret = process_validate_vms(process_info);
1914         if (ret)
1915                 goto unreserve_out;
1916
1917         /* Validate BOs and update GPUVM page tables */
1918         list_for_each_entry_safe(mem, tmp_mem,
1919                                  &process_info->userptr_inval_list,
1920                                  validate_list.head) {
1921                 struct kfd_bo_va_list *bo_va_entry;
1922
1923                 bo = mem->bo;
1924
1925                 /* Validate the BO if we got user pages */
1926                 if (bo->tbo.ttm->pages[0]) {
1927                         amdgpu_bo_placement_from_domain(bo, mem->domain);
1928                         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1929                         if (ret) {
1930                                 pr_err("%s: failed to validate BO\n", __func__);
1931                                 goto unreserve_out;
1932                         }
1933                 }
1934
1935                 list_move_tail(&mem->validate_list.head,
1936                                &process_info->userptr_valid_list);
1937
1938                 /* Update mapping. If the BO was not validated
1939                  * (because we couldn't get user pages), this will
1940                  * clear the page table entries, which will result in
1941                  * VM faults if the GPU tries to access the invalid
1942                  * memory.
1943                  */
1944                 list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
1945                         if (!bo_va_entry->is_mapped)
1946                                 continue;
1947
1948                         ret = update_gpuvm_pte((struct amdgpu_device *)
1949                                                bo_va_entry->kgd_dev,
1950                                                bo_va_entry, &sync);
1951                         if (ret) {
1952                                 pr_err("%s: update PTE failed\n", __func__);
1953                                 /* make sure this gets validated again */
1954                                 atomic_inc(&mem->invalid);
1955                                 goto unreserve_out;
1956                         }
1957                 }
1958         }
1959
1960         /* Update page directories */
1961         ret = process_update_pds(process_info, &sync);
1962
1963 unreserve_out:
1964         ttm_eu_backoff_reservation(&ticket, &resv_list);
1965         amdgpu_sync_wait(&sync, false);
1966         amdgpu_sync_free(&sync);
1967 out_free:
1968         kfree(pd_bo_list_entries);
1969 out_no_mem:
1970
1971         return ret;
1972 }
1973
1974 /* Worker callback to restore evicted userptr BOs
1975  *
1976  * Tries to update and validate all userptr BOs. If successful and no
1977  * concurrent evictions happened, the queues are restarted. Otherwise,
1978  * reschedule for another attempt later.
1979  */
1980 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
1981 {
1982         struct delayed_work *dwork = to_delayed_work(work);
1983         struct amdkfd_process_info *process_info =
1984                 container_of(dwork, struct amdkfd_process_info,
1985                              restore_userptr_work);
1986         struct task_struct *usertask;
1987         struct mm_struct *mm;
1988         int evicted_bos;
1989
1990         evicted_bos = atomic_read(&process_info->evicted_bos);
1991         if (!evicted_bos)
1992                 return;
1993
1994         /* Reference task and mm in case of concurrent process termination */
1995         usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
1996         if (!usertask)
1997                 return;
1998         mm = get_task_mm(usertask);
1999         if (!mm) {
2000                 put_task_struct(usertask);
2001                 return;
2002         }
2003
2004         mutex_lock(&process_info->lock);
2005
2006         if (update_invalid_user_pages(process_info, mm))
2007                 goto unlock_out;
2008         /* userptr_inval_list can be empty if all evicted userptr BOs
2009          * have been freed. In that case there is nothing to validate
2010          * and we can just restart the queues.
2011          */
2012         if (!list_empty(&process_info->userptr_inval_list)) {
2013                 if (atomic_read(&process_info->evicted_bos) != evicted_bos)
2014                         goto unlock_out; /* Concurrent eviction, try again */
2015
2016                 if (validate_invalid_user_pages(process_info))
2017                         goto unlock_out;
2018         }
2019         /* Final check for concurrent eviction and atomic update. If
2020          * another eviction happens after a successful update, it will
2021          * be a first eviction that calls quiesce_mm. The eviction
2022          * reference counting inside KFD will handle this case.
2023          */
2024         if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
2025             evicted_bos)
2026                 goto unlock_out;
2027         evicted_bos = 0;
2028         if (kgd2kfd_resume_mm(mm)) {
2029                 pr_err("%s: Failed to resume KFD\n", __func__);
2030                 /* No recovery from this failure. Probably the CP is
2031                  * hanging. No point trying again.
2032                  */
2033         }
2034
2035 unlock_out:
2036         mutex_unlock(&process_info->lock);
2037         mmput(mm);
2038         put_task_struct(usertask);
2039
2040         /* If validation failed, reschedule another attempt */
2041         if (evicted_bos)
2042                 schedule_delayed_work(&process_info->restore_userptr_work,
2043                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
2044 }
2045
2046 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
2047  *   KFD process identified by process_info
2048  *
2049  * @process_info: amdkfd_process_info of the KFD process
2050  *
2051  * After memory eviction, the restore thread calls this function. It must be
2052  * called while the process is still valid. Restoring the BOs involves:
2053  *
2054  * 1.  Release the old eviction fence and create a new one
2055  * 2.  Get two copies of the PD BO list from all the VMs. Keep one copy as pd_bo_list.
2056  * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
2057  *     BOs that need to be reserved.
2058  * 4.  Reserve all the BOs
2059  * 5.  Validate the PD and PT BOs
2060  * 6.  Validate all KFD BOs using kfd_bo_list, map them and add the new fence
2061  * 7.  Add the fence to all PD and PT BOs
2062  * 8.  Unreserve all BOs
2063  */
2064 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
2065 {
2066         struct amdgpu_bo_list_entry *pd_bo_list;
2067         struct amdkfd_process_info *process_info = info;
2068         struct amdgpu_vm *peer_vm;
2069         struct kgd_mem *mem;
2070         struct bo_vm_reservation_context ctx;
2071         struct amdgpu_amdkfd_fence *new_fence;
2072         int ret = 0, i;
2073         struct list_head duplicate_save;
2074         struct amdgpu_sync sync_obj;
2075         unsigned long failed_size = 0;
2076         unsigned long total_size = 0;
2077
2078         INIT_LIST_HEAD(&duplicate_save);
2079         INIT_LIST_HEAD(&ctx.list);
2080         INIT_LIST_HEAD(&ctx.duplicates);
2081
2082         pd_bo_list = kcalloc(process_info->n_vms,
2083                              sizeof(struct amdgpu_bo_list_entry),
2084                              GFP_KERNEL);
2085         if (!pd_bo_list)
2086                 return -ENOMEM;
2087
2088         i = 0;
2089         mutex_lock(&process_info->lock);
2090         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2091                         vm_list_node)
2092                 amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
2093
2094         /* Reserve all BOs and page tables/directory. Add all BOs from
2095          * kfd_bo_list to ctx.list
2096          */
2097         list_for_each_entry(mem, &process_info->kfd_bo_list,
2098                             validate_list.head) {
2099
2100                 list_add_tail(&mem->resv_list.head, &ctx.list);
2101                 mem->resv_list.bo = mem->validate_list.bo;
2102                 mem->resv_list.num_shared = mem->validate_list.num_shared;
2103         }
2104
2105         ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
2106                                      false, &duplicate_save);
2107         if (ret) {
2108                 pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2109                 goto ttm_reserve_fail;
2110         }
2111
2112         amdgpu_sync_create(&sync_obj);
2113
2114         /* Validate PDs and PTs */
2115         ret = process_validate_vms(process_info);
2116         if (ret)
2117                 goto validate_map_fail;
2118
2119         ret = process_sync_pds_resv(process_info, &sync_obj);
2120         if (ret) {
2121                 pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
2122                 goto validate_map_fail;
2123         }
2124
2125         /* Validate BOs and map them to GPUVM (update VM page tables). */
2126         list_for_each_entry(mem, &process_info->kfd_bo_list,
2127                             validate_list.head) {
2128
2129                 struct amdgpu_bo *bo = mem->bo;
2130                 uint32_t domain = mem->domain;
2131                 struct kfd_bo_va_list *bo_va_entry;
2132
2133                 total_size += amdgpu_bo_size(bo);
2134
2135                 ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2136                 if (ret) {
2137                         pr_debug("Memory eviction: Validate BOs failed\n");
2138                         failed_size += amdgpu_bo_size(bo);
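                        /* Validation in the preferred domain failed,
                         * e.g. because VRAM is over-committed. Fall
                         * back to GTT so the process can still be
                         * restored.
                         */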
2139                         ret = amdgpu_amdkfd_bo_validate(bo,
2140                                                 AMDGPU_GEM_DOMAIN_GTT, false);
2141                         if (ret) {
2142                                 pr_debug("Memory eviction: Try again\n");
2143                                 goto validate_map_fail;
2144                         }
2145                 }
2146                 ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving);
2147                 if (ret) {
2148                         pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2149                         goto validate_map_fail;
2150                 }
2151                 list_for_each_entry(bo_va_entry, &mem->bo_va_list,
2152                                     bo_list) {
2153                         ret = update_gpuvm_pte((struct amdgpu_device *)
2154                                               bo_va_entry->kgd_dev,
2155                                               bo_va_entry,
2156                                               &sync_obj);
2157                         if (ret) {
2158                                 pr_debug("Memory eviction: update PTE failed. Try again\n");
2159                                 goto validate_map_fail;
2160                         }
2161                 }
2162         }
2163
2164         if (failed_size)
2165                 pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
2166
2167         /* Update page directories */
2168         ret = process_update_pds(process_info, &sync_obj);
2169         if (ret) {
2170                 pr_debug("Memory eviction: update PDs failed. Try again\n");
2171                 goto validate_map_fail;
2172         }
2173
2174         /* Wait for validate and PT updates to finish */
2175         amdgpu_sync_wait(&sync_obj, false);
2176
2177         /* Release old eviction fence and create new one, because fence only
2178          * goes from unsignaled to signaled, fence cannot be reused.
2179          * Use context and mm from the old fence.
2180          */
2181         new_fence = amdgpu_amdkfd_fence_create(
2182                                 process_info->eviction_fence->base.context,
2183                                 process_info->eviction_fence->mm);
2184         if (!new_fence) {
2185                 pr_err("Failed to create eviction fence\n");
2186                 ret = -ENOMEM;
2187                 goto validate_map_fail;
2188         }
2189         dma_fence_put(&process_info->eviction_fence->base);
2190         process_info->eviction_fence = new_fence;
2191         *ef = dma_fence_get(&new_fence->base);
2192
2193         /* Attach new eviction fence to all BOs */
2194         list_for_each_entry(mem, &process_info->kfd_bo_list,
2195                 validate_list.head)
2196                 amdgpu_bo_fence(mem->bo,
2197                         &process_info->eviction_fence->base, true);
2198
2199         /* Attach eviction fence to PD / PT BOs */
2200         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2201                             vm_list_node) {
2202                 struct amdgpu_bo *bo = peer_vm->root.base.bo;
2203
2204                 amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2205         }
2206
2207 validate_map_fail:
2208         ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2209         amdgpu_sync_free(&sync_obj);
2210 ttm_reserve_fail:
2211         mutex_unlock(&process_info->lock);
2212         kfree(pd_bo_list);
2213         return ret;
2214 }
2215
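/* Wrap the GWS BO in a kgd_mem for the process described by @info,
 * validate it in the GWS domain and attach the process eviction fence
 * so that amdgpu and amdkfd users of GWS can evict each other.
 */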
2216 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
2217 {
2218         struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2219         struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
2220         int ret;
2221
2222         if (!info || !gws)
2223                 return -EINVAL;
2224
2225         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2226         if (!*mem)
2227                 return -ENOMEM;
2228
2229         mutex_init(&(*mem)->lock);
2230         INIT_LIST_HEAD(&(*mem)->bo_va_list);
2231         (*mem)->bo = amdgpu_bo_ref(gws_bo);
2232         (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
2233         (*mem)->process_info = process_info;
2234         add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
2235         amdgpu_sync_create(&(*mem)->sync);
2236
2237
2238         /* Validate the gws bo the first time it is added to the process */
2239         mutex_lock(&(*mem)->process_info->lock);
2240         ret = amdgpu_bo_reserve(gws_bo, false);
2241         if (unlikely(ret)) {
2242                 pr_err("Reserve gws bo failed %d\n", ret);
2243                 goto bo_reservation_failure;
2244         }
2245
2246         ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
2247         if (ret) {
2248                 pr_err("GWS BO validate failed %d\n", ret);
2249                 goto bo_validation_failure;
2250         }
2251         /* The GWS resource is shared between amdgpu and amdkfd.
2252          * Add the process eviction fence to the bo so they can
2253          * evict each other.
2254          */
2255         ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
2256         if (ret)
2257                 goto reserve_shared_fail;
2258         amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
2259         amdgpu_bo_unreserve(gws_bo);
2260         mutex_unlock(&(*mem)->process_info->lock);
2261
2262         return ret;
2263
2264 reserve_shared_fail:
2265 bo_validation_failure:
2266         amdgpu_bo_unreserve(gws_bo);
2267 bo_reservation_failure:
2268         mutex_unlock(&(*mem)->process_info->lock);
2269         amdgpu_sync_free(&(*mem)->sync);
2270         remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
2271         amdgpu_bo_unref(&gws_bo);
2272         mutex_destroy(&(*mem)->lock);
2273         kfree(*mem);
2274         *mem = NULL;
2275         return ret;
2276 }
2277
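/* Undo amdgpu_amdkfd_add_gws_to_process(): remove the kgd_mem from the
 * process's KFD BO list, detach the eviction fence and drop the GWS BO
 * reference.
 */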
2278 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
2279 {
2280         int ret;
2281         struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2282         struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
2283         struct amdgpu_bo *gws_bo = kgd_mem->bo;
2284
2285         /* Remove BO from process's validate list so restore worker won't touch
2286          * it anymore
2287          */
2288         remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
2289
2290         ret = amdgpu_bo_reserve(gws_bo, false);
2291         if (unlikely(ret)) {
2292                 pr_err("Reserve gws bo failed %d\n", ret);
2293                 /* TODO: add BO back to validate_list? */
2294                 return ret;
2295         }
2296         amdgpu_amdkfd_remove_eviction_fence(gws_bo,
2297                         process_info->eviction_fence);
2298         amdgpu_bo_unreserve(gws_bo);
2299         amdgpu_sync_free(&kgd_mem->sync);
2300         amdgpu_bo_unref(&gws_bo);
2301         mutex_destroy(&kgd_mem->lock);
2302         kfree(mem);
2303         return 0;
2304 }
2305
2306 /* Returns GPU-specific tiling mode information */
2307 int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
2308                                 struct tile_config *config)
2309 {
2310         struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
2311
2312         config->gb_addr_config = adev->gfx.config.gb_addr_config;
2313         config->tile_config_ptr = adev->gfx.config.tile_mode_array;
2314         config->num_tile_configs =
2315                         ARRAY_SIZE(adev->gfx.config.tile_mode_array);
2316         config->macro_tile_config_ptr =
2317                         adev->gfx.config.macrotile_mode_array;
2318         config->num_macro_tile_configs =
2319                         ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
2320
2321         /* Those values are not set from GFX9 onwards */
2322         config->num_banks = adev->gfx.config.num_banks;
2323         config->num_ranks = adev->gfx.config.num_ranks;
2324
2325         return 0;
2326 }