[linux-2.6-microblaze.git] drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1 /*
2  * Copyright 2014-2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22
23 #define pr_fmt(fmt) "kfd2kgd: " fmt
24
25 #include <linux/list.h>
26 #include <linux/pagemap.h>
27 #include <linux/sched/mm.h>
28 #include <linux/dma-buf.h>
29 #include <drm/drmP.h>
30 #include "amdgpu_object.h"
31 #include "amdgpu_vm.h"
32 #include "amdgpu_amdkfd.h"
33
34 /* Special VM and GART address alignment needed for VI pre-Fiji due to
35  * a HW bug.
36  */
37 #define VI_BO_SIZE_ALIGN (0x8000)
38
39 /* BO flag to indicate a KFD userptr BO */
40 #define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
41
42 /* Userptr restore delay, just long enough to allow consecutive VM
43  * changes to accumulate
44  */
45 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
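
/* Editorial note, a hedged usage sketch: this delay is consumed by the
 * delayed userptr restore work declared further down, roughly as
 *
 *     schedule_delayed_work(&process_info->restore_userptr_work,
 *             msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
 *
 * so that several back-to-back MMU notifier invalidations are batched into a
 * single restore pass (the worker body itself is outside this excerpt).
 */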
46
47 /* Impose limit on how much memory KFD can use */
48 static struct {
49         uint64_t max_system_mem_limit;
50         uint64_t max_ttm_mem_limit;
51         int64_t system_mem_used;
52         int64_t ttm_mem_used;
53         spinlock_t mem_limit_lock;
54 } kfd_mem_limit;
55
56 /* Struct used for amdgpu_amdkfd_bo_validate */
57 struct amdgpu_vm_parser {
58         uint32_t        domain;
59         bool            wait;
60 };
61
62 static const char * const domain_bit_to_string[] = {
63                 "CPU",
64                 "GTT",
65                 "VRAM",
66                 "GDS",
67                 "GWS",
68                 "OA"
69 };
70
71 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
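
/* Worked example (editorial): AMDGPU_GEM_DOMAIN_VRAM is bit 2 (0x4), so
 * ffs(0x4) - 1 == 2 and domain_string(AMDGPU_GEM_DOMAIN_VRAM) yields "VRAM".
 * The macro assumes a single domain bit is set; a mask with several bits set
 * reports only the lowest one.
 */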
72
73 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
74
75
76 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
77 {
78         return (struct amdgpu_device *)kgd;
79 }
80
81 static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
82                 struct kgd_mem *mem)
83 {
84         struct kfd_bo_va_list *entry;
85
86         list_for_each_entry(entry, &mem->bo_va_list, bo_list)
87                 if (entry->bo_va->base.vm == avm)
88                         return false;
89
90         return true;
91 }
92
93 /* Set memory usage limits. Currently, the limits are:
94  *  System (TTM + userptr) memory - 3/4 of system RAM
95  *  TTM memory - 3/8 of system RAM
96  */
97 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
98 {
99         struct sysinfo si;
100         uint64_t mem;
101
102         si_meminfo(&si);
103         mem = si.totalram - si.totalhigh;
104         mem *= si.mem_unit;
105
106         spin_lock_init(&kfd_mem_limit.mem_limit_lock);
107         kfd_mem_limit.max_system_mem_limit = (mem >> 1) + (mem >> 2);
108         kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
109         pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
110                 (kfd_mem_limit.max_system_mem_limit >> 20),
111                 (kfd_mem_limit.max_ttm_mem_limit >> 20));
112 }
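
/* Worked example (editorial): the shifts above implement the fractions from
 * the comment. With 16 GiB of usable RAM:
 *   max_system_mem_limit = 16/2 + 16/4 = 12 GiB  (3/4 of RAM)
 *   max_ttm_mem_limit    = 16/2 - 16/8 =  6 GiB  (3/8 of RAM)
 */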
113
114 static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
115                 uint64_t size, u32 domain, bool sg)
116 {
117         size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
118         uint64_t reserved_for_pt = amdgpu_amdkfd_total_mem_size >> 9;
119         int ret = 0;
120
121         acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
122                                        sizeof(struct amdgpu_bo));
123
124         vram_needed = 0;
125         if (domain == AMDGPU_GEM_DOMAIN_GTT) {
126                 /* TTM GTT memory */
127                 system_mem_needed = acc_size + size;
128                 ttm_mem_needed = acc_size + size;
129         } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
130                 /* Userptr */
131                 system_mem_needed = acc_size + size;
132                 ttm_mem_needed = acc_size;
133         } else {
134                 /* VRAM and SG */
135                 system_mem_needed = acc_size;
136                 ttm_mem_needed = acc_size;
137                 if (domain == AMDGPU_GEM_DOMAIN_VRAM)
138                         vram_needed = size;
139         }
140
141         spin_lock(&kfd_mem_limit.mem_limit_lock);
142
143         if ((kfd_mem_limit.system_mem_used + system_mem_needed >
144              kfd_mem_limit.max_system_mem_limit) ||
145             (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
146              kfd_mem_limit.max_ttm_mem_limit) ||
147             (adev->kfd.vram_used + vram_needed >
148              adev->gmc.real_vram_size - reserved_for_pt)) {
149                 ret = -ENOMEM;
150         } else {
151                 kfd_mem_limit.system_mem_used += system_mem_needed;
152                 kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
153                 adev->kfd.vram_used += vram_needed;
154         }
155
156         spin_unlock(&kfd_mem_limit.mem_limit_lock);
157         return ret;
158 }
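
/* Summary of the accounting above (editorial):
 *
 *   domain / type         system_mem      ttm_mem         vram
 *   GTT BO                acc + size      acc + size      0
 *   userptr (CPU, !sg)    acc + size      acc             0
 *   VRAM BO               acc             acc             size
 *   doorbell/SG (sg)      acc             acc             0
 *
 * where "acc" is the TTM accounting overhead returned by
 * ttm_bo_dma_acc_size(). In addition, 1/512th of total memory
 * (amdgpu_amdkfd_total_mem_size >> 9) is held back from VRAM for page tables.
 */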
159
160 static void unreserve_mem_limit(struct amdgpu_device *adev,
161                 uint64_t size, u32 domain, bool sg)
162 {
163         size_t acc_size;
164
165         acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
166                                        sizeof(struct amdgpu_bo));
167
168         spin_lock(&kfd_mem_limit.mem_limit_lock);
169         if (domain == AMDGPU_GEM_DOMAIN_GTT) {
170                 kfd_mem_limit.system_mem_used -= (acc_size + size);
171                 kfd_mem_limit.ttm_mem_used -= (acc_size + size);
172         } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
173                 kfd_mem_limit.system_mem_used -= (acc_size + size);
174                 kfd_mem_limit.ttm_mem_used -= acc_size;
175         } else {
176                 kfd_mem_limit.system_mem_used -= acc_size;
177                 kfd_mem_limit.ttm_mem_used -= acc_size;
178                 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
179                         adev->kfd.vram_used -= size;
180                         WARN_ONCE(adev->kfd.vram_used < 0,
181                                   "kfd VRAM memory accounting unbalanced");
182                 }
183         }
184         WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
185                   "kfd system memory accounting unbalanced");
186         WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
187                   "kfd TTM memory accounting unbalanced");
188
189         spin_unlock(&kfd_mem_limit.mem_limit_lock);
190 }
191
192 void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
193 {
194         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
195         u32 domain = bo->preferred_domains;
196         bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
197
198         if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
199                 domain = AMDGPU_GEM_DOMAIN_CPU;
200                 sg = false;
201         }
202
203         unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
204 }
205
206
207 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
208  *  reservation object.
209  *
210  * @bo: [IN] Remove eviction fence(s) from this BO
211  * @ef: [IN] This eviction fence is removed if it
212  *  is present in the shared list.
213  *
214  * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
215  */
216 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
217                                         struct amdgpu_amdkfd_fence *ef)
218 {
219         struct reservation_object *resv = bo->tbo.resv;
220         struct reservation_object_list *old, *new;
221         unsigned int i, j, k;
222
223         if (!ef)
224                 return -EINVAL;
225
226         old = reservation_object_get_list(resv);
227         if (!old)
228                 return 0;
229
230         new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
231                       GFP_KERNEL);
232         if (!new)
233                 return -ENOMEM;
234
235         /* Go through all the shared fences in the reservation object and sort
236          * the interesting ones to the end of the list.
237          */
238         for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
239                 struct dma_fence *f;
240
241                 f = rcu_dereference_protected(old->shared[i],
242                                               reservation_object_held(resv));
243
244                 if (f->context == ef->base.context)
245                         RCU_INIT_POINTER(new->shared[--j], f);
246                 else
247                         RCU_INIT_POINTER(new->shared[k++], f);
248         }
249         new->shared_max = old->shared_max;
250         new->shared_count = k;
251
252         /* Install the new fence list, seqcount provides the barriers */
253         preempt_disable();
254         write_seqcount_begin(&resv->seq);
255         RCU_INIT_POINTER(resv->fence, new);
256         write_seqcount_end(&resv->seq);
257         preempt_enable();
258
259         /* Drop the references to the removed fences */
260         for (i = j, k = 0; i < old->shared_count; ++i) {
261                 struct dma_fence *f;
262
263                 f = rcu_dereference_protected(new->shared[i],
264                                               reservation_object_held(resv));
265                 dma_fence_put(f);
266         }
267         kfree_rcu(old, rcu);
268
269         return 0;
270 }
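
/* Worked example (editorial): with old->shared = [A, E1, B, E2], where E1/E2
 * belong to the eviction fence context @ef, the sorting loop above produces
 * new->shared = [A, B, E2, E1] with shared_count = 2, so only A and B remain
 * visible; the trailing E2 and E1 are then released with dma_fence_put().
 */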
271
272 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
273                                      bool wait)
274 {
275         struct ttm_operation_ctx ctx = { false, false };
276         int ret;
277
278         if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
279                  "Called with userptr BO"))
280                 return -EINVAL;
281
282         amdgpu_bo_placement_from_domain(bo, domain);
283
284         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
285         if (ret)
286                 goto validate_fail;
287         if (wait)
288                 amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
289
290 validate_fail:
291         return ret;
292 }
293
294 static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
295 {
296         struct amdgpu_vm_parser *p = param;
297
298         return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
299 }
300
301 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
302  *
303  * Page directories are not updated here because huge page handling
304  * during page table updates can invalidate page directory entries
305  * again. Page directories are only updated after updating page
306  * tables.
307  */
308 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
309 {
310         struct amdgpu_bo *pd = vm->root.base.bo;
311         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
312         struct amdgpu_vm_parser param;
313         int ret;
314
315         param.domain = AMDGPU_GEM_DOMAIN_VRAM;
316         param.wait = false;
317
318         ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
319                                         &param);
320         if (ret) {
321                 pr_err("amdgpu: failed to validate PT BOs\n");
322                 return ret;
323         }
324
325         ret = amdgpu_amdkfd_validate(&param, pd);
326         if (ret) {
327                 pr_err("amdgpu: failed to validate PD\n");
328                 return ret;
329         }
330
331         vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
332
333         if (vm->use_cpu_for_update) {
334                 ret = amdgpu_bo_kmap(pd, NULL);
335                 if (ret) {
336                         pr_err("amdgpu: failed to kmap PD, ret=%d\n", ret);
337                         return ret;
338                 }
339         }
340
341         return 0;
342 }
343
344 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
345 {
346         struct amdgpu_bo *pd = vm->root.base.bo;
347         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
348         int ret;
349
350         ret = amdgpu_vm_update_directories(adev, vm);
351         if (ret)
352                 return ret;
353
354         return amdgpu_sync_fence(NULL, sync, vm->last_update, false);
355 }
356
357 /* add_bo_to_vm - Add a BO to a VM
358  *
359  * Everything that needs to be done only once when a BO is first added
360  * to a VM. It can later be mapped and unmapped many times without
361  * repeating these steps.
362  *
363  * 1. Allocate and initialize BO VA entry data structure
364  * 2. Add BO to the VM
365  * 3. Determine ASIC-specific PTE flags
366  * 4. Alloc page tables and directories if needed
367  * 4a.  Validate new page tables and directories
368  */
369 static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
370                 struct amdgpu_vm *vm, bool is_aql,
371                 struct kfd_bo_va_list **p_bo_va_entry)
372 {
373         int ret;
374         struct kfd_bo_va_list *bo_va_entry;
375         struct amdgpu_bo *bo = mem->bo;
376         uint64_t va = mem->va;
377         struct list_head *list_bo_va = &mem->bo_va_list;
378         unsigned long bo_size = bo->tbo.mem.size;
379
380         if (!va) {
381                 pr_err("Invalid VA when adding BO to VM\n");
382                 return -EINVAL;
383         }
384
385         if (is_aql)
386                 va += bo_size;
387
388         bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
389         if (!bo_va_entry)
390                 return -ENOMEM;
391
392         pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
393                         va + bo_size, vm);
394
395         /* Add BO to VM internal data structures */
396         bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
397         if (!bo_va_entry->bo_va) {
398                 ret = -EINVAL;
399                 pr_err("Failed to add BO object to VM. ret == %d\n",
400                                 ret);
401                 goto err_vmadd;
402         }
403
404         bo_va_entry->va = va;
405         bo_va_entry->pte_flags = amdgpu_gmc_get_pte_flags(adev,
406                                                          mem->mapping_flags);
407         bo_va_entry->kgd_dev = (void *)adev;
408         list_add(&bo_va_entry->bo_list, list_bo_va);
409
410         if (p_bo_va_entry)
411                 *p_bo_va_entry = bo_va_entry;
412
413         /* Allocate and validate page tables if needed */
414         ret = vm_validate_pt_pd_bos(vm);
415         if (ret) {
416                 pr_err("validate_pt_pd_bos() failed\n");
417                 goto err_alloc_pts;
418         }
419
420         return 0;
421
422 err_alloc_pts:
423         amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
424         list_del(&bo_va_entry->bo_list);
425 err_vmadd:
426         kfree(bo_va_entry);
427         return ret;
428 }
429
430 static void remove_bo_from_vm(struct amdgpu_device *adev,
431                 struct kfd_bo_va_list *entry, unsigned long size)
432 {
433         pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
434                         entry->va,
435                         entry->va + size, entry);
436         amdgpu_vm_bo_rmv(adev, entry->bo_va);
437         list_del(&entry->bo_list);
438         kfree(entry);
439 }
440
441 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
442                                 struct amdkfd_process_info *process_info,
443                                 bool userptr)
444 {
445         struct ttm_validate_buffer *entry = &mem->validate_list;
446         struct amdgpu_bo *bo = mem->bo;
447
448         INIT_LIST_HEAD(&entry->head);
449         entry->num_shared = 1;
450         entry->bo = &bo->tbo;
451         mutex_lock(&process_info->lock);
452         if (userptr)
453                 list_add_tail(&entry->head, &process_info->userptr_valid_list);
454         else
455                 list_add_tail(&entry->head, &process_info->kfd_bo_list);
456         mutex_unlock(&process_info->lock);
457 }
458
459 /* Initializes user pages. It registers the MMU notifier and validates
460  * the userptr BO in the GTT domain.
461  *
462  * The BO must already be on the userptr_valid_list. Otherwise an
463  * eviction and restore may happen that leaves the new BO unmapped
464  * with the user mode queues running.
465  *
466  * Takes the process_info->lock to protect against concurrent restore
467  * workers.
468  *
469  * Returns 0 for success, negative errno for errors.
470  */
471 static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
472                            uint64_t user_addr)
473 {
474         struct amdkfd_process_info *process_info = mem->process_info;
475         struct amdgpu_bo *bo = mem->bo;
476         struct ttm_operation_ctx ctx = { true, false };
477         int ret = 0;
478
479         mutex_lock(&process_info->lock);
480
481         ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0);
482         if (ret) {
483                 pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
484                 goto out;
485         }
486
487         ret = amdgpu_mn_register(bo, user_addr);
488         if (ret) {
489                 pr_err("%s: Failed to register MMU notifier: %d\n",
490                        __func__, ret);
491                 goto out;
492         }
493
494         ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, bo->tbo.ttm->pages);
495         if (ret) {
496                 pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
497                 goto unregister_out;
498         }
499
500         ret = amdgpu_bo_reserve(bo, true);
501         if (ret) {
502                 pr_err("%s: Failed to reserve BO\n", __func__);
503                 goto release_out;
504         }
505         amdgpu_bo_placement_from_domain(bo, mem->domain);
506         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
507         if (ret)
508                 pr_err("%s: failed to validate BO\n", __func__);
509         amdgpu_bo_unreserve(bo);
510
511 release_out:
512         amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
513 unregister_out:
514         if (ret)
515                 amdgpu_mn_unregister(bo);
516 out:
517         mutex_unlock(&process_info->lock);
518         return ret;
519 }
520
521 /* Reserving a BO and its page table BOs must happen atomically to
522  * avoid deadlocks. Some operations update multiple VMs at once. Track
523  * all the reservation info in a context structure. Optionally a sync
524  * object can track VM updates.
525  */
526 struct bo_vm_reservation_context {
527         struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
528         unsigned int n_vms;                 /* Number of VMs reserved       */
529         struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
530         struct ww_acquire_ctx ticket;       /* Reservation ticket           */
531         struct list_head list, duplicates;  /* BO lists                     */
532         struct amdgpu_sync *sync;           /* Pointer to sync object       */
533         bool reserved;                      /* Whether BOs are reserved     */
534 };
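
/* Typical usage of the context (editorial sketch, error handling omitted):
 *
 *     struct bo_vm_reservation_context ctx;
 *
 *     ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
 *     ...update mappings, collect fences into ctx.sync...
 *     unreserve_bo_and_vms(&ctx, true, false);
 *
 * reserve_bo_and_vm() / reserve_bo_and_cond_vms() fill the context, and
 * unreserve_bo_and_vms() optionally waits on ctx.sync and releases everything.
 */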
535
536 enum bo_vm_match {
537         BO_VM_NOT_MAPPED = 0,   /* Match VMs where a BO is not mapped */
538         BO_VM_MAPPED,           /* Match VMs where a BO is mapped     */
539         BO_VM_ALL,              /* Match all VMs a BO was added to    */
540 };
541
542 /**
543  * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
544  * @mem: KFD BO structure.
545  * @vm: the VM to reserve.
546  * @ctx: the struct that will be used in unreserve_bo_and_vms().
547  */
548 static int reserve_bo_and_vm(struct kgd_mem *mem,
549                               struct amdgpu_vm *vm,
550                               struct bo_vm_reservation_context *ctx)
551 {
552         struct amdgpu_bo *bo = mem->bo;
553         int ret;
554
555         WARN_ON(!vm);
556
557         ctx->reserved = false;
558         ctx->n_vms = 1;
559         ctx->sync = &mem->sync;
560
561         INIT_LIST_HEAD(&ctx->list);
562         INIT_LIST_HEAD(&ctx->duplicates);
563
564         ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
565         if (!ctx->vm_pd)
566                 return -ENOMEM;
567
568         ctx->kfd_bo.priority = 0;
569         ctx->kfd_bo.tv.bo = &bo->tbo;
570         ctx->kfd_bo.tv.num_shared = 1;
571         list_add(&ctx->kfd_bo.tv.head, &ctx->list);
572
573         amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
574
575         ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
576                                      false, &ctx->duplicates);
577         if (!ret) {
578                 ctx->reserved = true;
579         } else {
580                 pr_err("Failed to reserve buffers in ttm\n");
581                 kfree(ctx->vm_pd);
582                 ctx->vm_pd = NULL;
583         }
584
585         return ret;
586 }
587
588 /**
589  * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
590  * @mem: KFD BO structure.
591  * @vm: the VM to reserve. If NULL, all VMs associated with the BO are
592  * reserved. Otherwise, only the given VM is reserved.
593  * @map_type: the mapping status that will be used to filter the VMs.
594  * @ctx: the struct that will be used in unreserve_bo_and_vms().
595  *
596  * Returns 0 for success, negative for failure.
597  */
598 static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
599                                 struct amdgpu_vm *vm, enum bo_vm_match map_type,
600                                 struct bo_vm_reservation_context *ctx)
601 {
602         struct amdgpu_bo *bo = mem->bo;
603         struct kfd_bo_va_list *entry;
604         unsigned int i;
605         int ret;
606
607         ctx->reserved = false;
608         ctx->n_vms = 0;
609         ctx->vm_pd = NULL;
610         ctx->sync = &mem->sync;
611
612         INIT_LIST_HEAD(&ctx->list);
613         INIT_LIST_HEAD(&ctx->duplicates);
614
615         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
616                 if ((vm && vm != entry->bo_va->base.vm) ||
617                         (entry->is_mapped != map_type
618                         && map_type != BO_VM_ALL))
619                         continue;
620
621                 ctx->n_vms++;
622         }
623
624         if (ctx->n_vms != 0) {
625                 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
626                                      GFP_KERNEL);
627                 if (!ctx->vm_pd)
628                         return -ENOMEM;
629         }
630
631         ctx->kfd_bo.priority = 0;
632         ctx->kfd_bo.tv.bo = &bo->tbo;
633         ctx->kfd_bo.tv.num_shared = 1;
634         list_add(&ctx->kfd_bo.tv.head, &ctx->list);
635
636         i = 0;
637         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
638                 if ((vm && vm != entry->bo_va->base.vm) ||
639                         (entry->is_mapped != map_type
640                         && map_type != BO_VM_ALL))
641                         continue;
642
643                 amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
644                                 &ctx->vm_pd[i]);
645                 i++;
646         }
647
648         ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
649                                      false, &ctx->duplicates);
650         if (!ret)
651                 ctx->reserved = true;
652         else
653                 pr_err("Failed to reserve buffers in ttm.\n");
654
655         if (ret) {
656                 kfree(ctx->vm_pd);
657                 ctx->vm_pd = NULL;
658         }
659
660         return ret;
661 }
662
663 /**
664  * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
665  * @ctx: Reservation context to unreserve
666  * @wait: Optionally wait for a sync object representing pending VM updates
667  * @intr: Whether the wait is interruptible
668  *
669  * Also frees any resources allocated in
670  * reserve_bo_and_(cond_)vm(s). Returns the status from
671  * amdgpu_sync_wait.
672  */
673 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
674                                  bool wait, bool intr)
675 {
676         int ret = 0;
677
678         if (wait)
679                 ret = amdgpu_sync_wait(ctx->sync, intr);
680
681         if (ctx->reserved)
682                 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
683         kfree(ctx->vm_pd);
684
685         ctx->sync = NULL;
686
687         ctx->reserved = false;
688         ctx->vm_pd = NULL;
689
690         return ret;
691 }
692
693 static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
694                                 struct kfd_bo_va_list *entry,
695                                 struct amdgpu_sync *sync)
696 {
697         struct amdgpu_bo_va *bo_va = entry->bo_va;
698         struct amdgpu_vm *vm = bo_va->base.vm;
699
700         amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
701
702         amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
703
704         amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
705
706         return 0;
707 }
708
709 static int update_gpuvm_pte(struct amdgpu_device *adev,
710                 struct kfd_bo_va_list *entry,
711                 struct amdgpu_sync *sync)
712 {
713         int ret;
714         struct amdgpu_bo_va *bo_va = entry->bo_va;
715
716         /* Update the page tables  */
717         ret = amdgpu_vm_bo_update(adev, bo_va, false);
718         if (ret) {
719                 pr_err("amdgpu_vm_bo_update failed\n");
720                 return ret;
721         }
722
723         return amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
724 }
725
726 static int map_bo_to_gpuvm(struct amdgpu_device *adev,
727                 struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
728                 bool no_update_pte)
729 {
730         int ret;
731
732         /* Set virtual address for the allocation */
733         ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
734                                amdgpu_bo_size(entry->bo_va->base.bo),
735                                entry->pte_flags);
736         if (ret) {
737                 pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
738                                 entry->va, ret);
739                 return ret;
740         }
741
742         if (no_update_pte)
743                 return 0;
744
745         ret = update_gpuvm_pte(adev, entry, sync);
746         if (ret) {
747                 pr_err("update_gpuvm_pte() failed\n");
748                 goto update_gpuvm_pte_failed;
749         }
750
751         return 0;
752
753 update_gpuvm_pte_failed:
754         unmap_bo_from_gpuvm(adev, entry, sync);
755         return ret;
756 }
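
/* Editorial note: the no_update_pte path above is taken by
 * amdgpu_amdkfd_gpuvm_map_memory_to_gpu() for userptr BOs that are currently
 * invalidated; the mapping is recorded in the VM, but filling the PTEs is
 * deferred to the userptr restore worker once the pages are valid again.
 */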
757
758 static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
759 {
760         struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
761
762         if (!sg)
763                 return NULL;
764         if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
765                 kfree(sg);
766                 return NULL;
767         }
768         sg->sgl->dma_address = addr;
769         sg->sgl->length = size;
770 #ifdef CONFIG_NEED_SG_DMA_LENGTH
771         sg->sgl->dma_length = size;
772 #endif
773         return sg;
774 }
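
/* Editorial note: this builds a single-entry sg_table that points at an
 * already-resolved doorbell bus address, so no dma_map_sg() is involved.
 * The table is released again with sg_free_table() + kfree() in
 * amdgpu_amdkfd_gpuvm_free_memory_of_gpu() and on the allocation error path.
 */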
775
776 static int process_validate_vms(struct amdkfd_process_info *process_info)
777 {
778         struct amdgpu_vm *peer_vm;
779         int ret;
780
781         list_for_each_entry(peer_vm, &process_info->vm_list_head,
782                             vm_list_node) {
783                 ret = vm_validate_pt_pd_bos(peer_vm);
784                 if (ret)
785                         return ret;
786         }
787
788         return 0;
789 }
790
791 static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
792                                  struct amdgpu_sync *sync)
793 {
794         struct amdgpu_vm *peer_vm;
795         int ret;
796
797         list_for_each_entry(peer_vm, &process_info->vm_list_head,
798                             vm_list_node) {
799                 struct amdgpu_bo *pd = peer_vm->root.base.bo;
800
801                 ret = amdgpu_sync_resv(NULL,
802                                         sync, pd->tbo.resv,
803                                         AMDGPU_FENCE_OWNER_UNDEFINED, false);
804                 if (ret)
805                         return ret;
806         }
807
808         return 0;
809 }
810
811 static int process_update_pds(struct amdkfd_process_info *process_info,
812                               struct amdgpu_sync *sync)
813 {
814         struct amdgpu_vm *peer_vm;
815         int ret;
816
817         list_for_each_entry(peer_vm, &process_info->vm_list_head,
818                             vm_list_node) {
819                 ret = vm_update_pds(peer_vm, sync);
820                 if (ret)
821                         return ret;
822         }
823
824         return 0;
825 }
826
827 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
828                        struct dma_fence **ef)
829 {
830         struct amdkfd_process_info *info = NULL;
831         int ret;
832
833         if (!*process_info) {
834                 info = kzalloc(sizeof(*info), GFP_KERNEL);
835                 if (!info)
836                         return -ENOMEM;
837
838                 mutex_init(&info->lock);
839                 INIT_LIST_HEAD(&info->vm_list_head);
840                 INIT_LIST_HEAD(&info->kfd_bo_list);
841                 INIT_LIST_HEAD(&info->userptr_valid_list);
842                 INIT_LIST_HEAD(&info->userptr_inval_list);
843
844                 info->eviction_fence =
845                         amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
846                                                    current->mm);
847                 if (!info->eviction_fence) {
848                         pr_err("Failed to create eviction fence\n");
849                         ret = -ENOMEM;
850                         goto create_evict_fence_fail;
851                 }
852
853                 info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
854                 atomic_set(&info->evicted_bos, 0);
855                 INIT_DELAYED_WORK(&info->restore_userptr_work,
856                                   amdgpu_amdkfd_restore_userptr_worker);
857
858                 *process_info = info;
859                 *ef = dma_fence_get(&info->eviction_fence->base);
860         }
861
862         vm->process_info = *process_info;
863
864         /* Validate page directory and attach eviction fence */
865         ret = amdgpu_bo_reserve(vm->root.base.bo, true);
866         if (ret)
867                 goto reserve_pd_fail;
868         ret = vm_validate_pt_pd_bos(vm);
869         if (ret) {
870                 pr_err("validate_pt_pd_bos() failed\n");
871                 goto validate_pd_fail;
872         }
873         ret = amdgpu_bo_sync_wait(vm->root.base.bo,
874                                   AMDGPU_FENCE_OWNER_KFD, false);
875         if (ret)
876                 goto wait_pd_fail;
877         amdgpu_bo_fence(vm->root.base.bo,
878                         &vm->process_info->eviction_fence->base, true);
879         amdgpu_bo_unreserve(vm->root.base.bo);
880
881         /* Update process info */
882         mutex_lock(&vm->process_info->lock);
883         list_add_tail(&vm->vm_list_node,
884                         &(vm->process_info->vm_list_head));
885         vm->process_info->n_vms++;
886         mutex_unlock(&vm->process_info->lock);
887
888         return 0;
889
890 wait_pd_fail:
891 validate_pd_fail:
892         amdgpu_bo_unreserve(vm->root.base.bo);
893 reserve_pd_fail:
894         vm->process_info = NULL;
895         if (info) {
896                 /* Two fence references: one in info and one in *ef */
897                 dma_fence_put(&info->eviction_fence->base);
898                 dma_fence_put(*ef);
899                 *ef = NULL;
900                 *process_info = NULL;
901                 put_pid(info->pid);
902 create_evict_fence_fail:
903                 mutex_destroy(&info->lock);
904                 kfree(info);
905         }
906         return ret;
907 }
908
909 int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid,
910                                           void **vm, void **process_info,
911                                           struct dma_fence **ef)
912 {
913         struct amdgpu_device *adev = get_amdgpu_device(kgd);
914         struct amdgpu_vm *new_vm;
915         int ret;
916
917         new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
918         if (!new_vm)
919                 return -ENOMEM;
920
921         /* Initialize AMDGPU part of the VM */
922         ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid);
923         if (ret) {
924                 pr_err("Failed init vm ret %d\n", ret);
925                 goto amdgpu_vm_init_fail;
926         }
927
928         /* Initialize KFD part of the VM and process info */
929         ret = init_kfd_vm(new_vm, process_info, ef);
930         if (ret)
931                 goto init_kfd_vm_fail;
932
933         *vm = (void *) new_vm;
934
935         return 0;
936
937 init_kfd_vm_fail:
938         amdgpu_vm_fini(adev, new_vm);
939 amdgpu_vm_init_fail:
940         kfree(new_vm);
941         return ret;
942 }
943
944 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
945                                            struct file *filp, unsigned int pasid,
946                                            void **vm, void **process_info,
947                                            struct dma_fence **ef)
948 {
949         struct amdgpu_device *adev = get_amdgpu_device(kgd);
950         struct drm_file *drm_priv = filp->private_data;
951         struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
952         struct amdgpu_vm *avm = &drv_priv->vm;
953         int ret;
954
955         /* Already a compute VM? */
956         if (avm->process_info)
957                 return -EINVAL;
958
959         /* Convert VM into a compute VM */
960         ret = amdgpu_vm_make_compute(adev, avm, pasid);
961         if (ret)
962                 return ret;
963
964         /* Initialize KFD part of the VM and process info */
965         ret = init_kfd_vm(avm, process_info, ef);
966         if (ret)
967                 return ret;
968
969         *vm = (void *)avm;
970
971         return 0;
972 }
973
974 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
975                                     struct amdgpu_vm *vm)
976 {
977         struct amdkfd_process_info *process_info = vm->process_info;
978         struct amdgpu_bo *pd = vm->root.base.bo;
979
980         if (!process_info)
981                 return;
982
983         /* Release eviction fence from PD */
984         amdgpu_bo_reserve(pd, false);
985         amdgpu_bo_fence(pd, NULL, false);
986         amdgpu_bo_unreserve(pd);
987
988         /* Update process info */
989         mutex_lock(&process_info->lock);
990         process_info->n_vms--;
991         list_del(&vm->vm_list_node);
992         mutex_unlock(&process_info->lock);
993
994         /* Release per-process resources when last compute VM is destroyed */
995         if (!process_info->n_vms) {
996                 WARN_ON(!list_empty(&process_info->kfd_bo_list));
997                 WARN_ON(!list_empty(&process_info->userptr_valid_list));
998                 WARN_ON(!list_empty(&process_info->userptr_inval_list));
999
1000                 dma_fence_put(&process_info->eviction_fence->base);
1001                 cancel_delayed_work_sync(&process_info->restore_userptr_work);
1002                 put_pid(process_info->pid);
1003                 mutex_destroy(&process_info->lock);
1004                 kfree(process_info);
1005         }
1006 }
1007
1008 void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
1009 {
1010         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1011         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1012
1013         if (WARN_ON(!kgd || !vm))
1014                 return;
1015
1016         pr_debug("Destroying process vm %p\n", vm);
1017
1018         /* Release the VM context */
1019         amdgpu_vm_fini(adev, avm);
1020         kfree(vm);
1021 }
1022
1023 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
1024 {
1025         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1026         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1027
1028         if (WARN_ON(!kgd || !vm))
1029                 return;
1030
1031         pr_debug("Releasing process vm %p\n", vm);
1032
1033         /* The original PASID of the amdgpu VM was already released
1034          * when the amdgpu VM was converted to a compute VM. The
1035          * current PASID is managed by KFD and will be released on
1036          * KFD process destruction. Set the amdgpu PASID to 0 to
1037          * avoid a duplicate release.
1038          */
1039         amdgpu_vm_release_compute(adev, avm);
1040 }
1041
1042 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
1043 {
1044         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1045         struct amdgpu_bo *pd = avm->root.base.bo;
1046         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
1047
1048         if (adev->asic_type < CHIP_VEGA10)
1049                 return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1050         return avm->pd_phys_addr;
1051 }
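
/* Editorial example, assuming the usual 4 KiB GPU page size: on pre-Vega10
 * ASICs a PD at physical address 0x123456000 is reported as 0x123456
 * (addr >> AMDGPU_GPU_PAGE_SHIFT, i.e. >> 12); on Vega10 and later the value
 * from amdgpu_gmc_pd_addr() is returned unchanged.
 */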
1052
1053 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1054                 struct kgd_dev *kgd, uint64_t va, uint64_t size,
1055                 void *vm, struct kgd_mem **mem,
1056                 uint64_t *offset, uint32_t flags)
1057 {
1058         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1059         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1060         enum ttm_bo_type bo_type = ttm_bo_type_device;
1061         struct sg_table *sg = NULL;
1062         uint64_t user_addr = 0;
1063         struct amdgpu_bo *bo;
1064         struct amdgpu_bo_param bp;
1065         int byte_align;
1066         u32 domain, alloc_domain;
1067         u64 alloc_flags;
1068         uint32_t mapping_flags;
1069         int ret;
1070
1071         /*
1072          * Check on which domain to allocate BO
1073          */
1074         if (flags & ALLOC_MEM_FLAGS_VRAM) {
1075                 domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1076                 alloc_flags = AMDGPU_GEM_CREATE_VRAM_CLEARED;
1077                 alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
1078                         AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
1079                         AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
1080         } else if (flags & ALLOC_MEM_FLAGS_GTT) {
1081                 domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1082                 alloc_flags = 0;
1083         } else if (flags & ALLOC_MEM_FLAGS_USERPTR) {
1084                 domain = AMDGPU_GEM_DOMAIN_GTT;
1085                 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1086                 alloc_flags = 0;
1087                 if (!offset || !*offset)
1088                         return -EINVAL;
1089                 user_addr = *offset;
1090         } else if (flags & ALLOC_MEM_FLAGS_DOORBELL) {
1091                 domain = AMDGPU_GEM_DOMAIN_GTT;
1092                 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1093                 bo_type = ttm_bo_type_sg;
1094                 alloc_flags = 0;
1095                 if (size > UINT_MAX)
1096                         return -EINVAL;
1097                 sg = create_doorbell_sg(*offset, size);
1098                 if (!sg)
1099                         return -ENOMEM;
1100         } else {
1101                 return -EINVAL;
1102         }
1103
1104         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1105         if (!*mem) {
1106                 ret = -ENOMEM;
1107                 goto err;
1108         }
1109         INIT_LIST_HEAD(&(*mem)->bo_va_list);
1110         mutex_init(&(*mem)->lock);
1111         (*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1112
1113         /* Workaround for AQL queue wraparound bug. Map the same
1114          * memory twice. That means we only actually allocate half
1115          * the memory.
1116          */
1117         if ((*mem)->aql_queue)
1118                 size = size >> 1;
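        /* Editorial example: an 8 MiB AQL queue allocation is backed by a
         * 4 MiB BO that add_bo_to_vm() maps twice, at va and at va + 4 MiB
         * (the is_aql path), giving user space a wrap-around view.
         */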
1119
1120         /* Workaround for TLB bug on older VI chips */
1121         byte_align = (adev->family == AMDGPU_FAMILY_VI &&
1122                         adev->asic_type != CHIP_FIJI &&
1123                         adev->asic_type != CHIP_POLARIS10 &&
1124                         adev->asic_type != CHIP_POLARIS11 &&
1125                         adev->asic_type != CHIP_POLARIS12) ?
1126                         VI_BO_SIZE_ALIGN : 1;
1127
1128         mapping_flags = AMDGPU_VM_PAGE_READABLE;
1129         if (flags & ALLOC_MEM_FLAGS_WRITABLE)
1130                 mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
1131         if (flags & ALLOC_MEM_FLAGS_EXECUTABLE)
1132                 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
1133         if (flags & ALLOC_MEM_FLAGS_COHERENT)
1134                 mapping_flags |= AMDGPU_VM_MTYPE_UC;
1135         else
1136                 mapping_flags |= AMDGPU_VM_MTYPE_NC;
1137         (*mem)->mapping_flags = mapping_flags;
1138
1139         amdgpu_sync_create(&(*mem)->sync);
1140
1141         ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
1142         if (ret) {
1143                 pr_debug("Insufficient system memory\n");
1144                 goto err_reserve_limit;
1145         }
1146
1147         pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1148                         va, size, domain_string(alloc_domain));
1149
1150         memset(&bp, 0, sizeof(bp));
1151         bp.size = size;
1152         bp.byte_align = byte_align;
1153         bp.domain = alloc_domain;
1154         bp.flags = alloc_flags;
1155         bp.type = bo_type;
1156         bp.resv = NULL;
1157         ret = amdgpu_bo_create(adev, &bp, &bo);
1158         if (ret) {
1159                 pr_debug("Failed to create BO on domain %s. ret %d\n",
1160                                 domain_string(alloc_domain), ret);
1161                 goto err_bo_create;
1162         }
1163         if (bo_type == ttm_bo_type_sg) {
1164                 bo->tbo.sg = sg;
1165                 bo->tbo.ttm->sg = sg;
1166         }
1167         bo->kfd_bo = *mem;
1168         (*mem)->bo = bo;
1169         if (user_addr)
1170                 bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;
1171
1172         (*mem)->va = va;
1173         (*mem)->domain = domain;
1174         (*mem)->mapped_to_gpu_memory = 0;
1175         (*mem)->process_info = avm->process_info;
1176         add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1177
1178         if (user_addr) {
1179                 ret = init_user_pages(*mem, current->mm, user_addr);
1180                 if (ret) {
1181                         mutex_lock(&avm->process_info->lock);
1182                         list_del(&(*mem)->validate_list.head);
1183                         mutex_unlock(&avm->process_info->lock);
1184                         goto allocate_init_user_pages_failed;
1185                 }
1186         }
1187
1188         if (offset)
1189                 *offset = amdgpu_bo_mmap_offset(bo);
1190
1191         return 0;
1192
1193 allocate_init_user_pages_failed:
1194         amdgpu_bo_unref(&bo);
1195         /* Don't unreserve system mem limit twice */
1196         goto err_reserve_limit;
1197 err_bo_create:
1198         unreserve_mem_limit(adev, size, alloc_domain, !!sg);
1199 err_reserve_limit:
1200         mutex_destroy(&(*mem)->lock);
1201         kfree(*mem);
1202 err:
1203         if (sg) {
1204                 sg_free_table(sg);
1205                 kfree(sg);
1206         }
1207         return ret;
1208 }
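
/* Editorial sketch of the BO lifecycle exported by this file (normally driven
 * by the KFD ioctl layer; error handling omitted):
 *
 *     struct kgd_mem *mem;
 *     uint64_t offset = 0;
 *
 *     amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kgd, va, size, vm, &mem, &offset,
 *                     ALLOC_MEM_FLAGS_VRAM | ALLOC_MEM_FLAGS_WRITABLE);
 *     amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kgd, mem, vm);
 *     ...GPU work...
 *     amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kgd, mem, vm);
 *     amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kgd, mem);
 *
 * free_memory_of_gpu() refuses with -EBUSY while mapped_to_gpu_memory > 0, so
 * the unmap must come first.
 */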
1209
1210 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1211                 struct kgd_dev *kgd, struct kgd_mem *mem)
1212 {
1213         struct amdkfd_process_info *process_info = mem->process_info;
1214         unsigned long bo_size = mem->bo->tbo.mem.size;
1215         struct kfd_bo_va_list *entry, *tmp;
1216         struct bo_vm_reservation_context ctx;
1217         struct ttm_validate_buffer *bo_list_entry;
1218         int ret;
1219
1220         mutex_lock(&mem->lock);
1221
1222         if (mem->mapped_to_gpu_memory > 0) {
1223                 pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1224                                 mem->va, bo_size);
1225                 mutex_unlock(&mem->lock);
1226                 return -EBUSY;
1227         }
1228
1229         mutex_unlock(&mem->lock);
1230         /* lock is not needed after this, since mem is unused and will
1231          * be freed anyway
1232          */
1233
1234         /* No more MMU notifiers */
1235         amdgpu_mn_unregister(mem->bo);
1236
1237         /* Make sure restore workers don't access the BO any more */
1238         bo_list_entry = &mem->validate_list;
1239         mutex_lock(&process_info->lock);
1240         list_del(&bo_list_entry->head);
1241         mutex_unlock(&process_info->lock);
1242
1243         ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1244         if (unlikely(ret))
1245                 return ret;
1246
1247         /* The eviction fence should be removed by the last unmap.
1248          * TODO: Log an error condition if the bo still has the eviction fence
1249          * attached
1250          */
1251         amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1252                                         process_info->eviction_fence);
1253         pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1254                 mem->va + bo_size * (1 + mem->aql_queue));
1255
1256         /* Remove from VM internal data structures */
1257         list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
1258                 remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
1259                                 entry, bo_size);
1260
1261         ret = unreserve_bo_and_vms(&ctx, false, false);
1262
1263         /* Free the sync object */
1264         amdgpu_sync_free(&mem->sync);
1265
1266         /* If the SG is not NULL, it's one we created for a doorbell
1267          * BO. We need to free it.
1268          */
1269         if (mem->bo->tbo.sg) {
1270                 sg_free_table(mem->bo->tbo.sg);
1271                 kfree(mem->bo->tbo.sg);
1272         }
1273
1274         /* Free the BO */
1275         amdgpu_bo_unref(&mem->bo);
1276         mutex_destroy(&mem->lock);
1277         kfree(mem);
1278
1279         return ret;
1280 }
1281
1282 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1283                 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1284 {
1285         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1286         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1287         int ret;
1288         struct amdgpu_bo *bo;
1289         uint32_t domain;
1290         struct kfd_bo_va_list *entry;
1291         struct bo_vm_reservation_context ctx;
1292         struct kfd_bo_va_list *bo_va_entry = NULL;
1293         struct kfd_bo_va_list *bo_va_entry_aql = NULL;
1294         unsigned long bo_size;
1295         bool is_invalid_userptr = false;
1296
1297         bo = mem->bo;
1298         if (!bo) {
1299                 pr_err("Invalid BO when mapping memory to GPU\n");
1300                 return -EINVAL;
1301         }
1302
1303         /* Make sure restore is not running concurrently. Since we
1304          * don't map invalid userptr BOs, we rely on the next restore
1305          * worker to do the mapping
1306          */
1307         mutex_lock(&mem->process_info->lock);
1308
1309         /* Lock mmap-sem. If we find an invalid userptr BO, we can be
1310          * sure that the MMU notifier is no longer running
1311          * concurrently and the queues are actually stopped
1312          */
1313         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1314                 down_write(&current->mm->mmap_sem);
1315                 is_invalid_userptr = atomic_read(&mem->invalid);
1316                 up_write(&current->mm->mmap_sem);
1317         }
1318
1319         mutex_lock(&mem->lock);
1320
1321         domain = mem->domain;
1322         bo_size = bo->tbo.mem.size;
1323
1324         pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1325                         mem->va,
1326                         mem->va + bo_size * (1 + mem->aql_queue),
1327                         vm, domain_string(domain));
1328
1329         ret = reserve_bo_and_vm(mem, vm, &ctx);
1330         if (unlikely(ret))
1331                 goto out;
1332
1333         /* Userptr can be marked as "not invalid", but not actually be
1334          * validated yet (still in the system domain). In that case
1335          * the queues are still stopped and we can leave mapping for
1336          * the next restore worker
1337          */
1338         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
1339             bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
1340                 is_invalid_userptr = true;
1341
1342         if (check_if_add_bo_to_vm(avm, mem)) {
1343                 ret = add_bo_to_vm(adev, mem, avm, false,
1344                                 &bo_va_entry);
1345                 if (ret)
1346                         goto add_bo_to_vm_failed;
1347                 if (mem->aql_queue) {
1348                         ret = add_bo_to_vm(adev, mem, avm,
1349                                         true, &bo_va_entry_aql);
1350                         if (ret)
1351                                 goto add_bo_to_vm_failed_aql;
1352                 }
1353         } else {
1354                 ret = vm_validate_pt_pd_bos(avm);
1355                 if (unlikely(ret))
1356                         goto add_bo_to_vm_failed;
1357         }
1358
1359         if (mem->mapped_to_gpu_memory == 0 &&
1360             !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1361                 /* Validate BO only once. The eviction fence gets added to BO
1362                  * the first time it is mapped. Validate will wait for all
1363                  * background evictions to complete.
1364                  */
1365                 ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1366                 if (ret) {
1367                         pr_debug("Validate failed\n");
1368                         goto map_bo_to_gpuvm_failed;
1369                 }
1370         }
1371
1372         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1373                 if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
1374                         pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1375                                         entry->va, entry->va + bo_size,
1376                                         entry);
1377
1378                         ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
1379                                               is_invalid_userptr);
1380                         if (ret) {
1381                                 pr_err("Failed to map BO to gpuvm\n");
1382                                 goto map_bo_to_gpuvm_failed;
1383                         }
1384
1385                         ret = vm_update_pds(vm, ctx.sync);
1386                         if (ret) {
1387                                 pr_err("Failed to update page directories\n");
1388                                 goto map_bo_to_gpuvm_failed;
1389                         }
1390
1391                         entry->is_mapped = true;
1392                         mem->mapped_to_gpu_memory++;
1393                         pr_debug("\t INC mapping count %d\n",
1394                                         mem->mapped_to_gpu_memory);
1395                 }
1396         }
1397
1398         if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
1399                 amdgpu_bo_fence(bo,
1400                                 &avm->process_info->eviction_fence->base,
1401                                 true);
1402         ret = unreserve_bo_and_vms(&ctx, false, false);
1403
1404         goto out;
1405
1406 map_bo_to_gpuvm_failed:
1407         if (bo_va_entry_aql)
1408                 remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
1409 add_bo_to_vm_failed_aql:
1410         if (bo_va_entry)
1411                 remove_bo_from_vm(adev, bo_va_entry, bo_size);
1412 add_bo_to_vm_failed:
1413         unreserve_bo_and_vms(&ctx, false, false);
1414 out:
1415         mutex_unlock(&mem->process_info->lock);
1416         mutex_unlock(&mem->lock);
1417         return ret;
1418 }
1419
1420 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1421                 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1422 {
1423         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1424         struct amdkfd_process_info *process_info =
1425                 ((struct amdgpu_vm *)vm)->process_info;
1426         unsigned long bo_size = mem->bo->tbo.mem.size;
1427         struct kfd_bo_va_list *entry;
1428         struct bo_vm_reservation_context ctx;
1429         int ret;
1430
1431         mutex_lock(&mem->lock);
1432
1433         ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
1434         if (unlikely(ret))
1435                 goto out;
1436         /* If no VMs were reserved, it means the BO wasn't actually mapped */
1437         if (ctx.n_vms == 0) {
1438                 ret = -EINVAL;
1439                 goto unreserve_out;
1440         }
1441
1442         ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
1443         if (unlikely(ret))
1444                 goto unreserve_out;
1445
1446         pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1447                 mem->va,
1448                 mem->va + bo_size * (1 + mem->aql_queue),
1449                 vm);
1450
1451         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1452                 if (entry->bo_va->base.vm == vm && entry->is_mapped) {
1453                         pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1454                                         entry->va,
1455                                         entry->va + bo_size,
1456                                         entry);
1457
1458                         ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
1459                         if (ret == 0) {
1460                                 entry->is_mapped = false;
1461                         } else {
1462                                 pr_err("failed to unmap VA 0x%llx\n",
1463                                                 mem->va);
1464                                 goto unreserve_out;
1465                         }
1466
1467                         mem->mapped_to_gpu_memory--;
1468                         pr_debug("\t DEC mapping count %d\n",
1469                                         mem->mapped_to_gpu_memory);
1470                 }
1471         }
1472
1473         /* If BO is unmapped from all VMs, unfence it. It can be evicted if
1474          * required.
1475          */
1476         if (mem->mapped_to_gpu_memory == 0 &&
1477             !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
1478                 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1479                                                 process_info->eviction_fence);
1480
1481 unreserve_out:
1482         unreserve_bo_and_vms(&ctx, false, false);
1483 out:
1484         mutex_unlock(&mem->lock);
1485         return ret;
1486 }
1487
1488 int amdgpu_amdkfd_gpuvm_sync_memory(
1489                 struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1490 {
1491         struct amdgpu_sync sync;
1492         int ret;
1493
1494         amdgpu_sync_create(&sync);
1495
1496         mutex_lock(&mem->lock);
1497         amdgpu_sync_clone(&mem->sync, &sync);
1498         mutex_unlock(&mem->lock);
1499
1500         ret = amdgpu_sync_wait(&sync, intr);
1501         amdgpu_sync_free(&sync);
1502         return ret;
1503 }
1504
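     /* Pin a GTT BO and map it into the kernel address space for CPU
      * access (userptr BOs are rejected). The BO is taken off the KFD
      * BO list and its eviction fence is removed so it cannot be
      * evicted while the kernel mapping exists.
      */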
1505 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1506                 struct kgd_mem *mem, void **kptr, uint64_t *size)
1507 {
1508         int ret;
1509         struct amdgpu_bo *bo = mem->bo;
1510
1511         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1512                 pr_err("userptr can't be mapped to kernel\n");
1513                 return -EINVAL;
1514         }
1515
1516         /* Remove kgd_mem from the kfd_bo_list to avoid re-validating
1517          * this BO when BOs are restored after an eviction.
1518          */
1519         mutex_lock(&mem->process_info->lock);
1520
1521         ret = amdgpu_bo_reserve(bo, true);
1522         if (ret) {
1523                 pr_err("Failed to reserve bo. ret %d\n", ret);
1524                 goto bo_reserve_failed;
1525         }
1526
1527         ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
1528         if (ret) {
1529                 pr_err("Failed to pin bo. ret %d\n", ret);
1530                 goto pin_failed;
1531         }
1532
1533         ret = amdgpu_bo_kmap(bo, kptr);
1534         if (ret) {
1535                 pr_err("Failed to map bo to kernel. ret %d\n", ret);
1536                 goto kmap_failed;
1537         }
1538
1539         amdgpu_amdkfd_remove_eviction_fence(
1540                 bo, mem->process_info->eviction_fence);
1541         list_del_init(&mem->validate_list.head);
1542
1543         if (size)
1544                 *size = amdgpu_bo_size(bo);
1545
1546         amdgpu_bo_unreserve(bo);
1547
1548         mutex_unlock(&mem->process_info->lock);
1549         return 0;
1550
1551 kmap_failed:
1552         amdgpu_bo_unpin(bo);
1553 pin_failed:
1554         amdgpu_bo_unreserve(bo);
1555 bo_reserve_failed:
1556         mutex_unlock(&mem->process_info->lock);
1557
1558         return ret;
1559 }
1560
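     /* Copy the most recently recorded GPUVM fault information, if it
      * has been updated since the last query, and clear the updated flag.
      */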
1561 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
1562                                               struct kfd_vm_fault_info *mem)
1563 {
1564         struct amdgpu_device *adev;
1565
1566         adev = (struct amdgpu_device *)kgd;
1567         if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
1568                 *mem = *adev->gmc.vm_fault_info;
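                     /* Make sure the fault info copy above completes
                      * before the updated flag is cleared.
                      */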
1569                 mb();
1570                 atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1571         }
1572         return 0;
1573 }
1574
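     /* Import a DMA-buf exported by the same GPU and wrap it in a kgd_mem
      * so KFD can map it. Only VRAM and GTT graphics BOs from this device
      * are accepted; the underlying BO is referenced rather than copied.
      */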
1575 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
1576                                       struct dma_buf *dma_buf,
1577                                       uint64_t va, void *vm,
1578                                       struct kgd_mem **mem, uint64_t *size,
1579                                       uint64_t *mmap_offset)
1580 {
1581         struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
1582         struct drm_gem_object *obj;
1583         struct amdgpu_bo *bo;
1584         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1585
1586         if (dma_buf->ops != &amdgpu_dmabuf_ops)
1587                 /* Can't handle non-graphics buffers */
1588                 return -EINVAL;
1589
1590         obj = dma_buf->priv;
1591         if (obj->dev->dev_private != adev)
1592                 /* Can't handle buffers from other devices */
1593                 return -EINVAL;
1594
1595         bo = gem_to_amdgpu_bo(obj);
1596         if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
1597                                     AMDGPU_GEM_DOMAIN_GTT)))
1598                 /* Only VRAM and GTT BOs are supported */
1599                 return -EINVAL;
1600
1601         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1602         if (!*mem)
1603                 return -ENOMEM;
1604
1605         if (size)
1606                 *size = amdgpu_bo_size(bo);
1607
1608         if (mmap_offset)
1609                 *mmap_offset = amdgpu_bo_mmap_offset(bo);
1610
1611         INIT_LIST_HEAD(&(*mem)->bo_va_list);
1612         mutex_init(&(*mem)->lock);
1613         (*mem)->mapping_flags =
1614                 AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
1615                 AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_NC;
1616
1617         (*mem)->bo = amdgpu_bo_ref(bo);
1618         (*mem)->va = va;
1619         (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1620                 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
1621         (*mem)->mapped_to_gpu_memory = 0;
1622         (*mem)->process_info = avm->process_info;
1623         add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
1624         amdgpu_sync_create(&(*mem)->sync);
1625
1626         return 0;
1627 }
1628
1629 /* Evict a userptr BO by stopping the queues if necessary
1630  *
1631  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
1632  * cannot do any memory allocations, and cannot take any locks that
1633  * are held elsewhere while allocating memory. Therefore this is as
1634  * simple as possible, using atomic counters.
1635  *
1636  * It doesn't do anything to the BO itself. The real work happens in
1637  * restore, where we get updated page addresses. This function only
1638  * ensures that GPU access to the BO is stopped.
1639  */
1640 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
1641                                 struct mm_struct *mm)
1642 {
1643         struct amdkfd_process_info *process_info = mem->process_info;
1644         int invalid, evicted_bos;
1645         int r = 0;
1646
1647         invalid = atomic_inc_return(&mem->invalid);
1648         evicted_bos = atomic_inc_return(&process_info->evicted_bos);
1649         if (evicted_bos == 1) {
1650                 /* First eviction, stop the queues */
1651                 r = kgd2kfd_quiesce_mm(mm);
1652                 if (r)
1653                         pr_err("Failed to quiesce KFD\n");
1654                 schedule_delayed_work(&process_info->restore_userptr_work,
1655                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1656         }
1657
1658         return r;
1659 }
1660
1661 /* Update invalid userptr BOs
1662  *
1663  * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
1664  * userptr_inval_list and updates user pages for all BOs that have
1665  * been invalidated since their last update.
1666  */
1667 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
1668                                      struct mm_struct *mm)
1669 {
1670         struct kgd_mem *mem, *tmp_mem;
1671         struct amdgpu_bo *bo;
1672         struct ttm_operation_ctx ctx = { false, false };
1673         int invalid, ret;
1674
1675         /* Move all invalidated BOs to the userptr_inval_list and
1676          * release their user pages by migrating them to the CPU domain
1677          */
1678         list_for_each_entry_safe(mem, tmp_mem,
1679                                  &process_info->userptr_valid_list,
1680                                  validate_list.head) {
1681                 if (!atomic_read(&mem->invalid))
1682                         continue; /* BO is still valid */
1683
1684                 bo = mem->bo;
1685
1686                 if (amdgpu_bo_reserve(bo, true))
1687                         return -EAGAIN;
1688                 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1689                 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1690                 amdgpu_bo_unreserve(bo);
1691                 if (ret) {
1692                         pr_err("%s: Failed to invalidate userptr BO\n",
1693                                __func__);
1694                         return -EAGAIN;
1695                 }
1696
1697                 list_move_tail(&mem->validate_list.head,
1698                                &process_info->userptr_inval_list);
1699         }
1700
1701         if (list_empty(&process_info->userptr_inval_list))
1702                 return 0; /* All evicted userptr BOs were freed */
1703
1704         /* Go through userptr_inval_list and update any invalid user_pages */
1705         list_for_each_entry(mem, &process_info->userptr_inval_list,
1706                             validate_list.head) {
1707                 invalid = atomic_read(&mem->invalid);
1708                 if (!invalid)
1709                         /* BO hasn't been invalidated since the last
1710                          * revalidation attempt. Keep its BO list.
1711                          */
1712                         continue;
1713
1714                 bo = mem->bo;
1715
1716                 /* Get updated user pages */
1717                 ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
1718                                                    bo->tbo.ttm->pages);
1719                 if (ret) {
1720                         bo->tbo.ttm->pages[0] = NULL;
1721                         pr_info("%s: Failed to get user pages: %d\n",
1722                                 __func__, ret);
1723                         /* Pretend it succeeded. It will fail later
1724                          * with a VM fault if the GPU tries to access
1725                          * it. Better than hanging indefinitely with
1726                          * stalled user mode queues.
1727                          */
1728                 }
1729         }
1730
1731         return 0;
1732 }
1733
1734 /* Validate invalid userptr BOs
1735  *
1736  * Validates BOs on the userptr_inval_list, and moves them back to the
1737  * userptr_valid_list. Also updates GPUVM page tables with new page
1738  * addresses and waits for the page table updates to complete.
1739  */
1740 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
1741 {
1742         struct amdgpu_bo_list_entry *pd_bo_list_entries;
1743         struct list_head resv_list, duplicates;
1744         struct ww_acquire_ctx ticket;
1745         struct amdgpu_sync sync;
1746
1747         struct amdgpu_vm *peer_vm;
1748         struct kgd_mem *mem, *tmp_mem;
1749         struct amdgpu_bo *bo;
1750         struct ttm_operation_ctx ctx = { false, false };
1751         int i, ret;
1752
1753         pd_bo_list_entries = kcalloc(process_info->n_vms,
1754                                      sizeof(struct amdgpu_bo_list_entry),
1755                                      GFP_KERNEL);
1756         if (!pd_bo_list_entries) {
1757                 pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
1758                 ret = -ENOMEM;
1759                 goto out_no_mem;
1760         }
1761
1762         INIT_LIST_HEAD(&resv_list);
1763         INIT_LIST_HEAD(&duplicates);
1764
1765         /* Get all the page directory BOs that need to be reserved */
1766         i = 0;
1767         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1768                             vm_list_node)
1769                 amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
1770                                     &pd_bo_list_entries[i++]);
1771         /* Add the userptr_inval_list entries to resv_list */
1772         list_for_each_entry(mem, &process_info->userptr_inval_list,
1773                             validate_list.head) {
1774                 list_add_tail(&mem->resv_list.head, &resv_list);
1775                 mem->resv_list.bo = mem->validate_list.bo;
1776                 mem->resv_list.num_shared = mem->validate_list.num_shared;
1777         }
1778
1779         /* Reserve all BOs and page tables for validation */
1780         ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
1781         WARN(!list_empty(&duplicates), "Duplicates should be empty");
1782         if (ret)
1783                 goto out_free;
1784
1785         amdgpu_sync_create(&sync);
1786
1787         ret = process_validate_vms(process_info);
1788         if (ret)
1789                 goto unreserve_out;
1790
1791         /* Validate BOs and update GPUVM page tables */
1792         list_for_each_entry_safe(mem, tmp_mem,
1793                                  &process_info->userptr_inval_list,
1794                                  validate_list.head) {
1795                 struct kfd_bo_va_list *bo_va_entry;
1796
1797                 bo = mem->bo;
1798
1799                 /* Validate the BO if we got user pages */
1800                 if (bo->tbo.ttm->pages[0]) {
1801                         amdgpu_bo_placement_from_domain(bo, mem->domain);
1802                         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1803                         if (ret) {
1804                                 pr_err("%s: failed to validate BO\n", __func__);
1805                                 goto unreserve_out;
1806                         }
1807                 }
1808
1809                 list_move_tail(&mem->validate_list.head,
1810                                &process_info->userptr_valid_list);
1811
1812                 /* Stop HMM from tracking the userptr update. We don't check
1813                  * the return value for a concurrent CPU page table update
1814                  * because the restore worker is rescheduled whenever
1815                  * process_info->evicted_bos is updated.
1816                  */
1817                 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
1818
1819                 /* Update mapping. If the BO was not validated
1820                  * (because we couldn't get user pages), this will
1821                  * clear the page table entries, which will result in
1822                  * VM faults if the GPU tries to access the invalid
1823                  * memory.
1824                  */
1825                 list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
1826                         if (!bo_va_entry->is_mapped)
1827                                 continue;
1828
1829                         ret = update_gpuvm_pte((struct amdgpu_device *)
1830                                                bo_va_entry->kgd_dev,
1831                                                bo_va_entry, &sync);
1832                         if (ret) {
1833                                 pr_err("%s: update PTE failed\n", __func__);
1834                                 /* make sure this gets validated again */
1835                                 atomic_inc(&mem->invalid);
1836                                 goto unreserve_out;
1837                         }
1838                 }
1839         }
1840
1841         /* Update page directories */
1842         ret = process_update_pds(process_info, &sync);
1843
1844 unreserve_out:
1845         ttm_eu_backoff_reservation(&ticket, &resv_list);
1846         amdgpu_sync_wait(&sync, false);
1847         amdgpu_sync_free(&sync);
1848 out_free:
1849         kfree(pd_bo_list_entries);
1850 out_no_mem:
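             /* Mark the HMM user page retrieval as done for any BOs that
              * are still on the userptr_inval_list, whether or not
              * revalidation succeeded.
              */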
1851         list_for_each_entry_safe(mem, tmp_mem,
1852                                  &process_info->userptr_inval_list,
1853                                  validate_list.head) {
1854                 bo = mem->bo;
1855                 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
1856         }
1857
1858         return ret;
1859 }
1860
1861 /* Worker callback to restore evicted userptr BOs
1862  *
1863  * Tries to update and validate all userptr BOs. If successful and no
1864  * concurrent evictions happened, the queues are restarted. Otherwise,
1865  * reschedule for another attempt later.
1866  */
1867 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
1868 {
1869         struct delayed_work *dwork = to_delayed_work(work);
1870         struct amdkfd_process_info *process_info =
1871                 container_of(dwork, struct amdkfd_process_info,
1872                              restore_userptr_work);
1873         struct task_struct *usertask;
1874         struct mm_struct *mm;
1875         int evicted_bos;
1876
1877         evicted_bos = atomic_read(&process_info->evicted_bos);
1878         if (!evicted_bos)
1879                 return;
1880
1881         /* Reference task and mm in case of concurrent process termination */
1882         usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
1883         if (!usertask)
1884                 return;
1885         mm = get_task_mm(usertask);
1886         if (!mm) {
1887                 put_task_struct(usertask);
1888                 return;
1889         }
1890
1891         mutex_lock(&process_info->lock);
1892
1893         if (update_invalid_user_pages(process_info, mm))
1894                 goto unlock_out;
1895         /* userptr_inval_list can be empty if all evicted userptr BOs
1896          * have been freed. In that case there is nothing to validate
1897          * and we can just restart the queues.
1898          */
1899         if (!list_empty(&process_info->userptr_inval_list)) {
1900                 if (atomic_read(&process_info->evicted_bos) != evicted_bos)
1901                         goto unlock_out; /* Concurrent eviction, try again */
1902
1903                 if (validate_invalid_user_pages(process_info))
1904                         goto unlock_out;
1905         }
1906         /* Final check for a concurrent eviction and atomic update. If
1907          * another eviction happens after the successful update, it will
1908          * be a first eviction that calls quiesce_mm. The eviction
1909          * reference counting inside KFD will handle this case.
1910          */
1911         if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
1912             evicted_bos)
1913                 goto unlock_out;
1914         evicted_bos = 0;
1915         if (kgd2kfd_resume_mm(mm)) {
1916                 pr_err("%s: Failed to resume KFD\n", __func__);
1917                 /* No recovery from this failure. Probably the CP is
1918                  * hanging. No point trying again.
1919                  */
1920         }
1921 unlock_out:
1922         mutex_unlock(&process_info->lock);
1923         mmput(mm);
1924         put_task_struct(usertask);
1925
1926         /* If validation failed, reschedule another attempt */
1927         if (evicted_bos)
1928                 schedule_delayed_work(&process_info->restore_userptr_work,
1929                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1930 }
1931
1932 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
1933  *   KFD process identified by process_info
1934  *
1935  * @process_info: amdkfd_process_info of the KFD process
1936  *
1937  * After memory eviction, the restore thread calls this function. It must be
1938  * called while the process is still valid. BO restore involves:
1939  *
1940  * 1.  Get the PD BOs of all VMs and the BOs on kfd_bo_list, and add them
1941  *     to a single reservation list (ctx.list).
1942  * 2.  Reserve all BOs on ctx.list.
1943  * 3.  Validate the PD and PT BOs.
1944  * 4.  Validate all KFD BOs on kfd_bo_list and update their GPUVM mappings.
1945  * 5.  Wait for the validation and page table updates to finish.
1946  * 6.  Release the old eviction fence and create a new one.
1947  * 7.  Attach the new fence to all KFD BOs and to all PD and PT BOs.
1948  * 8.  Unreserve all BOs.
1949  */
1950 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
1951 {
1952         struct amdgpu_bo_list_entry *pd_bo_list;
1953         struct amdkfd_process_info *process_info = info;
1954         struct amdgpu_vm *peer_vm;
1955         struct kgd_mem *mem;
1956         struct bo_vm_reservation_context ctx;
1957         struct amdgpu_amdkfd_fence *new_fence;
1958         int ret = 0, i;
1959         struct list_head duplicate_save;
1960         struct amdgpu_sync sync_obj;
1961
1962         INIT_LIST_HEAD(&duplicate_save);
1963         INIT_LIST_HEAD(&ctx.list);
1964         INIT_LIST_HEAD(&ctx.duplicates);
1965
1966         pd_bo_list = kcalloc(process_info->n_vms,
1967                              sizeof(struct amdgpu_bo_list_entry),
1968                              GFP_KERNEL);
1969         if (!pd_bo_list)
1970                 return -ENOMEM;
1971
1972         i = 0;
1973         mutex_lock(&process_info->lock);
1974         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1975                         vm_list_node)
1976                 amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
1977
1978         /* Reserve all BOs and page tables/directory. Add all BOs from
1979          * kfd_bo_list to ctx.list
1980          */
1981         list_for_each_entry(mem, &process_info->kfd_bo_list,
1982                             validate_list.head) {
1983
1984                 list_add_tail(&mem->resv_list.head, &ctx.list);
1985                 mem->resv_list.bo = mem->validate_list.bo;
1986                 mem->resv_list.num_shared = mem->validate_list.num_shared;
1987         }
1988
1989         ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
1990                                      false, &duplicate_save);
1991         if (ret) {
1992                 pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
1993                 goto ttm_reserve_fail;
1994         }
1995
1996         amdgpu_sync_create(&sync_obj);
1997
1998         /* Validate PDs and PTs */
1999         ret = process_validate_vms(process_info);
2000         if (ret)
2001                 goto validate_map_fail;
2002
2003         ret = process_sync_pds_resv(process_info, &sync_obj);
2004         if (ret) {
2005                 pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
2006                 goto validate_map_fail;
2007         }
2008
2009         /* Validate BOs and map them to GPUVM (update VM page tables). */
2010         list_for_each_entry(mem, &process_info->kfd_bo_list,
2011                             validate_list.head) {
2012
2013                 struct amdgpu_bo *bo = mem->bo;
2014                 uint32_t domain = mem->domain;
2015                 struct kfd_bo_va_list *bo_va_entry;
2016
2017                 ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2018                 if (ret) {
2019                         pr_debug("Memory eviction: Validate BOs failed. Try again\n");
2020                         goto validate_map_fail;
2021                 }
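                     /* Collect the BO's move fence in sync_obj; it is
                      * waited on together with the page table updates
                      * below.
                      */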
2022                 ret = amdgpu_sync_fence(NULL, &sync_obj, bo->tbo.moving, false);
2023                 if (ret) {
2024                         pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2025                         goto validate_map_fail;
2026                 }
2027                 list_for_each_entry(bo_va_entry, &mem->bo_va_list,
2028                                     bo_list) {
2029                         ret = update_gpuvm_pte((struct amdgpu_device *)
2030                                               bo_va_entry->kgd_dev,
2031                                               bo_va_entry,
2032                                               &sync_obj);
2033                         if (ret) {
2034                                 pr_debug("Memory eviction: update PTE failed. Try again\n");
2035                                 goto validate_map_fail;
2036                         }
2037                 }
2038         }
2039
2040         /* Update page directories */
2041         ret = process_update_pds(process_info, &sync_obj);
2042         if (ret) {
2043                 pr_debug("Memory eviction: update PDs failed. Try again\n");
2044                 goto validate_map_fail;
2045         }
2046
2047         /* Wait for validate and PT updates to finish */
2048         amdgpu_sync_wait(&sync_obj, false);
2049
2050         /* Release the old eviction fence and create a new one. A fence can
2051          * only go from unsignaled to signaled, so it cannot be reused.
2052          * Use the context and mm from the old fence.
2053          */
2054         new_fence = amdgpu_amdkfd_fence_create(
2055                                 process_info->eviction_fence->base.context,
2056                                 process_info->eviction_fence->mm);
2057         if (!new_fence) {
2058                 pr_err("Failed to create eviction fence\n");
2059                 ret = -ENOMEM;
2060                 goto validate_map_fail;
2061         }
2062         dma_fence_put(&process_info->eviction_fence->base);
2063         process_info->eviction_fence = new_fence;
2064         *ef = dma_fence_get(&new_fence->base);
2065
2066         /* Attach new eviction fence to all BOs */
2067         list_for_each_entry(mem, &process_info->kfd_bo_list,
2068                 validate_list.head)
2069                 amdgpu_bo_fence(mem->bo,
2070                         &process_info->eviction_fence->base, true);
2071
2072         /* Attach eviction fence to PD / PT BOs */
2073         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2074                             vm_list_node) {
2075                 struct amdgpu_bo *bo = peer_vm->root.base.bo;
2076
2077                 amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2078         }
2079
2080 validate_map_fail:
2081         ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2082         amdgpu_sync_free(&sync_obj);
2083 ttm_reserve_fail:
2084         mutex_unlock(&process_info->lock);
2085         kfree(pd_bo_list);
2086         return ret;
2087 }