drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c (linux-2.6-microblaze.git, commit 668a28b80a62be7db7f77d7a15334d129bdefdb6)
1 /*
2  * Copyright 2014-2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 #include <linux/dma-buf.h>
23 #include <linux/list.h>
24 #include <linux/pagemap.h>
25 #include <linux/sched/mm.h>
26 #include <linux/sched/task.h>
27
28 #include "amdgpu_object.h"
29 #include "amdgpu_gem.h"
30 #include "amdgpu_vm.h"
31 #include "amdgpu_amdkfd.h"
32 #include "amdgpu_dma_buf.h"
33 #include <uapi/linux/kfd_ioctl.h>
34 #include "amdgpu_xgmi.h"
35
36 /* Userptr restore delay, just long enough to allow consecutive VM
37  * changes to accumulate
38  */
39 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
40
41 /* Impose limit on how much memory KFD can use */
42 static struct {
43         uint64_t max_system_mem_limit;
44         uint64_t max_ttm_mem_limit;
45         int64_t system_mem_used;
46         int64_t ttm_mem_used;
47         spinlock_t mem_limit_lock;
48 } kfd_mem_limit;
49
50 /* Struct used for amdgpu_amdkfd_bo_validate */
51 struct amdgpu_vm_parser {
52         uint32_t        domain;
53         bool            wait;
54 };
55
56 static const char * const domain_bit_to_string[] = {
57                 "CPU",
58                 "GTT",
59                 "VRAM",
60                 "GDS",
61                 "GWS",
62                 "OA"
63 };
64
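/* Map the lowest set AMDGPU_GEM_DOMAIN_* bit in a domain mask to its name */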
65 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
66
67 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
68
69
70 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
71 {
72         return (struct amdgpu_device *)kgd;
73 }
74
75 static bool kfd_mem_is_attached(struct amdgpu_vm *avm,
76                 struct kgd_mem *mem)
77 {
78         struct kfd_mem_attachment *entry;
79
80         list_for_each_entry(entry, &mem->attachments, list)
81                 if (entry->bo_va->base.vm == avm)
82                         return true;
83
84         return false;
85 }
86
87 /* Set memory usage limits. Currently, the limits are
88  *  System (TTM + userptr) memory - 15/16th System RAM
89  *  TTM memory - 3/8th System RAM
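 *  For example, with 16 GB of system RAM these work out to roughly
 *  15 GB and 6 GB respectively.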
90  */
91 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
92 {
93         struct sysinfo si;
94         uint64_t mem;
95
96         si_meminfo(&si);
97         mem = si.freeram - si.freehigh;
98         mem *= si.mem_unit;
99
100         spin_lock_init(&kfd_mem_limit.mem_limit_lock);
101         kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
102         kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
103         pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
104                 (kfd_mem_limit.max_system_mem_limit >> 20),
105                 (kfd_mem_limit.max_ttm_mem_limit >> 20));
106 }
107
108 void amdgpu_amdkfd_reserve_system_mem(uint64_t size)
109 {
110         kfd_mem_limit.system_mem_used += size;
111 }
112
113 /* Estimate page table size needed to represent a given memory size
114  *
115  * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
116  * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
117  * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
118  * for 2MB pages for TLB efficiency. However, small allocations and
119  * fragmented system memory still need some 4KB pages. We choose a
120  * compromise that should work in most cases without reserving too
121  * much memory for page tables unnecessarily (factor 16K, >> 14).
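 * For example, representing 64 GB of memory needs about 64 GB >> 14 = 4 MB
 * of page tables under this estimate.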
122  */
123 #define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
124
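/* Bookkeeping overhead that TTM/amdgpu allocates for a BO of the given size:
 * one dma_addr_t and one page pointer per page, plus the power-of-two
 * rounded amdgpu_bo and ttm_tt structures themselves.
 */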
125 static size_t amdgpu_amdkfd_acc_size(uint64_t size)
126 {
127         size >>= PAGE_SHIFT;
128         size *= sizeof(dma_addr_t) + sizeof(void *);
129
130         return __roundup_pow_of_two(sizeof(struct amdgpu_bo)) +
131                 __roundup_pow_of_two(sizeof(struct ttm_tt)) +
132                 PAGE_ALIGN(size);
133 }
134
135 static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
136                 uint64_t size, u32 domain, bool sg)
137 {
138         uint64_t reserved_for_pt =
139                 ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
140         size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
141         int ret = 0;
142
143         acc_size = amdgpu_amdkfd_acc_size(size);
144
145         vram_needed = 0;
146         if (domain == AMDGPU_GEM_DOMAIN_GTT) {
147                 /* TTM GTT memory */
148                 system_mem_needed = acc_size + size;
149                 ttm_mem_needed = acc_size + size;
150         } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
151                 /* Userptr */
152                 system_mem_needed = acc_size + size;
153                 ttm_mem_needed = acc_size;
154         } else {
155                 /* VRAM and SG */
156                 system_mem_needed = acc_size;
157                 ttm_mem_needed = acc_size;
158                 if (domain == AMDGPU_GEM_DOMAIN_VRAM)
159                         vram_needed = size;
160         }
161
162         spin_lock(&kfd_mem_limit.mem_limit_lock);
163
164         if (kfd_mem_limit.system_mem_used + system_mem_needed >
165             kfd_mem_limit.max_system_mem_limit)
166                 pr_debug("Set no_system_mem_limit=1 if using shared memory\n");
167
168         if ((kfd_mem_limit.system_mem_used + system_mem_needed >
169              kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
170             (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
171              kfd_mem_limit.max_ttm_mem_limit) ||
172             (adev->kfd.vram_used + vram_needed >
173              adev->gmc.real_vram_size - reserved_for_pt)) {
174                 ret = -ENOMEM;
175         } else {
176                 kfd_mem_limit.system_mem_used += system_mem_needed;
177                 kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
178                 adev->kfd.vram_used += vram_needed;
179         }
180
181         spin_unlock(&kfd_mem_limit.mem_limit_lock);
182         return ret;
183 }
184
185 static void unreserve_mem_limit(struct amdgpu_device *adev,
186                 uint64_t size, u32 domain, bool sg)
187 {
188         size_t acc_size;
189
190         acc_size = amdgpu_amdkfd_acc_size(size);
191
192         spin_lock(&kfd_mem_limit.mem_limit_lock);
193         if (domain == AMDGPU_GEM_DOMAIN_GTT) {
194                 kfd_mem_limit.system_mem_used -= (acc_size + size);
195                 kfd_mem_limit.ttm_mem_used -= (acc_size + size);
196         } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
197                 kfd_mem_limit.system_mem_used -= (acc_size + size);
198                 kfd_mem_limit.ttm_mem_used -= acc_size;
199         } else {
200                 kfd_mem_limit.system_mem_used -= acc_size;
201                 kfd_mem_limit.ttm_mem_used -= acc_size;
202                 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
203                         adev->kfd.vram_used -= size;
204                         WARN_ONCE(adev->kfd.vram_used < 0,
205                                   "kfd VRAM memory accounting unbalanced");
206                 }
207         }
208         WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
209                   "kfd system memory accounting unbalanced");
210         WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
211                   "kfd TTM memory accounting unbalanced");
212
213         spin_unlock(&kfd_mem_limit.mem_limit_lock);
214 }
215
216 void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
217 {
218         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
219         u32 domain = bo->preferred_domains;
220         bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
221
222         if (bo->flags & AMDGPU_AMDKFD_CREATE_USERPTR_BO) {
223                 domain = AMDGPU_GEM_DOMAIN_CPU;
224                 sg = false;
225         }
226
227         unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
228 }
229
230
231 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
232  *  reservation object.
233  *
234  * @bo: [IN] Remove eviction fence(s) from this BO
235  * @ef: [IN] This eviction fence is removed if it
236  *  is present in the shared list.
237  *
238  * NOTE: Must be called with the BO reserved, i.e. bo->tbo.base.resv locked.
239  */
240 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
241                                         struct amdgpu_amdkfd_fence *ef)
242 {
243         struct dma_resv *resv = bo->tbo.base.resv;
244         struct dma_resv_list *old, *new;
245         unsigned int i, j, k;
246
247         if (!ef)
248                 return -EINVAL;
249
250         old = dma_resv_shared_list(resv);
251         if (!old)
252                 return 0;
253
254         new = kmalloc(struct_size(new, shared, old->shared_max), GFP_KERNEL);
255         if (!new)
256                 return -ENOMEM;
257
258         /* Go through all the shared fences in the reservation object and sort
259          * the interesting ones to the end of the list.
260          */
261         for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
262                 struct dma_fence *f;
263
264                 f = rcu_dereference_protected(old->shared[i],
265                                               dma_resv_held(resv));
266
267                 if (f->context == ef->base.context)
268                         RCU_INIT_POINTER(new->shared[--j], f);
269                 else
270                         RCU_INIT_POINTER(new->shared[k++], f);
271         }
272         new->shared_max = old->shared_max;
273         new->shared_count = k;
274
275         /* Install the new fence list, seqcount provides the barriers */
276         write_seqcount_begin(&resv->seq);
277         RCU_INIT_POINTER(resv->fence, new);
278         write_seqcount_end(&resv->seq);
279
280         /* Drop the references to the removed fences */
281         for (i = j, k = 0; i < old->shared_count; ++i) {
282                 struct dma_fence *f;
283
284                 f = rcu_dereference_protected(new->shared[i],
285                                               dma_resv_held(resv));
286                 dma_fence_put(f);
287         }
288         kfree_rcu(old, rcu);
289
290         return 0;
291 }
292
293 int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
294 {
295         struct amdgpu_bo *root = bo;
296         struct amdgpu_vm_bo_base *vm_bo;
297         struct amdgpu_vm *vm;
298         struct amdkfd_process_info *info;
299         struct amdgpu_amdkfd_fence *ef;
300         int ret;
301
302         /* We can always get vm_bo from the root PD BO. */
303         while (root->parent)
304                 root = root->parent;
305
306         vm_bo = root->vm_bo;
307         if (!vm_bo)
308                 return 0;
309
310         vm = vm_bo->vm;
311         if (!vm)
312                 return 0;
313
314         info = vm->process_info;
315         if (!info || !info->eviction_fence)
316                 return 0;
317
318         ef = container_of(dma_fence_get(&info->eviction_fence->base),
319                         struct amdgpu_amdkfd_fence, base);
320
321         BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
322         ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
323         dma_resv_unlock(bo->tbo.base.resv);
324
325         dma_fence_put(&ef->base);
326         return ret;
327 }
328
329 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
330                                      bool wait)
331 {
332         struct ttm_operation_ctx ctx = { false, false };
333         int ret;
334
335         if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
336                  "Called with userptr BO"))
337                 return -EINVAL;
338
339         amdgpu_bo_placement_from_domain(bo, domain);
340
341         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
342         if (ret)
343                 goto validate_fail;
344         if (wait)
345                 amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
346
347 validate_fail:
348         return ret;
349 }
350
351 static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
352 {
353         struct amdgpu_vm_parser *p = param;
354
355         return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
356 }
357
358 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
359  *
360  * Page directories are not updated here because huge page handling
361  * during page table updates can invalidate page directory entries
362  * again. Page directories are only updated after updating page
363  * tables.
364  */
365 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
366 {
367         struct amdgpu_bo *pd = vm->root.base.bo;
368         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
369         struct amdgpu_vm_parser param;
370         int ret;
371
372         param.domain = AMDGPU_GEM_DOMAIN_VRAM;
373         param.wait = false;
374
375         ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
376                                         &param);
377         if (ret) {
378                 pr_err("failed to validate PT BOs\n");
379                 return ret;
380         }
381
382         ret = amdgpu_amdkfd_validate(&param, pd);
383         if (ret) {
384                 pr_err("failed to validate PD\n");
385                 return ret;
386         }
387
388         vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
389
390         if (vm->use_cpu_for_update) {
391                 ret = amdgpu_bo_kmap(pd, NULL);
392                 if (ret) {
393                         pr_err("failed to kmap PD, ret=%d\n", ret);
394                         return ret;
395                 }
396         }
397
398         return 0;
399 }
400
401 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
402 {
403         struct amdgpu_bo *pd = vm->root.base.bo;
404         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
405         int ret;
406
407         ret = amdgpu_vm_update_pdes(adev, vm, false);
408         if (ret)
409                 return ret;
410
411         return amdgpu_sync_fence(sync, vm->last_update);
412 }
413
414 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
415 {
416         struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
417         bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
418         bool uncached = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED;
419         uint32_t mapping_flags;
420         uint64_t pte_flags;
421         bool snoop = false;
422
423         mapping_flags = AMDGPU_VM_PAGE_READABLE;
424         if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
425                 mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
426         if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
427                 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
428
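        /* Pick the ASIC-specific memory type (MTYPE): cache-coherent (CC) or
         * read-write (RW) for VRAM local to the mapping GPU, uncached (UC) or
         * non-coherent (NC) otherwise, and enable snooping where the
         * interconnect keeps CPU and GPU caches coherent.
         */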
429         switch (adev->asic_type) {
430         case CHIP_ARCTURUS:
431                 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
432                         if (bo_adev == adev)
433                                 mapping_flags |= coherent ?
434                                         AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
435                         else
436                                 mapping_flags |= coherent ?
437                                         AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
438                 } else {
439                         mapping_flags |= coherent ?
440                                 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
441                 }
442                 break;
443         case CHIP_ALDEBARAN:
444                 if (coherent && uncached) {
445                         if (adev->gmc.xgmi.connected_to_cpu ||
446                                 !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM))
447                                 snoop = true;
448                         mapping_flags |= AMDGPU_VM_MTYPE_UC;
449                 } else if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
450                         if (bo_adev == adev) {
451                                 mapping_flags |= coherent ?
452                                         AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
453                                 if (adev->gmc.xgmi.connected_to_cpu)
454                                         snoop = true;
455                         } else {
456                                 mapping_flags |= coherent ?
457                                         AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
458                                 if (amdgpu_xgmi_same_hive(adev, bo_adev))
459                                         snoop = true;
460                         }
461                 } else {
462                         snoop = true;
463                         mapping_flags |= coherent ?
464                                 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
465                 }
466                 break;
467         default:
468                 mapping_flags |= coherent ?
469                         AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
470         }
471
472         pte_flags = amdgpu_gem_va_map_flags(adev, mapping_flags);
473         pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
474
475         return pte_flags;
476 }
477
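/* kfd_mem_dmamap_userptr - DMA-map a userptr BO for one specific GPU
 *
 * Builds an sg_table from the pages of the original userptr BO, DMA-maps
 * it for the attachment's device and validates the SG BO into GTT.
 */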
478 static int
479 kfd_mem_dmamap_userptr(struct kgd_mem *mem,
480                        struct kfd_mem_attachment *attachment)
481 {
482         enum dma_data_direction direction =
483                 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
484                 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
485         struct ttm_operation_ctx ctx = {.interruptible = true};
486         struct amdgpu_bo *bo = attachment->bo_va->base.bo;
487         struct amdgpu_device *adev = attachment->adev;
488         struct ttm_tt *src_ttm = mem->bo->tbo.ttm;
489         struct ttm_tt *ttm = bo->tbo.ttm;
490         int ret;
491
492         if (WARN_ON(ttm->num_pages != src_ttm->num_pages))
493                 return -EINVAL;
494
495         ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL);
496         if (unlikely(!ttm->sg))
497                 return -ENOMEM;
498
499         /* Same sequence as in amdgpu_ttm_tt_pin_userptr */
500         ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages,
501                                         ttm->num_pages, 0,
502                                         (u64)ttm->num_pages << PAGE_SHIFT,
503                                         GFP_KERNEL);
504         if (unlikely(ret))
505                 goto free_sg;
506
507         ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
508         if (unlikely(ret))
509                 goto release_sg;
510
511         drm_prime_sg_to_dma_addr_array(ttm->sg, ttm->dma_address,
512                                        ttm->num_pages);
513
514         amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
515         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
516         if (ret)
517                 goto unmap_sg;
518
519         return 0;
520
521 unmap_sg:
522         dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
523 release_sg:
524         pr_err("DMA map userptr failed: %d\n", ret);
525         sg_free_table(ttm->sg);
526 free_sg:
527         kfree(ttm->sg);
528         ttm->sg = NULL;
529         return ret;
530 }
531
532 static int
533 kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment)
534 {
535         struct ttm_operation_ctx ctx = {.interruptible = true};
536         struct amdgpu_bo *bo = attachment->bo_va->base.bo;
537
538         amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
539         return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
540 }
541
542 static int
543 kfd_mem_dmamap_attachment(struct kgd_mem *mem,
544                           struct kfd_mem_attachment *attachment)
545 {
546         switch (attachment->type) {
547         case KFD_MEM_ATT_SHARED:
548                 return 0;
549         case KFD_MEM_ATT_USERPTR:
550                 return kfd_mem_dmamap_userptr(mem, attachment);
551         case KFD_MEM_ATT_DMABUF:
552                 return kfd_mem_dmamap_dmabuf(attachment);
553         default:
554                 WARN_ON_ONCE(1);
555         }
556         return -EINVAL;
557 }
558
559 static void
560 kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
561                          struct kfd_mem_attachment *attachment)
562 {
563         enum dma_data_direction direction =
564                 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
565                 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
566         struct ttm_operation_ctx ctx = {.interruptible = false};
567         struct amdgpu_bo *bo = attachment->bo_va->base.bo;
568         struct amdgpu_device *adev = attachment->adev;
569         struct ttm_tt *ttm = bo->tbo.ttm;
570
571         if (unlikely(!ttm->sg))
572                 return;
573
574         amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
575         ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
576
577         dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
578         sg_free_table(ttm->sg);
579         ttm->sg = NULL;
580 }
581
582 static void
583 kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment)
584 {
585         struct ttm_operation_ctx ctx = {.interruptible = true};
586         struct amdgpu_bo *bo = attachment->bo_va->base.bo;
587
588         amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
589         ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
590 }
591
592 static void
593 kfd_mem_dmaunmap_attachment(struct kgd_mem *mem,
594                             struct kfd_mem_attachment *attachment)
595 {
596         switch (attachment->type) {
597         case KFD_MEM_ATT_SHARED:
598                 break;
599         case KFD_MEM_ATT_USERPTR:
600                 kfd_mem_dmaunmap_userptr(mem, attachment);
601                 break;
602         case KFD_MEM_ATT_DMABUF:
603                 kfd_mem_dmaunmap_dmabuf(attachment);
604                 break;
605         default:
606                 WARN_ON_ONCE(1);
607         }
608 }
609
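/* kfd_mem_attach_userptr - Create an SG BO that mirrors a userptr BO
 *
 * The new BO shares the original BO's reservation object and is later
 * DMA-mapped for @adev in kfd_mem_dmamap_userptr().
 */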
610 static int
611 kfd_mem_attach_userptr(struct amdgpu_device *adev, struct kgd_mem *mem,
612                        struct amdgpu_bo **bo)
613 {
614         unsigned long bo_size = mem->bo->tbo.base.size;
615         struct drm_gem_object *gobj;
616         int ret;
617
618         ret = amdgpu_bo_reserve(mem->bo, false);
619         if (ret)
620                 return ret;
621
622         ret = amdgpu_gem_object_create(adev, bo_size, 1,
623                                        AMDGPU_GEM_DOMAIN_CPU,
624                                        AMDGPU_GEM_CREATE_PREEMPTIBLE,
625                                        ttm_bo_type_sg, mem->bo->tbo.base.resv,
626                                        &gobj);
627         amdgpu_bo_unreserve(mem->bo);
628         if (ret)
629                 return ret;
630
631         *bo = gem_to_amdgpu_bo(gobj);
632         (*bo)->parent = amdgpu_bo_ref(mem->bo);
633
634         return 0;
635 }
636
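/* kfd_mem_attach_dmabuf - Import the BO as a DMA-buf on another GPU
 *
 * Exports the original BO as a DMA-buf once (cached in mem->dmabuf) and
 * imports it on @adev so the attachment can be DMA-mapped there later.
 */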
637 static int
638 kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
639                       struct amdgpu_bo **bo)
640 {
641         struct drm_gem_object *gobj;
642         int ret;
643
644         if (!mem->dmabuf) {
645                 mem->dmabuf = amdgpu_gem_prime_export(&mem->bo->tbo.base,
646                         mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
647                                 DRM_RDWR : 0);
648                 if (IS_ERR(mem->dmabuf)) {
649                         ret = PTR_ERR(mem->dmabuf);
650                         mem->dmabuf = NULL;
651                         return ret;
652                 }
653         }
654
655         gobj = amdgpu_gem_prime_import(&adev->ddev, mem->dmabuf);
656         if (IS_ERR(gobj))
657                 return PTR_ERR(gobj);
658
659         /* Import takes an extra reference on the dmabuf. Drop it now to
660          * avoid leaking it. We only need the one reference in
661          * kgd_mem->dmabuf.
662          */
663         dma_buf_put(mem->dmabuf);
664
665         *bo = gem_to_amdgpu_bo(gobj);
666         (*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;
667         (*bo)->parent = amdgpu_bo_ref(mem->bo);
668
669         return 0;
670 }
671
672 /* kfd_mem_attach - Add a BO to a VM
673  *
674  * Everything that needs to be done only once when a BO is first added
675  * to a VM. It can later be mapped and unmapped many times without
676  * repeating these steps.
677  *
678  * 0. Create BO for DMA mapping, if needed
679  * 1. Allocate and initialize BO VA entry data structure
680  * 2. Add BO to the VM
681  * 3. Determine ASIC-specific PTE flags
682  * 4. Alloc page tables and directories if needed
683  * 4a.  Validate new page tables and directories
684  */
685 static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
686                 struct amdgpu_vm *vm, bool is_aql)
687 {
688         struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
689         unsigned long bo_size = mem->bo->tbo.base.size;
690         uint64_t va = mem->va;
691         struct kfd_mem_attachment *attachment[2] = {NULL, NULL};
692         struct amdgpu_bo *bo[2] = {NULL, NULL};
693         int i, ret;
694
695         if (!va) {
696                 pr_err("Invalid VA when adding BO to VM\n");
697                 return -EINVAL;
698         }
699
700         for (i = 0; i <= is_aql; i++) {
701                 attachment[i] = kzalloc(sizeof(*attachment[i]), GFP_KERNEL);
702                 if (unlikely(!attachment[i])) {
703                         ret = -ENOMEM;
704                         goto unwind;
705                 }
706
707                 pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
708                          va + bo_size, vm);
709
710                 if (adev == bo_adev || (mem->domain == AMDGPU_GEM_DOMAIN_VRAM &&
711                                         amdgpu_xgmi_same_hive(adev, bo_adev))) {
712                         /* Mappings on the local GPU and VRAM mappings in the
713                          * local hive share the original BO
714                          */
715                         attachment[i]->type = KFD_MEM_ATT_SHARED;
716                         bo[i] = mem->bo;
717                         drm_gem_object_get(&bo[i]->tbo.base);
718                 } else if (i > 0) {
719                         /* Multiple mappings on the same GPU share the BO */
720                         attachment[i]->type = KFD_MEM_ATT_SHARED;
721                         bo[i] = bo[0];
722                         drm_gem_object_get(&bo[i]->tbo.base);
723                 } else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
724                         /* Create an SG BO to DMA-map userptrs on other GPUs */
725                         attachment[i]->type = KFD_MEM_ATT_USERPTR;
726                         ret = kfd_mem_attach_userptr(adev, mem, &bo[i]);
727                         if (ret)
728                                 goto unwind;
729                 } else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT &&
730                            mem->bo->tbo.type != ttm_bo_type_sg) {
731                         /* GTT BOs use the DMA-mapping ability of dynamically
732                          * attached DMA-bufs. TODO: The same should work for VRAM on
733                          * large-BAR GPUs.
734                          */
735                         attachment[i]->type = KFD_MEM_ATT_DMABUF;
736                         ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]);
737                         if (ret)
738                                 goto unwind;
739                 } else {
740                         /* FIXME: Need to DMA-map other BO types:
741                          * large-BAR VRAM, doorbells, MMIO remap
742                          */
743                         attachment[i]->type = KFD_MEM_ATT_SHARED;
744                         bo[i] = mem->bo;
745                         drm_gem_object_get(&bo[i]->tbo.base);
746                 }
747
748                 /* Add BO to VM internal data structures */
749                 attachment[i]->bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
750                 if (unlikely(!attachment[i]->bo_va)) {
751                         ret = -ENOMEM;
752                         pr_err("Failed to add BO object to VM. ret == %d\n",
753                                ret);
754                         goto unwind;
755                 }
756
757                 attachment[i]->va = va;
758                 attachment[i]->pte_flags = get_pte_flags(adev, mem);
759                 attachment[i]->adev = adev;
760                 list_add(&attachment[i]->list, &mem->attachments);
761
762                 va += bo_size;
763         }
764
765         return 0;
766
767 unwind:
768         for (; i >= 0; i--) {
769                 if (!attachment[i])
770                         continue;
771                 if (attachment[i]->bo_va) {
772                         amdgpu_vm_bo_rmv(adev, attachment[i]->bo_va);
773                         list_del(&attachment[i]->list);
774                 }
775                 if (bo[i])
776                         drm_gem_object_put(&bo[i]->tbo.base);
777                 kfree(attachment[i]);
778         }
779         return ret;
780 }
781
782 static void kfd_mem_detach(struct kfd_mem_attachment *attachment)
783 {
784         struct amdgpu_bo *bo = attachment->bo_va->base.bo;
785
786         pr_debug("\t remove VA 0x%llx in entry %p\n",
787                         attachment->va, attachment);
788         amdgpu_vm_bo_rmv(attachment->adev, attachment->bo_va);
789         drm_gem_object_put(&bo->tbo.base);
790         list_del(&attachment->list);
791         kfree(attachment);
792 }
793
794 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
795                                 struct amdkfd_process_info *process_info,
796                                 bool userptr)
797 {
798         struct ttm_validate_buffer *entry = &mem->validate_list;
799         struct amdgpu_bo *bo = mem->bo;
800
801         INIT_LIST_HEAD(&entry->head);
802         entry->num_shared = 1;
803         entry->bo = &bo->tbo;
804         mutex_lock(&process_info->lock);
805         if (userptr)
806                 list_add_tail(&entry->head, &process_info->userptr_valid_list);
807         else
808                 list_add_tail(&entry->head, &process_info->kfd_bo_list);
809         mutex_unlock(&process_info->lock);
810 }
811
812 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
813                 struct amdkfd_process_info *process_info)
814 {
815         struct ttm_validate_buffer *bo_list_entry;
816
817         bo_list_entry = &mem->validate_list;
818         mutex_lock(&process_info->lock);
819         list_del(&bo_list_entry->head);
820         mutex_unlock(&process_info->lock);
821 }
822
823 /* Initializes user pages. It registers the MMU notifier and validates
824  * the userptr BO in the GTT domain.
825  *
826  * The BO must already be on the userptr_valid_list. Otherwise an
827  * eviction and restore may happen that leaves the new BO unmapped
828  * with the user mode queues running.
829  *
830  * Takes the process_info->lock to protect against concurrent restore
831  * workers.
832  *
833  * Returns 0 for success, negative errno for errors.
834  */
835 static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
836 {
837         struct amdkfd_process_info *process_info = mem->process_info;
838         struct amdgpu_bo *bo = mem->bo;
839         struct ttm_operation_ctx ctx = { true, false };
840         int ret = 0;
841
842         mutex_lock(&process_info->lock);
843
844         ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
845         if (ret) {
846                 pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
847                 goto out;
848         }
849
850         ret = amdgpu_mn_register(bo, user_addr);
851         if (ret) {
852                 pr_err("%s: Failed to register MMU notifier: %d\n",
853                        __func__, ret);
854                 goto out;
855         }
856
857         ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
858         if (ret) {
859                 pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
860                 goto unregister_out;
861         }
862
863         ret = amdgpu_bo_reserve(bo, true);
864         if (ret) {
865                 pr_err("%s: Failed to reserve BO\n", __func__);
866                 goto release_out;
867         }
868         amdgpu_bo_placement_from_domain(bo, mem->domain);
869         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
870         if (ret)
871                 pr_err("%s: failed to validate BO\n", __func__);
872         amdgpu_bo_unreserve(bo);
873
874 release_out:
875         amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
876 unregister_out:
877         if (ret)
878                 amdgpu_mn_unregister(bo);
879 out:
880         mutex_unlock(&process_info->lock);
881         return ret;
882 }
883
884 /* Reserving a BO and its page table BOs must happen atomically to
885  * avoid deadlocks. Some operations update multiple VMs at once. Track
886  * all the reservation info in a context structure. Optionally a sync
887  * object can track VM updates.
888  */
889 struct bo_vm_reservation_context {
890         struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
891         unsigned int n_vms;                 /* Number of VMs reserved       */
892         struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
893         struct ww_acquire_ctx ticket;       /* Reservation ticket           */
894         struct list_head list, duplicates;  /* BO lists                     */
895         struct amdgpu_sync *sync;           /* Pointer to sync object       */
896         bool reserved;                      /* Whether BOs are reserved     */
897 };
898
899 enum bo_vm_match {
900         BO_VM_NOT_MAPPED = 0,   /* Match VMs where a BO is not mapped */
901         BO_VM_MAPPED,           /* Match VMs where a BO is mapped     */
902         BO_VM_ALL,              /* Match all VMs a BO was added to    */
903 };
904
905 /**
906  * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
907  * @mem: KFD BO structure.
908  * @vm: the VM to reserve.
909  * @ctx: the struct that will be used in unreserve_bo_and_vms().
910  */
911 static int reserve_bo_and_vm(struct kgd_mem *mem,
912                               struct amdgpu_vm *vm,
913                               struct bo_vm_reservation_context *ctx)
914 {
915         struct amdgpu_bo *bo = mem->bo;
916         int ret;
917
918         WARN_ON(!vm);
919
920         ctx->reserved = false;
921         ctx->n_vms = 1;
922         ctx->sync = &mem->sync;
923
924         INIT_LIST_HEAD(&ctx->list);
925         INIT_LIST_HEAD(&ctx->duplicates);
926
927         ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
928         if (!ctx->vm_pd)
929                 return -ENOMEM;
930
931         ctx->kfd_bo.priority = 0;
932         ctx->kfd_bo.tv.bo = &bo->tbo;
933         ctx->kfd_bo.tv.num_shared = 1;
934         list_add(&ctx->kfd_bo.tv.head, &ctx->list);
935
936         amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
937
938         ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
939                                      false, &ctx->duplicates);
940         if (ret) {
941                 pr_err("Failed to reserve buffers in ttm.\n");
942                 kfree(ctx->vm_pd);
943                 ctx->vm_pd = NULL;
944                 return ret;
945         }
946
947         ctx->reserved = true;
948         return 0;
949 }
950
951 /**
952  * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
953  * @mem: KFD BO structure.
954  * @vm: the VM to reserve. If NULL, all VMs associated with the BO
955  * are reserved. Otherwise, only the given VM is reserved.
956  * @map_type: the mapping status that will be used to filter the VMs.
957  * @ctx: the struct that will be used in unreserve_bo_and_vms().
958  *
959  * Returns 0 for success, negative for failure.
960  */
961 static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
962                                 struct amdgpu_vm *vm, enum bo_vm_match map_type,
963                                 struct bo_vm_reservation_context *ctx)
964 {
965         struct amdgpu_bo *bo = mem->bo;
966         struct kfd_mem_attachment *entry;
967         unsigned int i;
968         int ret;
969
970         ctx->reserved = false;
971         ctx->n_vms = 0;
972         ctx->vm_pd = NULL;
973         ctx->sync = &mem->sync;
974
975         INIT_LIST_HEAD(&ctx->list);
976         INIT_LIST_HEAD(&ctx->duplicates);
977
978         list_for_each_entry(entry, &mem->attachments, list) {
979                 if ((vm && vm != entry->bo_va->base.vm) ||
980                         (entry->is_mapped != map_type
981                         && map_type != BO_VM_ALL))
982                         continue;
983
984                 ctx->n_vms++;
985         }
986
987         if (ctx->n_vms != 0) {
988                 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
989                                      GFP_KERNEL);
990                 if (!ctx->vm_pd)
991                         return -ENOMEM;
992         }
993
994         ctx->kfd_bo.priority = 0;
995         ctx->kfd_bo.tv.bo = &bo->tbo;
996         ctx->kfd_bo.tv.num_shared = 1;
997         list_add(&ctx->kfd_bo.tv.head, &ctx->list);
998
999         i = 0;
1000         list_for_each_entry(entry, &mem->attachments, list) {
1001                 if ((vm && vm != entry->bo_va->base.vm) ||
1002                         (entry->is_mapped != map_type
1003                         && map_type != BO_VM_ALL))
1004                         continue;
1005
1006                 amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
1007                                 &ctx->vm_pd[i]);
1008                 i++;
1009         }
1010
1011         ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
1012                                      false, &ctx->duplicates);
1013         if (ret) {
1014                 pr_err("Failed to reserve buffers in ttm.\n");
1015                 kfree(ctx->vm_pd);
1016                 ctx->vm_pd = NULL;
1017                 return ret;
1018         }
1019
1020         ctx->reserved = true;
1021         return 0;
1022 }
1023
1024 /**
1025  * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
1026  * @ctx: Reservation context to unreserve
1027  * @wait: Optionally wait for a sync object representing pending VM updates
1028  * @intr: Whether the wait is interruptible
1029  *
1030  * Also frees any resources allocated in
1031  * reserve_bo_and_(cond_)vm(s). Returns the status from
1032  * amdgpu_sync_wait.
1033  */
1034 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
1035                                  bool wait, bool intr)
1036 {
1037         int ret = 0;
1038
1039         if (wait)
1040                 ret = amdgpu_sync_wait(ctx->sync, intr);
1041
1042         if (ctx->reserved)
1043                 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
1044         kfree(ctx->vm_pd);
1045
1046         ctx->sync = NULL;
1047
1048         ctx->reserved = false;
1049         ctx->vm_pd = NULL;
1050
1051         return ret;
1052 }
1053
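/* unmap_bo_from_gpuvm - Remove one GPU's mapping of a BO
 *
 * Unmaps the VA range, clears the freed mapping from the page tables,
 * adds the resulting fence to @sync and DMA-unmaps the attachment.
 */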
1054 static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
1055                                 struct kfd_mem_attachment *entry,
1056                                 struct amdgpu_sync *sync)
1057 {
1058         struct amdgpu_bo_va *bo_va = entry->bo_va;
1059         struct amdgpu_device *adev = entry->adev;
1060         struct amdgpu_vm *vm = bo_va->base.vm;
1061
1062         amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
1063
1064         amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
1065
1066         amdgpu_sync_fence(sync, bo_va->last_pt_update);
1067
1068         kfd_mem_dmaunmap_attachment(mem, entry);
1069 }
1070
1071 static int update_gpuvm_pte(struct kgd_mem *mem,
1072                             struct kfd_mem_attachment *entry,
1073                             struct amdgpu_sync *sync)
1074 {
1075         struct amdgpu_bo_va *bo_va = entry->bo_va;
1076         struct amdgpu_device *adev = entry->adev;
1077         int ret;
1078
1079         ret = kfd_mem_dmamap_attachment(mem, entry);
1080         if (ret)
1081                 return ret;
1082
1083         /* Update the page tables  */
1084         ret = amdgpu_vm_bo_update(adev, bo_va, false);
1085         if (ret) {
1086                 pr_err("amdgpu_vm_bo_update failed\n");
1087                 return ret;
1088         }
1089
1090         return amdgpu_sync_fence(sync, bo_va->last_pt_update);
1091 }
1092
1093 static int map_bo_to_gpuvm(struct kgd_mem *mem,
1094                            struct kfd_mem_attachment *entry,
1095                            struct amdgpu_sync *sync,
1096                            bool no_update_pte)
1097 {
1098         int ret;
1099
1100         /* Set virtual address for the allocation */
1101         ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0,
1102                                amdgpu_bo_size(entry->bo_va->base.bo),
1103                                entry->pte_flags);
1104         if (ret) {
1105                 pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
1106                                 entry->va, ret);
1107                 return ret;
1108         }
1109
1110         if (no_update_pte)
1111                 return 0;
1112
1113         ret = update_gpuvm_pte(mem, entry, sync);
1114         if (ret) {
1115                 pr_err("update_gpuvm_pte() failed\n");
1116                 goto update_gpuvm_pte_failed;
1117         }
1118
1119         return 0;
1120
1121 update_gpuvm_pte_failed:
1122         unmap_bo_from_gpuvm(mem, entry, sync);
1123         return ret;
1124 }
1125
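/* Build a one-entry sg_table for an already-known DMA address (doorbell or
 * MMIO remap page) so it can be wrapped in an SG BO.
 */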
1126 static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
1127 {
1128         struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
1129
1130         if (!sg)
1131                 return NULL;
1132         if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
1133                 kfree(sg);
1134                 return NULL;
1135         }
1136         sg->sgl->dma_address = addr;
1137         sg->sgl->length = size;
1138 #ifdef CONFIG_NEED_SG_DMA_LENGTH
1139         sg->sgl->dma_length = size;
1140 #endif
1141         return sg;
1142 }
1143
1144 static int process_validate_vms(struct amdkfd_process_info *process_info)
1145 {
1146         struct amdgpu_vm *peer_vm;
1147         int ret;
1148
1149         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1150                             vm_list_node) {
1151                 ret = vm_validate_pt_pd_bos(peer_vm);
1152                 if (ret)
1153                         return ret;
1154         }
1155
1156         return 0;
1157 }
1158
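/* Add all fences on the process's page-directory reservations that were not
 * created by KFD itself (AMDGPU_SYNC_NE_OWNER) to @sync.
 */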
1159 static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
1160                                  struct amdgpu_sync *sync)
1161 {
1162         struct amdgpu_vm *peer_vm;
1163         int ret;
1164
1165         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1166                             vm_list_node) {
1167                 struct amdgpu_bo *pd = peer_vm->root.base.bo;
1168
1169                 ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
1170                                        AMDGPU_SYNC_NE_OWNER,
1171                                        AMDGPU_FENCE_OWNER_KFD);
1172                 if (ret)
1173                         return ret;
1174         }
1175
1176         return 0;
1177 }
1178
1179 static int process_update_pds(struct amdkfd_process_info *process_info,
1180                               struct amdgpu_sync *sync)
1181 {
1182         struct amdgpu_vm *peer_vm;
1183         int ret;
1184
1185         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1186                             vm_list_node) {
1187                 ret = vm_update_pds(peer_vm, sync);
1188                 if (ret)
1189                         return ret;
1190         }
1191
1192         return 0;
1193 }
1194
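/* init_kfd_vm - Turn an amdgpu VM into a KFD compute VM
 *
 * On the first VM of a process this allocates the shared process info
 * (BO lists, eviction fence, userptr restore worker). It then validates
 * the page directory and attaches the eviction fence to it.
 */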
1195 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
1196                        struct dma_fence **ef)
1197 {
1198         struct amdkfd_process_info *info = NULL;
1199         int ret;
1200
1201         if (!*process_info) {
1202                 info = kzalloc(sizeof(*info), GFP_KERNEL);
1203                 if (!info)
1204                         return -ENOMEM;
1205
1206                 mutex_init(&info->lock);
1207                 INIT_LIST_HEAD(&info->vm_list_head);
1208                 INIT_LIST_HEAD(&info->kfd_bo_list);
1209                 INIT_LIST_HEAD(&info->userptr_valid_list);
1210                 INIT_LIST_HEAD(&info->userptr_inval_list);
1211
1212                 info->eviction_fence =
1213                         amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
1214                                                    current->mm,
1215                                                    NULL);
1216                 if (!info->eviction_fence) {
1217                         pr_err("Failed to create eviction fence\n");
1218                         ret = -ENOMEM;
1219                         goto create_evict_fence_fail;
1220                 }
1221
1222                 info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
1223                 atomic_set(&info->evicted_bos, 0);
1224                 INIT_DELAYED_WORK(&info->restore_userptr_work,
1225                                   amdgpu_amdkfd_restore_userptr_worker);
1226
1227                 *process_info = info;
1228                 *ef = dma_fence_get(&info->eviction_fence->base);
1229         }
1230
1231         vm->process_info = *process_info;
1232
1233         /* Validate page directory and attach eviction fence */
1234         ret = amdgpu_bo_reserve(vm->root.base.bo, true);
1235         if (ret)
1236                 goto reserve_pd_fail;
1237         ret = vm_validate_pt_pd_bos(vm);
1238         if (ret) {
1239                 pr_err("validate_pt_pd_bos() failed\n");
1240                 goto validate_pd_fail;
1241         }
1242         ret = amdgpu_bo_sync_wait(vm->root.base.bo,
1243                                   AMDGPU_FENCE_OWNER_KFD, false);
1244         if (ret)
1245                 goto wait_pd_fail;
1246         ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
1247         if (ret)
1248                 goto reserve_shared_fail;
1249         amdgpu_bo_fence(vm->root.base.bo,
1250                         &vm->process_info->eviction_fence->base, true);
1251         amdgpu_bo_unreserve(vm->root.base.bo);
1252
1253         /* Update process info */
1254         mutex_lock(&vm->process_info->lock);
1255         list_add_tail(&vm->vm_list_node,
1256                         &(vm->process_info->vm_list_head));
1257         vm->process_info->n_vms++;
1258         mutex_unlock(&vm->process_info->lock);
1259
1260         return 0;
1261
1262 reserve_shared_fail:
1263 wait_pd_fail:
1264 validate_pd_fail:
1265         amdgpu_bo_unreserve(vm->root.base.bo);
1266 reserve_pd_fail:
1267         vm->process_info = NULL;
1268         if (info) {
1269                 /* Two fence references: one in info and one in *ef */
1270                 dma_fence_put(&info->eviction_fence->base);
1271                 dma_fence_put(*ef);
1272                 *ef = NULL;
1273                 *process_info = NULL;
1274                 put_pid(info->pid);
1275 create_evict_fence_fail:
1276                 mutex_destroy(&info->lock);
1277                 kfree(info);
1278         }
1279         return ret;
1280 }
1281
1282 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
1283                                            struct file *filp, u32 pasid,
1284                                            void **process_info,
1285                                            struct dma_fence **ef)
1286 {
1287         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1288         struct amdgpu_fpriv *drv_priv;
1289         struct amdgpu_vm *avm;
1290         int ret;
1291
1292         ret = amdgpu_file_to_fpriv(filp, &drv_priv);
1293         if (ret)
1294                 return ret;
1295         avm = &drv_priv->vm;
1296
1297         /* Already a compute VM? */
1298         if (avm->process_info)
1299                 return -EINVAL;
1300
1301         /* Convert VM into a compute VM */
1302         ret = amdgpu_vm_make_compute(adev, avm, pasid);
1303         if (ret)
1304                 return ret;
1305
1306         /* Initialize KFD part of the VM and process info */
1307         ret = init_kfd_vm(avm, process_info, ef);
1308         if (ret)
1309                 return ret;
1310
1311         amdgpu_vm_set_task_info(avm);
1312
1313         return 0;
1314 }
1315
1316 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1317                                     struct amdgpu_vm *vm)
1318 {
1319         struct amdkfd_process_info *process_info = vm->process_info;
1320         struct amdgpu_bo *pd = vm->root.base.bo;
1321
1322         if (!process_info)
1323                 return;
1324
1325         /* Release eviction fence from PD */
1326         amdgpu_bo_reserve(pd, false);
1327         amdgpu_bo_fence(pd, NULL, false);
1328         amdgpu_bo_unreserve(pd);
1329
1330         /* Update process info */
1331         mutex_lock(&process_info->lock);
1332         process_info->n_vms--;
1333         list_del(&vm->vm_list_node);
1334         mutex_unlock(&process_info->lock);
1335
1336         vm->process_info = NULL;
1337
1338         /* Release per-process resources when last compute VM is destroyed */
1339         if (!process_info->n_vms) {
1340                 WARN_ON(!list_empty(&process_info->kfd_bo_list));
1341                 WARN_ON(!list_empty(&process_info->userptr_valid_list));
1342                 WARN_ON(!list_empty(&process_info->userptr_inval_list));
1343
1344                 dma_fence_put(&process_info->eviction_fence->base);
1345                 cancel_delayed_work_sync(&process_info->restore_userptr_work);
1346                 put_pid(process_info->pid);
1347                 mutex_destroy(&process_info->lock);
1348                 kfree(process_info);
1349         }
1350 }
1351
1352 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv)
1353 {
1354         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1355         struct amdgpu_vm *avm;
1356
1357         if (WARN_ON(!kgd || !drm_priv))
1358                 return;
1359
1360         avm = drm_priv_to_vm(drm_priv);
1361
1362         pr_debug("Releasing process vm %p\n", avm);
1363
1364         /* The original pasid of the amdgpu vm was already released
1365          * when the amdgpu vm was converted to a compute vm. The
1366          * current pasid is managed by KFD and will be released on
1367          * KFD process destroy. Set the amdgpu pasid to 0 to avoid a
1368          * duplicate release.
1369          */
1370         amdgpu_vm_release_compute(adev, avm);
1371 }
1372
1373 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
1374 {
1375         struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1376         struct amdgpu_bo *pd = avm->root.base.bo;
1377         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
1378
1379         if (adev->asic_type < CHIP_VEGA10)
1380                 return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1381         return avm->pd_phys_addr;
1382 }
1383
1384 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1385                 struct kgd_dev *kgd, uint64_t va, uint64_t size,
1386                 void *drm_priv, struct kgd_mem **mem,
1387                 uint64_t *offset, uint32_t flags)
1388 {
1389         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1390         struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1391         enum ttm_bo_type bo_type = ttm_bo_type_device;
1392         struct sg_table *sg = NULL;
1393         uint64_t user_addr = 0;
1394         struct amdgpu_bo *bo;
1395         struct drm_gem_object *gobj;
1396         u32 domain, alloc_domain;
1397         u64 alloc_flags;
1398         int ret;
1399
1400         /*
1401          * Check on which domain to allocate BO
1402          */
1403         if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
1404                 domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1405                 alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
1406                 alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
1407                         AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
1408                         AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
1409         } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
1410                 domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1411                 alloc_flags = 0;
1412         } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
1413                 domain = AMDGPU_GEM_DOMAIN_GTT;
1414                 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1415                 alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE;
1416                 if (!offset || !*offset)
1417                         return -EINVAL;
1418                 user_addr = untagged_addr(*offset);
1419         } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1420                         KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1421                 domain = AMDGPU_GEM_DOMAIN_GTT;
1422                 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1423                 bo_type = ttm_bo_type_sg;
1424                 alloc_flags = 0;
1425                 if (size > UINT_MAX)
1426                         return -EINVAL;
1427                 sg = create_doorbell_sg(*offset, size);
1428                 if (!sg)
1429                         return -ENOMEM;
1430         } else {
1431                 return -EINVAL;
1432         }
1433
1434         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1435         if (!*mem) {
1436                 ret = -ENOMEM;
1437                 goto err;
1438         }
1439         INIT_LIST_HEAD(&(*mem)->attachments);
1440         mutex_init(&(*mem)->lock);
1441         (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1442
1443         /* Workaround for AQL queue wraparound bug. Map the same
1444          * memory twice. That means we only actually allocate half
1445          * the memory.
1446          */
1447         if ((*mem)->aql_queue)
1448                 size = size >> 1;
1449
1450         (*mem)->alloc_flags = flags;
1451
1452         amdgpu_sync_create(&(*mem)->sync);
1453
1454         ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
1455         if (ret) {
1456                 pr_debug("Insufficient memory\n");
1457                 goto err_reserve_limit;
1458         }
1459
1460         pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1461                         va, size, domain_string(alloc_domain));
1462
1463         ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags,
1464                                        bo_type, NULL, &gobj);
1465         if (ret) {
1466                 pr_debug("Failed to create BO on domain %s. ret %d\n",
1467                          domain_string(alloc_domain), ret);
1468                 goto err_bo_create;
1469         }
1470         bo = gem_to_amdgpu_bo(gobj);
1471         ret = drm_vma_node_allow(&gobj->vma_node, drm_priv);
1472         if (ret) {
1473                 pr_debug("Failed to allow vma node access. ret %d\n", ret);
1474                 goto err_node_allow;
1475         }
1476         if (bo_type == ttm_bo_type_sg) {
1477                 bo->tbo.sg = sg;
1478                 bo->tbo.ttm->sg = sg;
1479         }
1480         bo->kfd_bo = *mem;
1481         (*mem)->bo = bo;
1482         if (user_addr)
1483                 bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO;
1484
1485         (*mem)->va = va;
1486         (*mem)->domain = domain;
1487         (*mem)->mapped_to_gpu_memory = 0;
1488         (*mem)->process_info = avm->process_info;
1489         add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1490
1491         if (user_addr) {
1492                 ret = init_user_pages(*mem, user_addr);
1493                 if (ret)
1494                         goto allocate_init_user_pages_failed;
1495         }
1496
1497         if (offset)
1498                 *offset = amdgpu_bo_mmap_offset(bo);
1499
1500         return 0;
1501
1502 allocate_init_user_pages_failed:
1503         remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
1504         drm_vma_node_revoke(&gobj->vma_node, drm_priv);
1505 err_node_allow:
1506         amdgpu_bo_unref(&bo);
1507         /* Don't unreserve system mem limit twice */
1508         goto err_reserve_limit;
1509 err_bo_create:
1510         unreserve_mem_limit(adev, size, alloc_domain, !!sg);
1511 err_reserve_limit:
1512         mutex_destroy(&(*mem)->lock);
1513         kfree(*mem);
1514 err:
1515         if (sg) {
1516                 sg_free_table(sg);
1517                 kfree(sg);
1518         }
1519         return ret;
1520 }
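/* Illustrative call order from the KFD side (a sketch, not a verbatim
 * caller): after allocating a BO with the function above,
 *
 *	r = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kgd, mem, drm_priv);
 *	if (!r)
 *		r = amdgpu_amdkfd_gpuvm_sync_memory(kgd, mem, true);
 *	...
 *	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kgd, mem, drm_priv);
 *	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kgd, mem, drm_priv, NULL);
 */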
1521
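/* Free a BO allocated with the allocation function above. Teardown order,
 * as implemented below: drop the BO from the process' validate list so the
 * restore worker stops touching it, unregister the MMU notifier (userptr
 * BOs), reserve the BO and all VMs it is attached to, remove the KFD
 * eviction fence, detach the BO from every VM, free the sg table created
 * for doorbell/MMIO BOs, and finally drop the GEM reference.
 */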
1522 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1523                 struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv,
1524                 uint64_t *size)
1525 {
1526         struct amdkfd_process_info *process_info = mem->process_info;
1527         unsigned long bo_size = mem->bo->tbo.base.size;
1528         struct kfd_mem_attachment *entry, *tmp;
1529         struct bo_vm_reservation_context ctx;
1530         struct ttm_validate_buffer *bo_list_entry;
1531         unsigned int mapped_to_gpu_memory;
1532         int ret;
1533         bool is_imported = false;
1534
1535         mutex_lock(&mem->lock);
1536         mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
1537         is_imported = mem->is_imported;
1538         mutex_unlock(&mem->lock);
1539         /* lock is not needed after this, since mem is unused and will
1540          * be freed anyway
1541          */
1542
1543         if (mapped_to_gpu_memory > 0) {
1544                 pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1545                                 mem->va, bo_size);
1546                 return -EBUSY;
1547         }
1548
1549         /* Make sure restore workers don't access the BO any more */
1550         bo_list_entry = &mem->validate_list;
1551         mutex_lock(&process_info->lock);
1552         list_del(&bo_list_entry->head);
1553         mutex_unlock(&process_info->lock);
1554
1555         /* No more MMU notifiers */
1556         amdgpu_mn_unregister(mem->bo);
1557
1558         ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1559         if (unlikely(ret))
1560                 return ret;
1561
1562         /* The eviction fence should be removed by the last unmap.
1563          * TODO: Log an error condition if the bo still has the eviction fence
1564          * attached
1565          */
1566         amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1567                                         process_info->eviction_fence);
1568         pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1569                 mem->va + bo_size * (1 + mem->aql_queue));
1570
1571         ret = unreserve_bo_and_vms(&ctx, false, false);
1572
1573         /* Remove from VM internal data structures */
1574         list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
1575                 kfd_mem_detach(entry);
1576
1577         /* Free the sync object */
1578         amdgpu_sync_free(&mem->sync);
1579
1580         /* If the SG is not NULL, it's one we created for a doorbell or mmio
1581          * remap BO. We need to free it.
1582          */
1583         if (mem->bo->tbo.sg) {
1584                 sg_free_table(mem->bo->tbo.sg);
1585                 kfree(mem->bo->tbo.sg);
1586         }
1587
1588         /* Update the size of the BO being freed if it was allocated from
1589          * VRAM and is not imported.
1590          */
1591         if (size) {
1592                 if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
1593                     (!is_imported))
1594                         *size = bo_size;
1595                 else
1596                         *size = 0;
1597         }
1598
1599         /* Free the BO */
1600         drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
1601         if (mem->dmabuf)
1602                 dma_buf_put(mem->dmabuf);
1603         drm_gem_object_put(&mem->bo->tbo.base);
1604         mutex_destroy(&mem->lock);
1605         kfree(mem);
1606
1607         return ret;
1608 }
1609
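/* Map a BO into the GPUVM of @drm_priv. process_info->lock keeps the
 * restore worker out, and the mmap lock is taken briefly to get a stable
 * view of the userptr "invalid" state. The BO is attached to the VM on
 * first use, validated once, and the page tables of every attachment
 * belonging to this VM are updated (deferred to the restore worker for
 * invalid userptr BOs) while mapped_to_gpu_memory is incremented.
 */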
1610 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1611                 struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
1612 {
1613         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1614         struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1615         int ret;
1616         struct amdgpu_bo *bo;
1617         uint32_t domain;
1618         struct kfd_mem_attachment *entry;
1619         struct bo_vm_reservation_context ctx;
1620         unsigned long bo_size;
1621         bool is_invalid_userptr = false;
1622
1623         bo = mem->bo;
1624         if (!bo) {
1625                 pr_err("Invalid BO when mapping memory to GPU\n");
1626                 return -EINVAL;
1627         }
1628
1629         /* Make sure restore is not running concurrently. Since we
1630          * don't map invalid userptr BOs, we rely on the next restore
1631          * worker to do the mapping
1632          */
1633         mutex_lock(&mem->process_info->lock);
1634
1635         /* Take the mmap lock. If we find an invalid userptr BO, we can be
1636          * sure that the MMU notifier is no longer running
1637          * concurrently and the queues are actually stopped
1638          */
1639         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1640                 mmap_write_lock(current->mm);
1641                 is_invalid_userptr = atomic_read(&mem->invalid);
1642                 mmap_write_unlock(current->mm);
1643         }
1644
1645         mutex_lock(&mem->lock);
1646
1647         domain = mem->domain;
1648         bo_size = bo->tbo.base.size;
1649
1650         pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1651                         mem->va,
1652                         mem->va + bo_size * (1 + mem->aql_queue),
1653                         avm, domain_string(domain));
1654
1655         if (!kfd_mem_is_attached(avm, mem)) {
1656                 ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue);
1657                 if (ret)
1658                         goto out;
1659         }
1660
1661         ret = reserve_bo_and_vm(mem, avm, &ctx);
1662         if (unlikely(ret))
1663                 goto out;
1664
1665         /* Userptr can be marked as "not invalid", but not actually be
1666          * validated yet (still in the system domain). In that case
1667          * the queues are still stopped and we can leave mapping for
1668          * the next restore worker
1669          */
1670         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
1671             bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
1672                 is_invalid_userptr = true;
1673
1674         ret = vm_validate_pt_pd_bos(avm);
1675         if (unlikely(ret))
1676                 goto out_unreserve;
1677
1678         if (mem->mapped_to_gpu_memory == 0 &&
1679             !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1680                 /* Validate BO only once. The eviction fence gets added to BO
1681                  * the first time it is mapped. Validate will wait for all
1682                  * background evictions to complete.
1683                  */
1684                 ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1685                 if (ret) {
1686                         pr_debug("Validate failed\n");
1687                         goto out_unreserve;
1688                 }
1689         }
1690
1691         list_for_each_entry(entry, &mem->attachments, list) {
1692                 if (entry->bo_va->base.vm != avm || entry->is_mapped)
1693                         continue;
1694
1695                 pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1696                          entry->va, entry->va + bo_size, entry);
1697
1698                 ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
1699                                       is_invalid_userptr);
1700                 if (ret) {
1701                         pr_err("Failed to map bo to gpuvm\n");
1702                         goto out_unreserve;
1703                 }
1704
1705                 ret = vm_update_pds(avm, ctx.sync);
1706                 if (ret) {
1707                         pr_err("Failed to update page directories\n");
1708                         goto out_unreserve;
1709                 }
1710
1711                 entry->is_mapped = true;
1712                 mem->mapped_to_gpu_memory++;
1713                 pr_debug("\t INC mapping count %d\n",
1714                          mem->mapped_to_gpu_memory);
1715         }
1716
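        /* Attach the process eviction fence to mapped, non-pinned,
         * non-userptr BOs so that a TTM eviction of the BO first triggers
         * the KFD eviction mechanism (queues are stopped before the BO
         * moves).
         */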
1717         if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
1718                 amdgpu_bo_fence(bo,
1719                                 &avm->process_info->eviction_fence->base,
1720                                 true);
1721         ret = unreserve_bo_and_vms(&ctx, false, false);
1722
1723         goto out;
1724
1725 out_unreserve:
1726         unreserve_bo_and_vms(&ctx, false, false);
1727 out:
1728         mutex_unlock(&mem->process_info->lock);
1729         mutex_unlock(&mem->lock);
1730         return ret;
1731 }
1732
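/* Undo amdgpu_amdkfd_gpuvm_map_memory_to_gpu() for one VM: clear the page
 * table entries of every attachment belonging to @drm_priv, decrement
 * mapped_to_gpu_memory, and remove the eviction fence once the BO is no
 * longer mapped by any VM so it becomes evictable again.
 */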
1733 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1734                 struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
1735 {
1736         struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1737         struct amdkfd_process_info *process_info = avm->process_info;
1738         unsigned long bo_size = mem->bo->tbo.base.size;
1739         struct kfd_mem_attachment *entry;
1740         struct bo_vm_reservation_context ctx;
1741         int ret;
1742
1743         mutex_lock(&mem->lock);
1744
1745         ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx);
1746         if (unlikely(ret))
1747                 goto out;
1748         /* If no VMs were reserved, it means the BO wasn't actually mapped */
1749         if (ctx.n_vms == 0) {
1750                 ret = -EINVAL;
1751                 goto unreserve_out;
1752         }
1753
1754         ret = vm_validate_pt_pd_bos(avm);
1755         if (unlikely(ret))
1756                 goto unreserve_out;
1757
1758         pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1759                 mem->va,
1760                 mem->va + bo_size * (1 + mem->aql_queue),
1761                 avm);
1762
1763         list_for_each_entry(entry, &mem->attachments, list) {
1764                 if (entry->bo_va->base.vm != avm || !entry->is_mapped)
1765                         continue;
1766
1767                 pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1768                          entry->va, entry->va + bo_size, entry);
1769
1770                 unmap_bo_from_gpuvm(mem, entry, ctx.sync);
1771                 entry->is_mapped = false;
1772
1773                 mem->mapped_to_gpu_memory--;
1774                 pr_debug("\t DEC mapping count %d\n",
1775                          mem->mapped_to_gpu_memory);
1776         }
1777
1778         /* If BO is unmapped from all VMs, unfence it. It can be evicted if
1779          * required.
1780          */
1781         if (mem->mapped_to_gpu_memory == 0 &&
1782             !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) &&
1783             !mem->bo->tbo.pin_count)
1784                 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1785                                                 process_info->eviction_fence);
1786
1787 unreserve_out:
1788         unreserve_bo_and_vms(&ctx, false, false);
1789 out:
1790         mutex_unlock(&mem->lock);
1791         return ret;
1792 }
1793
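/* Wait for the fences accumulated in mem->sync (page table updates from
 * map/unmap). The sync object is cloned under mem->lock and the wait runs
 * on the clone, so mem->lock is not held while sleeping.
 */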
1794 int amdgpu_amdkfd_gpuvm_sync_memory(
1795                 struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1796 {
1797         struct amdgpu_sync sync;
1798         int ret;
1799
1800         amdgpu_sync_create(&sync);
1801
1802         mutex_lock(&mem->lock);
1803         amdgpu_sync_clone(&mem->sync, &sync);
1804         mutex_unlock(&mem->lock);
1805
1806         ret = amdgpu_sync_wait(&sync, intr);
1807         amdgpu_sync_free(&sync);
1808         return ret;
1809 }
1810
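/* Pin a GTT BO and map it into the kernel address space, e.g. for BOs the
 * driver needs permanent CPU access to. The BO is removed from the
 * eviction machinery (fence removed, dropped from the validate list) so
 * the kernel mapping stays valid.
 */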
1811 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1812                 struct kgd_mem *mem, void **kptr, uint64_t *size)
1813 {
1814         int ret;
1815         struct amdgpu_bo *bo = mem->bo;
1816
1817         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1818                 pr_err("userptr can't be mapped to kernel\n");
1819                 return -EINVAL;
1820         }
1821
1822         /* Delete kgd_mem from kfd_bo_list to avoid re-validating
1823          * this BO during restore after eviction.
1824          */
1825         mutex_lock(&mem->process_info->lock);
1826
1827         ret = amdgpu_bo_reserve(bo, true);
1828         if (ret) {
1829                 pr_err("Failed to reserve bo. ret %d\n", ret);
1830                 goto bo_reserve_failed;
1831         }
1832
1833         ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
1834         if (ret) {
1835                 pr_err("Failed to pin bo. ret %d\n", ret);
1836                 goto pin_failed;
1837         }
1838
1839         ret = amdgpu_bo_kmap(bo, kptr);
1840         if (ret) {
1841                 pr_err("Failed to map bo to kernel. ret %d\n", ret);
1842                 goto kmap_failed;
1843         }
1844
1845         amdgpu_amdkfd_remove_eviction_fence(
1846                 bo, mem->process_info->eviction_fence);
1847         list_del_init(&mem->validate_list.head);
1848
1849         if (size)
1850                 *size = amdgpu_bo_size(bo);
1851
1852         amdgpu_bo_unreserve(bo);
1853
1854         mutex_unlock(&mem->process_info->lock);
1855         return 0;
1856
1857 kmap_failed:
1858         amdgpu_bo_unpin(bo);
1859 pin_failed:
1860         amdgpu_bo_unreserve(bo);
1861 bo_reserve_failed:
1862         mutex_unlock(&mem->process_info->lock);
1863
1864         return ret;
1865 }
1866
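/* Copy the most recent VM fault information, if any. The memory barrier
 * orders the copy before clearing the "updated" flag, so the producer
 * does not overwrite fault data that has not been consumed yet.
 */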
1867 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
1868                                               struct kfd_vm_fault_info *mem)
1869 {
1870         struct amdgpu_device *adev;
1871
1872         adev = (struct amdgpu_device *)kgd;
1873         if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
1874                 *mem = *adev->gmc.vm_fault_info;
1875                 mb();
1876                 atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1877         }
1878         return 0;
1879 }
1880
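/* Import a DMA-buf as a KFD BO. Only buffers exported by amdgpu on the
 * same device are accepted, and only VRAM and GTT BOs. The import shares
 * the underlying GEM object (an extra reference is taken) and the BO is
 * added to the process' BO list like a native allocation.
 */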
1881 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
1882                                       struct dma_buf *dma_buf,
1883                                       uint64_t va, void *drm_priv,
1884                                       struct kgd_mem **mem, uint64_t *size,
1885                                       uint64_t *mmap_offset)
1886 {
1887         struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
1888         struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1889         struct drm_gem_object *obj;
1890         struct amdgpu_bo *bo;
1891         int ret;
1892
1893         if (dma_buf->ops != &amdgpu_dmabuf_ops)
1894                 /* Can't handle non-graphics buffers */
1895                 return -EINVAL;
1896
1897         obj = dma_buf->priv;
1898         if (drm_to_adev(obj->dev) != adev)
1899                 /* Can't handle buffers from other devices */
1900                 return -EINVAL;
1901
1902         bo = gem_to_amdgpu_bo(obj);
1903         if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
1904                                     AMDGPU_GEM_DOMAIN_GTT)))
1905                 /* Only VRAM and GTT BOs are supported */
1906                 return -EINVAL;
1907
1908         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1909         if (!*mem)
1910                 return -ENOMEM;
1911
1912         ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
1913         if (ret) {
1914                 kfree(*mem);
1915                 return ret;
1916         }
1917
1918         if (size)
1919                 *size = amdgpu_bo_size(bo);
1920
1921         if (mmap_offset)
1922                 *mmap_offset = amdgpu_bo_mmap_offset(bo);
1923
1924         INIT_LIST_HEAD(&(*mem)->attachments);
1925         mutex_init(&(*mem)->lock);
1926
1927         (*mem)->alloc_flags =
1928                 ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1929                 KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
1930                 | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
1931                 | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
1932
1933         drm_gem_object_get(&bo->tbo.base);
1934         (*mem)->bo = bo;
1935         (*mem)->va = va;
1936         (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1937                 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
1938         (*mem)->mapped_to_gpu_memory = 0;
1939         (*mem)->process_info = avm->process_info;
1940         add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
1941         amdgpu_sync_create(&(*mem)->sync);
1942         (*mem)->is_imported = true;
1943
1944         return 0;
1945 }
1946
1947 /* Evict a userptr BO by stopping the queues if necessary
1948  *
1949  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
1950  * cannot do any memory allocations, and cannot take any locks that
1951  * are held elsewhere while allocating memory. Therefore this is as
1952  * simple as possible, using atomic counters.
1953  *
1954  * It doesn't do anything to the BO itself. The real work happens in
1955  * restore, where we get updated page addresses. This function only
1956  * ensures that GPU access to the BO is stopped.
1957  */
1958 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
1959                                 struct mm_struct *mm)
1960 {
1961         struct amdkfd_process_info *process_info = mem->process_info;
1962         int evicted_bos;
1963         int r = 0;
1964
1965         atomic_inc(&mem->invalid);
1966         evicted_bos = atomic_inc_return(&process_info->evicted_bos);
1967         if (evicted_bos == 1) {
1968                 /* First eviction, stop the queues */
1969                 r = kgd2kfd_quiesce_mm(mm);
1970                 if (r)
1971                         pr_err("Failed to quiesce KFD\n");
1972                 schedule_delayed_work(&process_info->restore_userptr_work,
1973                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1974         }
1975
1976         return r;
1977 }
1978
1979 /* Update invalid userptr BOs
1980  *
1981  * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
1982  * userptr_inval_list and updates user pages for all BOs that have
1983  * been invalidated since their last update.
1984  */
1985 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
1986                                      struct mm_struct *mm)
1987 {
1988         struct kgd_mem *mem, *tmp_mem;
1989         struct amdgpu_bo *bo;
1990         struct ttm_operation_ctx ctx = { false, false };
1991         int invalid, ret;
1992
1993         /* Move all invalidated BOs to the userptr_inval_list and
1994          * release their user pages by migration to the CPU domain
1995          */
1996         list_for_each_entry_safe(mem, tmp_mem,
1997                                  &process_info->userptr_valid_list,
1998                                  validate_list.head) {
1999                 if (!atomic_read(&mem->invalid))
2000                         continue; /* BO is still valid */
2001
2002                 bo = mem->bo;
2003
2004                 if (amdgpu_bo_reserve(bo, true))
2005                         return -EAGAIN;
2006                 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
2007                 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
2008                 amdgpu_bo_unreserve(bo);
2009                 if (ret) {
2010                         pr_err("%s: Failed to invalidate userptr BO\n",
2011                                __func__);
2012                         return -EAGAIN;
2013                 }
2014
2015                 list_move_tail(&mem->validate_list.head,
2016                                &process_info->userptr_inval_list);
2017         }
2018
2019         if (list_empty(&process_info->userptr_inval_list))
2020                 return 0; /* All evicted userptr BOs were freed */
2021
2022         /* Go through userptr_inval_list and update any invalid user_pages */
2023         list_for_each_entry(mem, &process_info->userptr_inval_list,
2024                             validate_list.head) {
2025                 invalid = atomic_read(&mem->invalid);
2026                 if (!invalid)
2027                         /* BO hasn't been invalidated since the last
2028                          * revalidation attempt. Keep its BO list.
2029                          */
2030                         continue;
2031
2032                 bo = mem->bo;
2033
2034                 /* Get updated user pages */
2035                 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
2036                 if (ret) {
2037                         pr_debug("%s: Failed to get user pages: %d\n",
2038                                 __func__, ret);
2039
2040                         /* Return error -EBUSY or -ENOMEM, retry restore */
2041                         return ret;
2042                 }
2043
2044                 /*
2045                  * FIXME: Cannot ignore the return code, must hold
2046                  * notifier_lock
2047                  */
2048                 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
2049
2050                 /* Mark the BO as valid unless it was invalidated
2051                  * again concurrently.
2052                  */
2053                 if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
2054                         return -EAGAIN;
2055         }
2056
2057         return 0;
2058 }
2059
2060 /* Validate invalid userptr BOs
2061  *
2062  * Validates BOs on the userptr_inval_list, and moves them back to the
2063  * userptr_valid_list. Also updates GPUVM page tables with new page
2064  * addresses and waits for the page table updates to complete.
2065  */
2066 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
2067 {
2068         struct amdgpu_bo_list_entry *pd_bo_list_entries;
2069         struct list_head resv_list, duplicates;
2070         struct ww_acquire_ctx ticket;
2071         struct amdgpu_sync sync;
2072
2073         struct amdgpu_vm *peer_vm;
2074         struct kgd_mem *mem, *tmp_mem;
2075         struct amdgpu_bo *bo;
2076         struct ttm_operation_ctx ctx = { false, false };
2077         int i, ret;
2078
2079         pd_bo_list_entries = kcalloc(process_info->n_vms,
2080                                      sizeof(struct amdgpu_bo_list_entry),
2081                                      GFP_KERNEL);
2082         if (!pd_bo_list_entries) {
2083                 pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
2084                 ret = -ENOMEM;
2085                 goto out_no_mem;
2086         }
2087
2088         INIT_LIST_HEAD(&resv_list);
2089         INIT_LIST_HEAD(&duplicates);
2090
2091         /* Get all the page directory BOs that need to be reserved */
2092         i = 0;
2093         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2094                             vm_list_node)
2095                 amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
2096                                     &pd_bo_list_entries[i++]);
2097         /* Add the userptr_inval_list entries to resv_list */
2098         list_for_each_entry(mem, &process_info->userptr_inval_list,
2099                             validate_list.head) {
2100                 list_add_tail(&mem->resv_list.head, &resv_list);
2101                 mem->resv_list.bo = mem->validate_list.bo;
2102                 mem->resv_list.num_shared = mem->validate_list.num_shared;
2103         }
2104
2105         /* Reserve all BOs and page tables for validation */
2106         ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
2107         WARN(!list_empty(&duplicates), "Duplicates should be empty");
2108         if (ret)
2109                 goto out_free;
2110
2111         amdgpu_sync_create(&sync);
2112
2113         ret = process_validate_vms(process_info);
2114         if (ret)
2115                 goto unreserve_out;
2116
2117         /* Validate BOs and update GPUVM page tables */
2118         list_for_each_entry_safe(mem, tmp_mem,
2119                                  &process_info->userptr_inval_list,
2120                                  validate_list.head) {
2121                 struct kfd_mem_attachment *attachment;
2122
2123                 bo = mem->bo;
2124
2125                 /* Validate the BO if we got user pages */
2126                 if (bo->tbo.ttm->pages[0]) {
2127                         amdgpu_bo_placement_from_domain(bo, mem->domain);
2128                         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
2129                         if (ret) {
2130                                 pr_err("%s: failed to validate BO\n", __func__);
2131                                 goto unreserve_out;
2132                         }
2133                 }
2134
2135                 list_move_tail(&mem->validate_list.head,
2136                                &process_info->userptr_valid_list);
2137
2138                 /* Update mapping. If the BO was not validated
2139                  * (because we couldn't get user pages), this will
2140                  * clear the page table entries, which will result in
2141                  * VM faults if the GPU tries to access the invalid
2142                  * memory.
2143                  */
2144                 list_for_each_entry(attachment, &mem->attachments, list) {
2145                         if (!attachment->is_mapped)
2146                                 continue;
2147
2148                         kfd_mem_dmaunmap_attachment(mem, attachment);
2149                         ret = update_gpuvm_pte(mem, attachment, &sync);
2150                         if (ret) {
2151                                 pr_err("%s: update PTE failed\n", __func__);
2152                                 /* make sure this gets validated again */
2153                                 atomic_inc(&mem->invalid);
2154                                 goto unreserve_out;
2155                         }
2156                 }
2157         }
2158
2159         /* Update page directories */
2160         ret = process_update_pds(process_info, &sync);
2161
2162 unreserve_out:
2163         ttm_eu_backoff_reservation(&ticket, &resv_list);
2164         amdgpu_sync_wait(&sync, false);
2165         amdgpu_sync_free(&sync);
2166 out_free:
2167         kfree(pd_bo_list_entries);
2168 out_no_mem:
2169
2170         return ret;
2171 }
2172
2173 /* Worker callback to restore evicted userptr BOs
2174  *
2175  * Tries to update and validate all userptr BOs. If successful and no
2176  * concurrent evictions happened, the queues are restarted. Otherwise,
2177  * reschedule for another attempt later.
2178  */
2179 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
2180 {
2181         struct delayed_work *dwork = to_delayed_work(work);
2182         struct amdkfd_process_info *process_info =
2183                 container_of(dwork, struct amdkfd_process_info,
2184                              restore_userptr_work);
2185         struct task_struct *usertask;
2186         struct mm_struct *mm;
2187         int evicted_bos;
2188
2189         evicted_bos = atomic_read(&process_info->evicted_bos);
2190         if (!evicted_bos)
2191                 return;
2192
2193         /* Reference task and mm in case of concurrent process termination */
2194         usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
2195         if (!usertask)
2196                 return;
2197         mm = get_task_mm(usertask);
2198         if (!mm) {
2199                 put_task_struct(usertask);
2200                 return;
2201         }
2202
2203         mutex_lock(&process_info->lock);
2204
2205         if (update_invalid_user_pages(process_info, mm))
2206                 goto unlock_out;
2207         /* userptr_inval_list can be empty if all evicted userptr BOs
2208          * have been freed. In that case there is nothing to validate
2209          * and we can just restart the queues.
2210          */
2211         if (!list_empty(&process_info->userptr_inval_list)) {
2212                 if (atomic_read(&process_info->evicted_bos) != evicted_bos)
2213                         goto unlock_out; /* Concurrent eviction, try again */
2214
2215                 if (validate_invalid_user_pages(process_info))
2216                         goto unlock_out;
2217         }
2218         /* Final check for concurrent eviction and atomic update. If
2219          * another eviction happens after successful update, it will
2220          * be a first eviction that calls quiesce_mm. The eviction
2221          * reference counting inside KFD will handle this case.
2222          */
2223         if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
2224             evicted_bos)
2225                 goto unlock_out;
2226         evicted_bos = 0;
2227         if (kgd2kfd_resume_mm(mm)) {
2228                 pr_err("%s: Failed to resume KFD\n", __func__);
2229                 /* No recovery from this failure. Probably the CP is
2230                  * hanging. No point trying again.
2231                  */
2232         }
2233
2234 unlock_out:
2235         mutex_unlock(&process_info->lock);
2236         mmput(mm);
2237         put_task_struct(usertask);
2238
2239         /* If validation failed, reschedule another attempt */
2240         if (evicted_bos)
2241                 schedule_delayed_work(&process_info->restore_userptr_work,
2242                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
2243 }
2244
2245 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
2246  *   KFD process identified by process_info
2247  *
2248  * @process_info: amdkfd_process_info of the KFD process
2249  *
2250  * After memory eviction, the restore thread calls this function. The
2251  * function should be called while the process is still valid. BO restore involves:
2252  *
2253  * 1.  Release the old eviction fence and create a new one
2254  * 2.  Get two copies of the PD BO list from all the VMs. Keep one copy as pd_list.
2255  * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
2256  *     BOs that need to be reserved.
2257  * 4.  Reserve all the BOs
2258  * 5.  Validate the PD and PT BOs.
2259  * 6.  Validate all KFD BOs using kfd_bo_list, map them and add the new fence
2260  * 7.  Add the fence to all PD and PT BOs.
2261  * 8.  Unreserve all BOs
2262  */
2263 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
2264 {
2265         struct amdgpu_bo_list_entry *pd_bo_list;
2266         struct amdkfd_process_info *process_info = info;
2267         struct amdgpu_vm *peer_vm;
2268         struct kgd_mem *mem;
2269         struct bo_vm_reservation_context ctx;
2270         struct amdgpu_amdkfd_fence *new_fence;
2271         int ret = 0, i;
2272         struct list_head duplicate_save;
2273         struct amdgpu_sync sync_obj;
2274         unsigned long failed_size = 0;
2275         unsigned long total_size = 0;
2276
2277         INIT_LIST_HEAD(&duplicate_save);
2278         INIT_LIST_HEAD(&ctx.list);
2279         INIT_LIST_HEAD(&ctx.duplicates);
2280
2281         pd_bo_list = kcalloc(process_info->n_vms,
2282                              sizeof(struct amdgpu_bo_list_entry),
2283                              GFP_KERNEL);
2284         if (!pd_bo_list)
2285                 return -ENOMEM;
2286
2287         i = 0;
2288         mutex_lock(&process_info->lock);
2289         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2290                         vm_list_node)
2291                 amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
2292
2293         /* Reserve all BOs and page tables/directory. Add all BOs from
2294          * kfd_bo_list to ctx.list
2295          */
2296         list_for_each_entry(mem, &process_info->kfd_bo_list,
2297                             validate_list.head) {
2298
2299                 list_add_tail(&mem->resv_list.head, &ctx.list);
2300                 mem->resv_list.bo = mem->validate_list.bo;
2301                 mem->resv_list.num_shared = mem->validate_list.num_shared;
2302         }
2303
2304         ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
2305                                      false, &duplicate_save);
2306         if (ret) {
2307                 pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2308                 goto ttm_reserve_fail;
2309         }
2310
2311         amdgpu_sync_create(&sync_obj);
2312
2313         /* Validate PDs and PTs */
2314         ret = process_validate_vms(process_info);
2315         if (ret)
2316                 goto validate_map_fail;
2317
2318         ret = process_sync_pds_resv(process_info, &sync_obj);
2319         if (ret) {
2320                 pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
2321                 goto validate_map_fail;
2322         }
2323
2324         /* Validate BOs and map them to GPUVM (update VM page tables). */
2325         list_for_each_entry(mem, &process_info->kfd_bo_list,
2326                             validate_list.head) {
2327
2328                 struct amdgpu_bo *bo = mem->bo;
2329                 uint32_t domain = mem->domain;
2330                 struct kfd_mem_attachment *attachment;
2331
2332                 total_size += amdgpu_bo_size(bo);
2333
2334                 ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2335                 if (ret) {
2336                         pr_debug("Memory eviction: Validate BOs failed\n");
2337                         failed_size += amdgpu_bo_size(bo);
2338                         ret = amdgpu_amdkfd_bo_validate(bo,
2339                                                 AMDGPU_GEM_DOMAIN_GTT, false);
2340                         if (ret) {
2341                                 pr_debug("Memory eviction: Try again\n");
2342                                 goto validate_map_fail;
2343                         }
2344                 }
2345                 ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving);
2346                 if (ret) {
2347                         pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2348                         goto validate_map_fail;
2349                 }
2350                 list_for_each_entry(attachment, &mem->attachments, list) {
2351                         if (!attachment->is_mapped)
2352                                 continue;
2353
2354                         kfd_mem_dmaunmap_attachment(mem, attachment);
2355                         ret = update_gpuvm_pte(mem, attachment, &sync_obj);
2356                         if (ret) {
2357                                 pr_debug("Memory eviction: update PTE failed. Try again\n");
2358                                 goto validate_map_fail;
2359                         }
2360                 }
2361         }
2362
2363         if (failed_size)
2364                 pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
2365
2366         /* Update page directories */
2367         ret = process_update_pds(process_info, &sync_obj);
2368         if (ret) {
2369                 pr_debug("Memory eviction: update PDs failed. Try again\n");
2370                 goto validate_map_fail;
2371         }
2372
2373         /* Wait for validate and PT updates to finish */
2374         amdgpu_sync_wait(&sync_obj, false);
2375
2376         /* Release the old eviction fence and create a new one, because a
2377          * fence only goes from unsignaled to signaled and cannot be reused.
2378          * Use the context and mm from the old fence.
2379          */
2380         new_fence = amdgpu_amdkfd_fence_create(
2381                                 process_info->eviction_fence->base.context,
2382                                 process_info->eviction_fence->mm,
2383                                 NULL);
2384         if (!new_fence) {
2385                 pr_err("Failed to create eviction fence\n");
2386                 ret = -ENOMEM;
2387                 goto validate_map_fail;
2388         }
2389         dma_fence_put(&process_info->eviction_fence->base);
2390         process_info->eviction_fence = new_fence;
2391         *ef = dma_fence_get(&new_fence->base);
2392
2393         /* Attach new eviction fence to all BOs */
2394         list_for_each_entry(mem, &process_info->kfd_bo_list,
2395                 validate_list.head)
2396                 amdgpu_bo_fence(mem->bo,
2397                         &process_info->eviction_fence->base, true);
2398
2399         /* Attach eviction fence to PD / PT BOs */
2400         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2401                             vm_list_node) {
2402                 struct amdgpu_bo *bo = peer_vm->root.base.bo;
2403
2404                 amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2405         }
2406
2407 validate_map_fail:
2408         ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2409         amdgpu_sync_free(&sync_obj);
2410 ttm_reserve_fail:
2411         mutex_unlock(&process_info->lock);
2412         kfree(pd_bo_list);
2413         return ret;
2414 }
2415
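/* Wrap the device's GWS BO in a kgd_mem for this process: take a BO
 * reference, add it to the process' BO list, validate it in the GWS
 * domain and attach the process eviction fence so amdgpu and KFD users
 * of GWS can evict each other.
 */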
2416 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
2417 {
2418         struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2419         struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
2420         int ret;
2421
2422         if (!info || !gws)
2423                 return -EINVAL;
2424
2425         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2426         if (!*mem)
2427                 return -ENOMEM;
2428
2429         mutex_init(&(*mem)->lock);
2430         INIT_LIST_HEAD(&(*mem)->attachments);
2431         (*mem)->bo = amdgpu_bo_ref(gws_bo);
2432         (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
2433         (*mem)->process_info = process_info;
2434         add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
2435         amdgpu_sync_create(&(*mem)->sync);
2436
2437
2438         /* Validate the GWS BO the first time it is added to the process */
2439         mutex_lock(&(*mem)->process_info->lock);
2440         ret = amdgpu_bo_reserve(gws_bo, false);
2441         if (unlikely(ret)) {
2442                 pr_err("Reserve gws bo failed %d\n", ret);
2443                 goto bo_reservation_failure;
2444         }
2445
2446         ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
2447         if (ret) {
2448                 pr_err("GWS BO validate failed %d\n", ret);
2449                 goto bo_validation_failure;
2450         }
2451         /* The GWS resource is shared between amdgpu and amdkfd.
2452          * Add the process eviction fence to the BO so they can
2453          * evict each other.
2454          */
2455         ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
2456         if (ret)
2457                 goto reserve_shared_fail;
2458         amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
2459         amdgpu_bo_unreserve(gws_bo);
2460         mutex_unlock(&(*mem)->process_info->lock);
2461
2462         return ret;
2463
2464 reserve_shared_fail:
2465 bo_validation_failure:
2466         amdgpu_bo_unreserve(gws_bo);
2467 bo_reservation_failure:
2468         mutex_unlock(&(*mem)->process_info->lock);
2469         amdgpu_sync_free(&(*mem)->sync);
2470         remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
2471         amdgpu_bo_unref(&gws_bo);
2472         mutex_destroy(&(*mem)->lock);
2473         kfree(*mem);
2474         *mem = NULL;
2475         return ret;
2476 }
2477
2478 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
2479 {
2480         int ret;
2481         struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2482         struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
2483         struct amdgpu_bo *gws_bo = kgd_mem->bo;
2484
2485         /* Remove BO from process's validate list so restore worker won't touch
2486          * it anymore
2487          */
2488         remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
2489
2490         ret = amdgpu_bo_reserve(gws_bo, false);
2491         if (unlikely(ret)) {
2492                 pr_err("Reserve gws bo failed %d\n", ret);
2493                 /* TODO: add BO back to validate_list? */
2494                 return ret;
2495         }
2496         amdgpu_amdkfd_remove_eviction_fence(gws_bo,
2497                         process_info->eviction_fence);
2498         amdgpu_bo_unreserve(gws_bo);
2499         amdgpu_sync_free(&kgd_mem->sync);
2500         amdgpu_bo_unref(&gws_bo);
2501         mutex_destroy(&kgd_mem->lock);
2502         kfree(mem);
2503         return 0;
2504 }
2505
2506 /* Returns GPU-specific tiling mode information */
2507 int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
2508                                 struct tile_config *config)
2509 {
2510         struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
2511
2512         config->gb_addr_config = adev->gfx.config.gb_addr_config;
2513         config->tile_config_ptr = adev->gfx.config.tile_mode_array;
2514         config->num_tile_configs =
2515                         ARRAY_SIZE(adev->gfx.config.tile_mode_array);
2516         config->macro_tile_config_ptr =
2517                         adev->gfx.config.macrotile_mode_array;
2518         config->num_macro_tile_configs =
2519                         ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
2520
2521         /* Those values are not set from GFX9 onwards */
2522         config->num_banks = adev->gfx.config.num_banks;
2523         config->num_ranks = adev->gfx.config.num_ranks;
2524
2525         return 0;
2526 }