drm/amdkfd: Introduce kfd_node struct (v5)
[linux-2.6-microblaze.git] drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020-2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/types.h>
#include <linux/hmm.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/migrate.h>
#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_res_cursor.h"
#include "kfd_priv.h"
#include "kfd_svm.h"
#include "kfd_migrate.h"
#include "kfd_smi_events.h"

#ifdef dev_fmt
#undef dev_fmt
#endif
#define dev_fmt(fmt) "kfd_migrate: " fmt

static uint64_t
svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr)
{
        return addr + amdgpu_ttm_domain_start(adev, TTM_PL_VRAM);
}
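
/*
 * Editor's note, illustrative only: VRAM pages are identified by their byte
 * offset from the start of the VRAM aperture, so translating an offset into
 * an SDMA-visible address is a single addition. For example, with a
 * (hypothetical) VRAM domain starting at GPU address 0x8000000000, the page
 * at VRAM offset 0x200000 would be accessed at 0x8000200000.
 */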

static int
svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
                     dma_addr_t *addr, uint64_t *gart_addr, uint64_t flags)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_job *job;
        unsigned int num_dw, num_bytes;
        struct dma_fence *fence;
        uint64_t src_addr, dst_addr;
        uint64_t pte_flags;
        void *cpu_addr;
        int r;

        /* use gart window 0 */
        *gart_addr = adev->gmc.gart_start;

        num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
        num_bytes = npages * 8;

        r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
                                     AMDGPU_FENCE_OWNER_UNDEFINED,
                                     num_dw * 4 + num_bytes,
                                     AMDGPU_IB_POOL_DELAYED,
                                     &job);
        if (r)
                return r;

        src_addr = num_dw * 4;
        src_addr += job->ibs[0].gpu_addr;

        dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
        amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
                                dst_addr, num_bytes, false);

        amdgpu_ring_pad_ib(ring, &job->ibs[0]);
        WARN_ON(job->ibs[0].length_dw > num_dw);

        pte_flags = AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
        pte_flags |= AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED;
        if (!(flags & KFD_IOCTL_SVM_FLAG_GPU_RO))
                pte_flags |= AMDGPU_PTE_WRITEABLE;
        pte_flags |= adev->gart.gart_pte_flags;

        cpu_addr = &job->ibs[0].ptr[num_dw];

        amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
        fence = amdgpu_job_submit(job);
        dma_fence_put(fence);

        return r;
}
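
/*
 * Editor's note, illustrative only: the single IB allocated in
 * svm_migrate_gart_map() is laid out as
 *
 *   [0 .. num_dw * 4)                       SDMA copy packet
 *                                           (copy_num_dw dwords, aligned
 *                                           up to 8)
 *   [num_dw * 4 .. num_dw * 4 + npages * 8) one 8-byte GART PTE per
 *                                           system page
 *
 * The copy packet copies the PTE payload from the IB itself (src_addr)
 * into the GART table BO (dst_addr), so a single submission both writes
 * the page table entries and orders them against later SDMA copies on the
 * same ring.
 */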

/**
 * svm_migrate_copy_memory_gart - sdma copy data between ram and vram
 *
 * @adev: amdgpu device the sdma ring is running on
 * @sys: array of DMA addresses of the system pages to copy
 * @vram: vram destination DMA pointer
 * @npages: number of pages to copy
 * @direction: enum MIGRATION_COPY_DIR
 * @mfence: output, sdma fence to signal after sdma is done
 *
 * The ram address uses contiguous GART table entries mapped to the ram
 * pages; the vram address uses the direct mapping of vram pages, which
 * must be npages of contiguous pages.
 * The GART update and the sdma copy share the same buffer-copy ring, so
 * the copy is split into chunks of at most GTT_MAX_PAGES pages and all
 * sdma operations are serialized; the caller waits for the fence of the
 * last sdma operation, which is returned, to know the whole copy is done.
 *
 * Context: Process context, takes and releases gtt_window_lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
                             uint64_t *vram, uint64_t npages,
                             enum MIGRATION_COPY_DIR direction,
                             struct dma_fence **mfence)
{
        const uint64_t GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
        struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
        uint64_t gart_s, gart_d;
        struct dma_fence *next;
        uint64_t size;
        int r;

        mutex_lock(&adev->mman.gtt_window_lock);

        while (npages) {
                size = min(GTT_MAX_PAGES, npages);

                if (direction == FROM_VRAM_TO_RAM) {
                        gart_s = svm_migrate_direct_mapping_addr(adev, *vram);
                        r = svm_migrate_gart_map(ring, size, sys, &gart_d, 0);

                } else if (direction == FROM_RAM_TO_VRAM) {
                        r = svm_migrate_gart_map(ring, size, sys, &gart_s,
                                                 KFD_IOCTL_SVM_FLAG_GPU_RO);
                        gart_d = svm_migrate_direct_mapping_addr(adev, *vram);
                }
                if (r) {
                        dev_err(adev->dev, "fail %d create gart mapping\n", r);
                        goto out_unlock;
                }

                r = amdgpu_copy_buffer(ring, gart_s, gart_d, size * PAGE_SIZE,
                                       NULL, &next, false, true, false);
                if (r) {
                        dev_err(adev->dev, "fail %d to copy memory\n", r);
                        goto out_unlock;
                }

                dma_fence_put(*mfence);
                *mfence = next;
                npages -= size;
                if (npages) {
                        sys += size;
                        vram += size;
                }
        }

out_unlock:
        mutex_unlock(&adev->mman.gtt_window_lock);

        return r;
}
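
/*
 * Editor's sketch, compiled out and not part of the driver: the chunking
 * done above, shown without the GART plumbing. At most
 * AMDGPU_GTT_MAX_TRANSFER_SIZE pages go through the shared GART window per
 * iteration, and only the fence of the last chunk is kept for the final
 * wait.
 */
#if 0
static void svm_migrate_chunk_walk_example(uint64_t npages)
{
        const uint64_t max_chunk = AMDGPU_GTT_MAX_TRANSFER_SIZE;
        uint64_t size;

        while (npages) {
                size = min(max_chunk, npages);
                /* map "size" pages through the GART window and copy them */
                npages -= size;
        }
}
#endif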

/**
 * svm_migrate_copy_done - wait for the sdma memory copy to finish
 *
 * @adev: amdgpu device the sdma memory copy is executing on
 * @mfence: migrate fence
 *
 * Wait for the dma fence to be signaled. If the copy was split into
 * multiple sdma operations, this is the fence of the last one.
 *
 * Context: called after svm_migrate_copy_memory
 *
 * Return:
 * 0            - success
 * otherwise    - error code from dma fence signal
 */
static int
svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence)
{
        int r = 0;

        if (mfence) {
                r = dma_fence_wait(mfence, false);
                dma_fence_put(mfence);
                pr_debug("sdma copy memory fence done\n");
        }

        return r;
}

unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr)
{
        return (addr + adev->kfd.dev->pgmap.range.start) >> PAGE_SHIFT;
}

static void
svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
{
        struct page *page;

        page = pfn_to_page(pfn);
        svm_range_bo_ref(prange->svm_bo);
        page->zone_device_data = prange->svm_bo;
        zone_device_page_init(page);
}

static void
svm_migrate_put_vram_page(struct amdgpu_device *adev, unsigned long addr)
{
        struct page *page;

        page = pfn_to_page(svm_migrate_addr_to_pfn(adev, addr));
        unlock_page(page);
        put_page(page);
}

static unsigned long
svm_migrate_addr(struct amdgpu_device *adev, struct page *page)
{
        unsigned long addr;

        addr = page_to_pfn(page) << PAGE_SHIFT;
        return (addr - adev->kfd.dev->pgmap.range.start);
}

static struct page *
svm_migrate_get_sys_page(struct vm_area_struct *vma, unsigned long addr)
{
        struct page *page;

        page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
        if (page)
                lock_page(page);

        return page;
}

static void svm_migrate_put_sys_page(unsigned long addr)
{
        struct page *page;

        page = pfn_to_page(addr >> PAGE_SHIFT);
        unlock_page(page);
        put_page(page);
}

static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
{
        unsigned long cpages = 0;
        unsigned long i;

        for (i = 0; i < migrate->npages; i++) {
                if (migrate->src[i] & MIGRATE_PFN_VALID &&
                    migrate->src[i] & MIGRATE_PFN_MIGRATE)
                        cpages++;
        }
        return cpages;
}

static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
{
        unsigned long upages = 0;
        unsigned long i;

        for (i = 0; i < migrate->npages; i++) {
                if (migrate->src[i] & MIGRATE_PFN_VALID &&
                    !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
                        upages++;
        }
        return upages;
}
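
/*
 * Editor's note, illustrative only: each migrate->src[] entry is the source
 * PFN shifted up by MIGRATE_PFN_SHIFT with status flags in the low bits.
 * MIGRATE_PFN_VALID marks a populated entry and MIGRATE_PFN_MIGRATE marks
 * one the core mm agreed to migrate, so "valid && !migrate" above counts
 * pages that were collected but could not be isolated for migration.
 */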

static int
svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
                         struct migrate_vma *migrate, struct dma_fence **mfence,
                         dma_addr_t *scratch, uint64_t ttm_res_offset)
{
        uint64_t npages = migrate->npages;
        struct device *dev = adev->dev;
        struct amdgpu_res_cursor cursor;
        dma_addr_t *src;
        uint64_t *dst;
        uint64_t i, j;
        int r;

        pr_debug("svms 0x%p [0x%lx 0x%lx 0x%llx]\n", prange->svms, prange->start,
                 prange->last, ttm_res_offset);

        src = scratch;
        dst = (uint64_t *)(scratch + npages);

        amdgpu_res_first(prange->ttm_res, ttm_res_offset,
                         npages << PAGE_SHIFT, &cursor);
        for (i = j = 0; i < npages; i++) {
                struct page *spage;

                dst[i] = cursor.start + (j << PAGE_SHIFT);
                migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
                svm_migrate_get_vram_page(prange, migrate->dst[i]);
                migrate->dst[i] = migrate_pfn(migrate->dst[i]);

                spage = migrate_pfn_to_page(migrate->src[i]);
                if (spage && !is_zone_device_page(spage)) {
                        src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
                                              DMA_TO_DEVICE);
                        r = dma_mapping_error(dev, src[i]);
                        if (r) {
                                dev_err(adev->dev, "%s: fail %d dma_map_page\n",
                                        __func__, r);
                                goto out_free_vram_pages;
                        }
                } else {
                        if (j) {
                                r = svm_migrate_copy_memory_gart(
                                                adev, src + i - j,
                                                dst + i - j, j,
                                                FROM_RAM_TO_VRAM,
                                                mfence);
                                if (r)
                                        goto out_free_vram_pages;
                                amdgpu_res_next(&cursor, (j + 1) << PAGE_SHIFT);
                                j = 0;
                        } else {
                                amdgpu_res_next(&cursor, PAGE_SIZE);
                        }
                        continue;
                }

                pr_debug_ratelimited("dma mapping src to 0x%llx, pfn 0x%lx\n",
                                     src[i] >> PAGE_SHIFT, page_to_pfn(spage));

                if (j >= (cursor.size >> PAGE_SHIFT) - 1 && i < npages - 1) {
                        r = svm_migrate_copy_memory_gart(adev, src + i - j,
                                                         dst + i - j, j + 1,
                                                         FROM_RAM_TO_VRAM,
                                                         mfence);
                        if (r)
                                goto out_free_vram_pages;
                        amdgpu_res_next(&cursor, (j + 1) * PAGE_SIZE);
                        j = 0;
                } else {
                        j++;
                }
        }

        r = svm_migrate_copy_memory_gart(adev, src + i - j, dst + i - j, j,
                                         FROM_RAM_TO_VRAM, mfence);

out_free_vram_pages:
        if (r) {
                pr_debug("failed %d to copy memory to vram\n", r);
                while (i--) {
                        svm_migrate_put_vram_page(adev, dst[i]);
                        migrate->dst[i] = 0;
                }
        }

#ifdef DEBUG_FORCE_MIXED_DOMAINS
        for (i = 0, j = 0; i < npages; i += 4, j++) {
                if (j & 1)
                        continue;
                svm_migrate_put_vram_page(adev, dst[i]);
                migrate->dst[i] = 0;
                svm_migrate_put_vram_page(adev, dst[i + 1]);
                migrate->dst[i + 1] = 0;
                svm_migrate_put_vram_page(adev, dst[i + 2]);
                migrate->dst[i + 2] = 0;
                svm_migrate_put_vram_page(adev, dst[i + 3]);
                migrate->dst[i + 3] = 0;
        }
#endif

        return r;
}
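
/*
 * Editor's example, illustrative only: svm_migrate_copy_to_vram() batches
 * contiguous runs of system pages and flushes the pending run of j pages
 * when it hits a hole (a page already in VRAM) or the end of the current
 * VRAM cursor segment. With npages = 5 where page 2 is already resident in
 * VRAM, the loop issues two GART copies:
 *
 *   pages [0,1] -> FROM_RAM_TO_VRAM copy, flushed at the hole
 *   page  2     -> skipped, cursor advanced by PAGE_SIZE
 *   pages [3,4] -> copied by the final call after the loop
 */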

static long
svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
                        struct vm_area_struct *vma, uint64_t start,
                        uint64_t end, uint32_t trigger, uint64_t ttm_res_offset)
{
        struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
        uint64_t npages = (end - start) >> PAGE_SHIFT;
        struct kfd_process_device *pdd;
        struct dma_fence *mfence = NULL;
        struct migrate_vma migrate = { 0 };
        unsigned long cpages = 0;
        dma_addr_t *scratch;
        void *buf;
        int r = -ENOMEM;

        migrate.vma = vma;
        migrate.start = start;
        migrate.end = end;
        migrate.flags = MIGRATE_VMA_SELECT_SYSTEM;
        migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);

        buf = kvcalloc(npages,
                       2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
                       GFP_KERNEL);
        if (!buf)
                goto out;

        migrate.src = buf;
        migrate.dst = migrate.src + npages;
        scratch = (dma_addr_t *)(migrate.dst + npages);

        kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid,
                                      start >> PAGE_SHIFT, end >> PAGE_SHIFT,
                                      0, adev->kfd.dev->node->id, prange->prefetch_loc,
                                      prange->preferred_loc, trigger);

        r = migrate_vma_setup(&migrate);
        if (r) {
                dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
                        __func__, r, prange->start, prange->last);
                goto out_free;
        }

        cpages = migrate.cpages;
        if (!cpages) {
                pr_debug("failed collect migrate sys pages [0x%lx 0x%lx]\n",
                         prange->start, prange->last);
                goto out_free;
        }
        if (cpages != npages)
                pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
                         cpages, npages);
        else
                pr_debug("0x%lx pages migrated\n", cpages);

        r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch, ttm_res_offset);
        migrate_vma_pages(&migrate);

        pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
                svm_migrate_successful_pages(&migrate), cpages, migrate.npages);

        svm_migrate_copy_done(adev, mfence);
        migrate_vma_finalize(&migrate);

        kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid,
                                    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
                                    0, adev->kfd.dev->node->id, trigger);

        svm_range_dma_unmap(adev->dev, scratch, 0, npages);
        svm_range_free_dma_mappings(prange);

out_free:
        kvfree(buf);
out:
        if (!r && cpages) {
                pdd = svm_range_get_pdd_by_adev(prange, adev);
                if (pdd)
                        WRITE_ONCE(pdd->page_in, pdd->page_in + cpages);

                return cpages;
        }
        return r;
}
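
/*
 * Editor's note, illustrative only: the single kvcalloc() in
 * svm_migrate_vma_to_vram() packs four per-page arrays into one buffer:
 *
 *   migrate.src   npages * sizeof(unsigned long)  source PFNs and flags
 *   migrate.dst   npages * sizeof(unsigned long)  destination PFNs
 *   scratch       npages * sizeof(dma_addr_t)     DMA-mapped system pages
 *   dst           npages * sizeof(uint64_t)       VRAM addresses, carved out
 *                 of the scratch area by svm_migrate_copy_to_vram()
 */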

/**
 * svm_migrate_ram_to_vram - migrate svm range from system to device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @mm: the process mm structure
 * @trigger: reason of migration
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
                        struct mm_struct *mm, uint32_t trigger)
{
        unsigned long addr, start, end;
        struct vm_area_struct *vma;
        struct amdgpu_device *adev;
        uint64_t ttm_res_offset;
        unsigned long cpages = 0;
        long r = 0;

        if (prange->actual_loc == best_loc) {
                pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n",
                         prange->svms, prange->start, prange->last, best_loc);
                return 0;
        }

        adev = svm_range_get_adev_by_id(prange, best_loc);
        if (!adev) {
                pr_debug("failed to get device by id 0x%x\n", best_loc);
                return -ENODEV;
        }

        pr_debug("svms 0x%p [0x%lx 0x%lx] to gpu 0x%x\n", prange->svms,
                 prange->start, prange->last, best_loc);

        start = prange->start << PAGE_SHIFT;
        end = (prange->last + 1) << PAGE_SHIFT;

        r = svm_range_vram_node_new(adev, prange, true);
        if (r) {
                dev_dbg(adev->dev, "fail %ld to alloc vram\n", r);
                return r;
        }
        ttm_res_offset = prange->offset << PAGE_SHIFT;

        for (addr = start; addr < end;) {
                unsigned long next;

                vma = vma_lookup(mm, addr);
                if (!vma)
                        break;

                next = min(vma->vm_end, end);
                r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next, trigger, ttm_res_offset);
                if (r < 0) {
                        pr_debug("failed %ld to migrate\n", r);
                        break;
                } else {
                        cpages += r;
                }
                ttm_res_offset += next - addr;
                addr = next;
        }

        if (cpages)
                prange->actual_loc = best_loc;
        else
                svm_range_vram_node_free(prange);

        return r < 0 ? r : 0;
}

static void svm_migrate_page_free(struct page *page)
{
        struct svm_range_bo *svm_bo = page->zone_device_data;

        if (svm_bo) {
                pr_debug_ratelimited("ref: %d\n", kref_read(&svm_bo->kref));
                svm_range_bo_unref_async(svm_bo);
        }
}

static int
svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
                        struct migrate_vma *migrate, struct dma_fence **mfence,
                        dma_addr_t *scratch, uint64_t npages)
{
        struct device *dev = adev->dev;
        uint64_t *src;
        dma_addr_t *dst;
        struct page *dpage;
        uint64_t i = 0, j;
        uint64_t addr;
        int r = 0;

        pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
                 prange->last);

        addr = prange->start << PAGE_SHIFT;

        src = (uint64_t *)(scratch + npages);
        dst = scratch;

        for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
                struct page *spage;

                spage = migrate_pfn_to_page(migrate->src[i]);
                if (!spage || !is_zone_device_page(spage)) {
                        pr_debug("invalid page. Could be in CPU already svms 0x%p [0x%lx 0x%lx]\n",
                                 prange->svms, prange->start, prange->last);
                        if (j) {
                                r = svm_migrate_copy_memory_gart(adev, dst + i - j,
                                                                 src + i - j, j,
                                                                 FROM_VRAM_TO_RAM,
                                                                 mfence);
                                if (r)
                                        goto out_oom;
                                j = 0;
                        }
                        continue;
                }
                src[i] = svm_migrate_addr(adev, spage);
                if (j > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
                        r = svm_migrate_copy_memory_gart(adev, dst + i - j,
                                                         src + i - j, j,
                                                         FROM_VRAM_TO_RAM,
                                                         mfence);
                        if (r)
                                goto out_oom;
                        j = 0;
                }

                dpage = svm_migrate_get_sys_page(migrate->vma, addr);
                if (!dpage) {
                        pr_debug("failed get page svms 0x%p [0x%lx 0x%lx]\n",
                                 prange->svms, prange->start, prange->last);
                        r = -ENOMEM;
                        goto out_oom;
                }

                dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE);
                r = dma_mapping_error(dev, dst[i]);
                if (r) {
                        dev_err(adev->dev, "%s: fail %d dma_map_page\n", __func__, r);
                        goto out_oom;
                }

                pr_debug_ratelimited("dma mapping dst to 0x%llx, pfn 0x%lx\n",
                                     dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));

                migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
                j++;
        }

        r = svm_migrate_copy_memory_gart(adev, dst + i - j, src + i - j, j,
                                         FROM_VRAM_TO_RAM, mfence);

out_oom:
        if (r) {
                pr_debug("failed %d copy to ram\n", r);
                while (i--) {
                        svm_migrate_put_sys_page(dst[i]);
                        migrate->dst[i] = 0;
                }
        }

        return r;
}
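
/*
 * Editor's example, illustrative only: VRAM backing pages are not
 * guaranteed to be contiguous, so svm_migrate_copy_to_ram() flushes the
 * pending run whenever the next source address does not directly follow
 * the previous one. With 4 KiB pages and VRAM offsets 0x0000, 0x1000,
 * 0x8000, 0x9000, it issues two copies of two pages each rather than one
 * copy of four.
 */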

/**
 * svm_migrate_vma_to_ram - migrate range inside one vma from device to system
 *
 * @adev: amdgpu device to migrate from
 * @prange: svm range structure
 * @vma: vm_area_struct that the range [start, end] belongs to
 * @start: range start virtual address, in bytes
 * @end: range end virtual address, in bytes
 * @trigger: reason of migration
 * @fault_page: the CPU fault page, if migration was triggered by a CPU page fault
 *
 * Context: Process context, caller holds mmap read lock, prange->migrate_mutex
 *
 * Return:
 *   0 - success with all pages migrated
 *   negative values - indicate error
 *   positive values - partial migration, number of pages not migrated
 */
static long
svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
                       struct vm_area_struct *vma, uint64_t start, uint64_t end,
                       uint32_t trigger, struct page *fault_page)
{
        struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
        uint64_t npages = (end - start) >> PAGE_SHIFT;
        unsigned long upages = npages;
        unsigned long cpages = 0;
        struct kfd_process_device *pdd;
        struct dma_fence *mfence = NULL;
        struct migrate_vma migrate = { 0 };
        dma_addr_t *scratch;
        void *buf;
        int r = -ENOMEM;

        migrate.vma = vma;
        migrate.start = start;
        migrate.end = end;
        migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
        if (adev->gmc.xgmi.connected_to_cpu)
                migrate.flags = MIGRATE_VMA_SELECT_DEVICE_COHERENT;
        else
                migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;

        buf = kvcalloc(npages,
                       2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
                       GFP_KERNEL);
        if (!buf)
                goto out;

        migrate.src = buf;
        migrate.dst = migrate.src + npages;
        migrate.fault_page = fault_page;
        scratch = (dma_addr_t *)(migrate.dst + npages);

        kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid,
                                      start >> PAGE_SHIFT, end >> PAGE_SHIFT,
                                      adev->kfd.dev->node->id, 0, prange->prefetch_loc,
                                      prange->preferred_loc, trigger);

        r = migrate_vma_setup(&migrate);
        if (r) {
                dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
                        __func__, r, prange->start, prange->last);
                goto out_free;
        }

        cpages = migrate.cpages;
        if (!cpages) {
                pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
                         prange->start, prange->last);
                upages = svm_migrate_unsuccessful_pages(&migrate);
                goto out_free;
        }
        if (cpages != npages)
                pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
                         cpages, npages);
        else
                pr_debug("0x%lx pages migrated\n", cpages);

        r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
                                    scratch, npages);
        migrate_vma_pages(&migrate);

        upages = svm_migrate_unsuccessful_pages(&migrate);
        pr_debug("unsuccessful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
                 upages, cpages, migrate.npages);

        svm_migrate_copy_done(adev, mfence);
        migrate_vma_finalize(&migrate);

        kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid,
                                    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
                                    adev->kfd.dev->node->id, 0, trigger);

        svm_range_dma_unmap(adev->dev, scratch, 0, npages);

out_free:
        kvfree(buf);
out:
        if (!r && cpages) {
                pdd = svm_range_get_pdd_by_adev(prange, adev);
                if (pdd)
                        WRITE_ONCE(pdd->page_out, pdd->page_out + cpages);
        }
        return r ? r : upages;
}

/**
 * svm_migrate_vram_to_ram - migrate svm range from device to system
 * @prange: range structure
 * @mm: process mm, use current->mm if NULL
 * @trigger: reason of migration
 * @fault_page: the CPU fault page, if migration was triggered by a CPU page fault
 *
 * Context: Process context, caller holds mmap read lock, prange->migrate_mutex
 *
 * Return:
 * 0 - OK, otherwise error code
 */
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
                            uint32_t trigger, struct page *fault_page)
{
        struct amdgpu_device *adev;
        struct vm_area_struct *vma;
        unsigned long addr;
        unsigned long start;
        unsigned long end;
        unsigned long upages = 0;
        long r = 0;

        if (!prange->actual_loc) {
                pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
                         prange->start, prange->last);
                return 0;
        }

        adev = svm_range_get_adev_by_id(prange, prange->actual_loc);
        if (!adev) {
                pr_debug("failed to get device by id 0x%x\n",
                         prange->actual_loc);
                return -ENODEV;
        }

        pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n",
                 prange->svms, prange, prange->start, prange->last,
                 prange->actual_loc);

        start = prange->start << PAGE_SHIFT;
        end = (prange->last + 1) << PAGE_SHIFT;

        for (addr = start; addr < end;) {
                unsigned long next;

                vma = vma_lookup(mm, addr);
                if (!vma) {
                        pr_debug("failed to find vma for prange %p\n", prange);
                        r = -EFAULT;
                        break;
                }

                next = min(vma->vm_end, end);
                r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next, trigger,
                        fault_page);
                if (r < 0) {
                        pr_debug("failed %ld to migrate prange %p\n", r, prange);
                        break;
                } else {
                        upages += r;
                }
                addr = next;
        }

        if (r >= 0 && !upages) {
                svm_range_vram_node_free(prange);
                prange->actual_loc = 0;
        }

        return r < 0 ? r : 0;
}

/**
 * svm_migrate_vram_to_vram - migrate svm range from device to device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @mm: process mm, use current->mm if NULL
 * @trigger: reason of migration
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
                         struct mm_struct *mm, uint32_t trigger)
{
        int r, retries = 3;

        /*
         * TODO: for devices with a large PCIe BAR or on the same xgmi hive,
         * skip using system memory as the migration bridge
         */

        pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc);

        do {
                r = svm_migrate_vram_to_ram(prange, mm, trigger, NULL);
                if (r)
                        return r;
        } while (prange->actual_loc && --retries);

        if (prange->actual_loc)
                return -EDEADLK;

        return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger);
}

int
svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
                    struct mm_struct *mm, uint32_t trigger)
{
        if (!prange->actual_loc)
                return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger);
        else
                return svm_migrate_vram_to_vram(prange, best_loc, mm, trigger);
}

/**
 * svm_migrate_to_ram - CPU page fault handler
 * @vmf: CPU vm fault vma, address
 *
 * Context: vm fault handler, caller holds the mmap read lock
 *
 * Return:
 * 0 - OK
 * VM_FAULT_SIGBUS - notify the application with a SIGBUS page fault
 */
static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
{
        unsigned long addr = vmf->address;
        struct svm_range_bo *svm_bo;
        enum svm_work_list_ops op;
        struct svm_range *parent;
        struct svm_range *prange;
        struct kfd_process *p;
        struct mm_struct *mm;
        int r = 0;

        svm_bo = vmf->page->zone_device_data;
        if (!svm_bo) {
                pr_debug("failed get device page at addr 0x%lx\n", addr);
                return VM_FAULT_SIGBUS;
        }
        if (!mmget_not_zero(svm_bo->eviction_fence->mm)) {
                pr_debug("addr 0x%lx of process mm is destroyed\n", addr);
                return VM_FAULT_SIGBUS;
        }

        mm = svm_bo->eviction_fence->mm;
        if (mm != vmf->vma->vm_mm)
                pr_debug("addr 0x%lx is COW mapping in child process\n", addr);

        p = kfd_lookup_process_by_mm(mm);
        if (!p) {
                pr_debug("failed find process at fault address 0x%lx\n", addr);
                r = VM_FAULT_SIGBUS;
                goto out_mmput;
        }
        if (READ_ONCE(p->svms.faulting_task) == current) {
                pr_debug("skipping ram migration\n");
                r = 0;
                goto out_unref_process;
        }

        pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr);
        addr >>= PAGE_SHIFT;

        mutex_lock(&p->svms.lock);

        prange = svm_range_from_addr(&p->svms, addr, &parent);
        if (!prange) {
                pr_debug("failed get range svms 0x%p addr 0x%lx\n", &p->svms, addr);
                r = -EFAULT;
                goto out_unlock_svms;
        }

        mutex_lock(&parent->migrate_mutex);
        if (prange != parent)
                mutex_lock_nested(&prange->migrate_mutex, 1);

        if (!prange->actual_loc)
                goto out_unlock_prange;

        svm_range_lock(parent);
        if (prange != parent)
                mutex_lock_nested(&prange->lock, 1);
        r = svm_range_split_by_granularity(p, mm, addr, parent, prange);
        if (prange != parent)
                mutex_unlock(&prange->lock);
        svm_range_unlock(parent);
        if (r) {
                pr_debug("failed %d to split range by granularity\n", r);
                goto out_unlock_prange;
        }

        r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm,
                                    KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
                                    vmf->page);
        if (r)
                pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n",
                         r, prange->svms, prange, prange->start, prange->last);

        /* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
        if (p->xnack_enabled && parent == prange)
                op = SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP;
        else
                op = SVM_OP_UPDATE_RANGE_NOTIFIER;
        svm_range_add_list_work(&p->svms, parent, mm, op);
        schedule_deferred_list_work(&p->svms);

out_unlock_prange:
        if (prange != parent)
                mutex_unlock(&prange->migrate_mutex);
        mutex_unlock(&parent->migrate_mutex);
out_unlock_svms:
        mutex_unlock(&p->svms.lock);
out_unref_process:
        pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr);
        kfd_unref_process(p);
out_mmput:
        mmput(mm);
        return r ? VM_FAULT_SIGBUS : 0;
}

static const struct dev_pagemap_ops svm_migrate_pgmap_ops = {
        .page_free              = svm_migrate_page_free,
        .migrate_to_ram         = svm_migrate_to_ram,
};

/* Each VRAM page uses sizeof(struct page) on system memory */
#define SVM_HMM_PAGE_STRUCT_SIZE(size) ((size)/PAGE_SIZE * sizeof(struct page))
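
/*
 * Editor's worked example, assuming sizeof(struct page) == 64 and 4 KiB
 * pages: registering 16 GiB of VRAM costs 16 GiB / 4096 * 64 = 256 MiB of
 * system memory for the page structs, i.e. 1/64th of the VRAM size.
 */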

int svm_migrate_init(struct amdgpu_device *adev)
{
        struct kfd_dev *kfddev = adev->kfd.dev;
        struct dev_pagemap *pgmap;
        struct resource *res = NULL;
        unsigned long size;
        void *r;

        /* Page migration works on Vega10 or newer */
        if (!KFD_IS_SOC15(kfddev))
                return -EINVAL;

        pgmap = &kfddev->pgmap;
        memset(pgmap, 0, sizeof(*pgmap));

        /* TODO: register all vram with HMM for now.
         * should exclude the reserved size
         */
        size = ALIGN(adev->gmc.real_vram_size, 2ULL << 20);
        if (adev->gmc.xgmi.connected_to_cpu) {
                pgmap->range.start = adev->gmc.aper_base;
                pgmap->range.end = adev->gmc.aper_base + adev->gmc.aper_size - 1;
                pgmap->type = MEMORY_DEVICE_COHERENT;
        } else {
                res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
                if (IS_ERR(res))
                        return -ENOMEM;
                pgmap->range.start = res->start;
                pgmap->range.end = res->end;
                pgmap->type = MEMORY_DEVICE_PRIVATE;
        }

        pgmap->nr_range = 1;
        pgmap->ops = &svm_migrate_pgmap_ops;
        pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev);
        pgmap->flags = 0;
        /* Device manager releases device-specific resources, memory region and
         * pgmap when driver disconnects from device.
         */
        r = devm_memremap_pages(adev->dev, pgmap);
        if (IS_ERR(r)) {
                pr_err("failed to register HMM device memory\n");
                if (pgmap->type == MEMORY_DEVICE_PRIVATE)
                        devm_release_mem_region(adev->dev, res->start, resource_size(res));
                /* Disable SVM support capability */
                pgmap->type = 0;
                return PTR_ERR(r);
        }

        pr_debug("reserve %ldMB system memory for VRAM pages struct\n",
                 SVM_HMM_PAGE_STRUCT_SIZE(size) >> 20);

        amdgpu_amdkfd_reserve_system_mem(SVM_HMM_PAGE_STRUCT_SIZE(size));

        svm_range_set_max_pages(adev);

        pr_info("HMM registered %ldMB device memory\n", size >> 20);

        return 0;
}