drivers/gpu/drm/nouveau/nouveau_dmem.c
/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_dmem.h"
#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_dma.h"
#include "nouveau_mem.h"
#include "nouveau_bo.h"
#include "nouveau_svm.h"

#include <nvif/class.h>
#include <nvif/object.h>
#include <nvif/push906f.h>
#include <nvif/if000c.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>

#include <nvhw/class/cla0b5.h>

#include <linux/sched/mm.h>
#include <linux/hmm.h>

/*
 * FIXME: this is ugly. Right now we are using TTM to allocate vram and we pin
 * it in vram while in use. We likely want to overhaul memory management for
 * nouveau to be more page-like (not necessarily with system page size but a
 * bigger page size) at the lowest level, and have some shim layer on top that
 * would provide the same functionality as TTM.
 */
#define DMEM_CHUNK_SIZE (2UL << 20)
#define DMEM_CHUNK_NPAGES (DMEM_CHUNK_SIZE >> PAGE_SHIFT)

enum nouveau_aper {
        NOUVEAU_APER_VIRT,
        NOUVEAU_APER_VRAM,
        NOUVEAU_APER_HOST,
};

typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
                                      enum nouveau_aper dst_aper, u64 dst_addr,
                                      enum nouveau_aper src_aper, u64 src_addr);
typedef int (*nouveau_clear_page_t)(struct nouveau_drm *drm, u32 length,
                                    enum nouveau_aper dst_aper, u64 dst_addr);

struct nouveau_dmem_chunk {
        struct list_head list;
        struct nouveau_bo *bo;
        struct nouveau_drm *drm;
        unsigned long callocated;
        struct dev_pagemap pagemap;
};

struct nouveau_dmem_migrate {
        nouveau_migrate_copy_t copy_func;
        nouveau_clear_page_t clear_func;
        struct nouveau_channel *chan;
};

struct nouveau_dmem {
        struct nouveau_drm *drm;
        struct nouveau_dmem_migrate migrate;
        struct list_head chunks;
        struct mutex mutex;
        struct page *free_pages;
        spinlock_t lock;
};

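/*
 * Each device-private struct page points at its dev_pagemap, which is
 * embedded in the owning chunk, so container_of() recovers the chunk (and
 * from it, the drm device) from any page in the chunk.
 */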
static struct nouveau_dmem_chunk *nouveau_page_to_chunk(struct page *page)
{
        return container_of(page->pgmap, struct nouveau_dmem_chunk, pagemap);
}

static struct nouveau_drm *page_to_drm(struct page *page)
{
        struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);

        return chunk->drm;
}

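/*
 * Return the VRAM address backing a device-private page: the page's offset
 * within its chunk's pagemap range, added to the chunk's BO offset.
 */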
unsigned long nouveau_dmem_page_addr(struct page *page)
{
        struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
        unsigned long off = (page_to_pfn(page) << PAGE_SHIFT) -
                                chunk->pagemap.range.start;

        return chunk->bo->offset + off;
}

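/*
 * Called when the last reference to a device-private page is dropped: push
 * the page back onto the per-device free list and decrement the owning
 * chunk's allocation count.
 */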
static void nouveau_dmem_page_free(struct page *page)
{
        struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
        struct nouveau_dmem *dmem = chunk->drm->dmem;

        spin_lock(&dmem->lock);
        page->zone_device_data = dmem->free_pages;
        dmem->free_pages = page;

        WARN_ON(!chunk->callocated);
        chunk->callocated--;
        /*
         * FIXME: when chunk->callocated reaches 0 we should add the chunk to
         * a reclaim list so that it can be freed in case of memory pressure.
         */
        spin_unlock(&dmem->lock);
}

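/* Wait for a migration copy to complete, then drop the fence reference. */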
static void nouveau_dmem_fence_done(struct nouveau_fence **fence)
{
        if (fence) {
                nouveau_fence_wait(*fence, true, false);
                nouveau_fence_unref(fence);
        } else {
                /*
                 * FIXME: wait for the channel to be IDLE before finalizing
                 * the hmem object.
                 */
        }
}

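/*
 * Copy one faulting device-private page back to system memory: allocate and
 * dma-map a destination page, invalidate the SVM range, then copy from VRAM
 * to host with the device's DMA engine.
 */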
static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
                struct vm_fault *vmf, struct migrate_vma *args,
                dma_addr_t *dma_addr)
{
        struct device *dev = drm->dev->dev;
        struct page *dpage, *spage;
        struct nouveau_svmm *svmm;

        spage = migrate_pfn_to_page(args->src[0]);
        if (!spage || !(args->src[0] & MIGRATE_PFN_MIGRATE))
                return 0;

        dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
        if (!dpage)
                return VM_FAULT_SIGBUS;
        lock_page(dpage);

        *dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, *dma_addr))
                goto error_free_page;

        svmm = spage->zone_device_data;
        mutex_lock(&svmm->mutex);
        nouveau_svmm_invalidate(svmm, args->start, args->end);
        if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr,
                        NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage)))
                goto error_dma_unmap;
        mutex_unlock(&svmm->mutex);

        args->dst[0] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
        return 0;

error_dma_unmap:
        mutex_unlock(&svmm->mutex);
        dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
error_free_page:
        __free_page(dpage);
        return VM_FAULT_SIGBUS;
}

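/*
 * CPU fault handler for device-private pages: migrate the faulting page back
 * to system RAM so the CPU can access it again.
 */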
static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
{
        struct nouveau_drm *drm = page_to_drm(vmf->page);
        struct nouveau_dmem *dmem = drm->dmem;
        struct nouveau_fence *fence;
        unsigned long src = 0, dst = 0;
        dma_addr_t dma_addr = 0;
        vm_fault_t ret;
        struct migrate_vma args = {
                .vma            = vmf->vma,
                .start          = vmf->address,
                .end            = vmf->address + PAGE_SIZE,
                .src            = &src,
                .dst            = &dst,
                .pgmap_owner    = drm->dev,
                .flags          = MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
        };

        /*
         * FIXME: what we really want is to find some heuristic to migrate
         * more than just one page on CPU fault. When such a fault happens it
         * is very likely that more surrounding pages will CPU-fault too.
         */
        if (migrate_vma_setup(&args) < 0)
                return VM_FAULT_SIGBUS;
        if (!args.cpages)
                return 0;

        ret = nouveau_dmem_fault_copy_one(drm, vmf, &args, &dma_addr);
        if (ret || dst == 0)
                goto done;

        nouveau_fence_new(dmem->migrate.chan, false, &fence);
        migrate_vma_pages(&args);
        nouveau_dmem_fence_done(&fence);
        dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
done:
        migrate_vma_finalize(&args);
        return ret;
}

static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
        .page_free              = nouveau_dmem_page_free,
        .migrate_to_ram         = nouveau_dmem_migrate_to_ram,
};

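/*
 * Allocate a new 2MB chunk of device memory: reserve a free physical address
 * range for the device-private pages, back the range with a pinned VRAM BO,
 * register the pagemap, then seed the free-page list with every page of the
 * chunk except the last one, which is handed back through *ppage.
 */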
static int
nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
{
        struct nouveau_dmem_chunk *chunk;
        struct resource *res;
        struct page *page;
        void *ptr;
        unsigned long i, pfn_first;
        int ret;

        chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
        if (chunk == NULL) {
                ret = -ENOMEM;
                goto out;
        }

        /* Allocate unused physical address space for device private pages. */
        res = request_free_mem_region(&iomem_resource, DMEM_CHUNK_SIZE,
                                      "nouveau_dmem");
        if (IS_ERR(res)) {
                ret = PTR_ERR(res);
                goto out_free;
        }

        chunk->drm = drm;
        chunk->pagemap.type = MEMORY_DEVICE_PRIVATE;
        chunk->pagemap.range.start = res->start;
        chunk->pagemap.range.end = res->end;
        chunk->pagemap.nr_range = 1;
        chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
        chunk->pagemap.owner = drm->dev;

        ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
                             NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, NULL, NULL,
                             &chunk->bo);
        if (ret)
                goto out_release;

        ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
        if (ret)
                goto out_bo_free;

        ptr = memremap_pages(&chunk->pagemap, numa_node_id());
        if (IS_ERR(ptr)) {
                ret = PTR_ERR(ptr);
                goto out_bo_unpin;
        }

        mutex_lock(&drm->dmem->mutex);
        list_add(&chunk->list, &drm->dmem->chunks);
        mutex_unlock(&drm->dmem->mutex);

        pfn_first = chunk->pagemap.range.start >> PAGE_SHIFT;
        page = pfn_to_page(pfn_first);
        spin_lock(&drm->dmem->lock);
        for (i = 0; i < DMEM_CHUNK_NPAGES - 1; ++i, ++page) {
                page->zone_device_data = drm->dmem->free_pages;
                drm->dmem->free_pages = page;
        }
        *ppage = page;
        chunk->callocated++;
        spin_unlock(&drm->dmem->lock);

        NV_INFO(drm, "DMEM: registered %ldMB of device memory\n",
                DMEM_CHUNK_SIZE >> 20);

        return 0;

out_bo_unpin:
        nouveau_bo_unpin(chunk->bo);
out_bo_free:
        nouveau_bo_ref(NULL, &chunk->bo);
out_release:
        release_mem_region(chunk->pagemap.range.start, range_len(&chunk->pagemap.range));
out_free:
        kfree(chunk);
out:
        return ret;
}

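/*
 * Pop a page off the free list, or allocate a fresh chunk if the list is
 * empty. The returned page is referenced and locked.
 */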
static struct page *
nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk;
        struct page *page = NULL;
        int ret;

        spin_lock(&drm->dmem->lock);
        if (drm->dmem->free_pages) {
                page = drm->dmem->free_pages;
                drm->dmem->free_pages = page->zone_device_data;
                chunk = nouveau_page_to_chunk(page);
                chunk->callocated++;
                spin_unlock(&drm->dmem->lock);
        } else {
                spin_unlock(&drm->dmem->lock);
                ret = nouveau_dmem_chunk_alloc(drm, &page);
                if (ret)
                        return NULL;
        }

        get_page(page);
        lock_page(page);
        return page;
}

static void
nouveau_dmem_page_free_locked(struct nouveau_drm *drm, struct page *page)
{
        unlock_page(page);
        put_page(page);
}

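/* Re-pin all chunk BOs in VRAM when the device resumes. */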
void
nouveau_dmem_resume(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk;
        int ret;

        if (drm->dmem == NULL)
                return;

        mutex_lock(&drm->dmem->mutex);
        list_for_each_entry(chunk, &drm->dmem->chunks, list) {
                ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
                /* FIXME handle pin failure */
                WARN_ON(ret);
        }
        mutex_unlock(&drm->dmem->mutex);
}

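/* Unpin all chunk BOs across suspend. */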
void
nouveau_dmem_suspend(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk;

        if (drm->dmem == NULL)
                return;

        mutex_lock(&drm->dmem->mutex);
        list_for_each_entry(chunk, &drm->dmem->chunks, list)
                nouveau_bo_unpin(chunk->bo);
        mutex_unlock(&drm->dmem->mutex);
}

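/*
 * Tear down all chunks: unpin and release the BOs, unmap the pagemaps and
 * release the reserved physical address ranges.
 */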
void
nouveau_dmem_fini(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk, *tmp;

        if (drm->dmem == NULL)
                return;

        mutex_lock(&drm->dmem->mutex);

        list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
                nouveau_bo_unpin(chunk->bo);
                nouveau_bo_ref(NULL, &chunk->bo);
                list_del(&chunk->list);
                memunmap_pages(&chunk->pagemap);
                release_mem_region(chunk->pagemap.range.start,
                                   range_len(&chunk->pagemap.range));
                kfree(chunk);
        }

        mutex_unlock(&drm->dmem->mutex);
}

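/*
 * Copy npages pages between apertures with the GPU's cla0b5 copy engine,
 * programming the transfer as a pitch-linear blit of one page per line.
 */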
static int
nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
                    enum nouveau_aper dst_aper, u64 dst_addr,
                    enum nouveau_aper src_aper, u64 src_addr)
{
        struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
        u32 launch_dma = 0;
        int ret;

        ret = PUSH_WAIT(push, 13);
        if (ret)
                return ret;

        if (src_aper != NOUVEAU_APER_VIRT) {
                switch (src_aper) {
                case NOUVEAU_APER_VRAM:
                        PUSH_IMMD(push, NVA0B5, SET_SRC_PHYS_MODE,
                                  NVDEF(NVA0B5, SET_SRC_PHYS_MODE, TARGET, LOCAL_FB));
                        break;
                case NOUVEAU_APER_HOST:
                        PUSH_IMMD(push, NVA0B5, SET_SRC_PHYS_MODE,
                                  NVDEF(NVA0B5, SET_SRC_PHYS_MODE, TARGET, COHERENT_SYSMEM));
                        break;
                default:
                        return -EINVAL;
                }

                launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, SRC_TYPE, PHYSICAL);
        }

        if (dst_aper != NOUVEAU_APER_VIRT) {
                switch (dst_aper) {
                case NOUVEAU_APER_VRAM:
                        PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
                                  NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, LOCAL_FB));
                        break;
                case NOUVEAU_APER_HOST:
                        PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
                                  NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, COHERENT_SYSMEM));
                        break;
                default:
                        return -EINVAL;
                }

                launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, DST_TYPE, PHYSICAL);
        }

        PUSH_MTHD(push, NVA0B5, OFFSET_IN_UPPER,
                  NVVAL(NVA0B5, OFFSET_IN_UPPER, UPPER, upper_32_bits(src_addr)),

                                OFFSET_IN_LOWER, lower_32_bits(src_addr),

                                OFFSET_OUT_UPPER,
                  NVVAL(NVA0B5, OFFSET_OUT_UPPER, UPPER, upper_32_bits(dst_addr)),

                                OFFSET_OUT_LOWER, lower_32_bits(dst_addr),
                                PITCH_IN, PAGE_SIZE,
                                PITCH_OUT, PAGE_SIZE,
                                LINE_LENGTH_IN, PAGE_SIZE,
                                LINE_COUNT, npages);

        PUSH_MTHD(push, NVA0B5, LAUNCH_DMA, launch_dma |
                  NVDEF(NVA0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED) |
                  NVDEF(NVA0B5, LAUNCH_DMA, FLUSH_ENABLE, TRUE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, SEMAPHORE_TYPE, NONE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, INTERRUPT_TYPE, NONE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, SRC_MEMORY_LAYOUT, PITCH) |
                  NVDEF(NVA0B5, LAUNCH_DMA, DST_MEMORY_LAYOUT, PITCH) |
                  NVDEF(NVA0B5, LAUNCH_DMA, MULTI_LINE_ENABLE, TRUE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, REMAP_ENABLE, FALSE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, BYPASS_L2, USE_PTE_SETTING));
        return 0;
}

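/*
 * Zero-fill length bytes at dst_addr using the copy engine's remap constants:
 * both 32-bit remap components are zero, so each element written is 8 bytes
 * of zeroes (hence the length >> 3 below).
 */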
static int
nvc0b5_migrate_clear(struct nouveau_drm *drm, u32 length,
                     enum nouveau_aper dst_aper, u64 dst_addr)
{
        struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
        u32 launch_dma = 0;
        int ret;

        ret = PUSH_WAIT(push, 12);
        if (ret)
                return ret;

        switch (dst_aper) {
        case NOUVEAU_APER_VRAM:
                PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
                          NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, LOCAL_FB));
                break;
        case NOUVEAU_APER_HOST:
                PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
                          NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, COHERENT_SYSMEM));
                break;
        default:
                return -EINVAL;
        }

        launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, DST_TYPE, PHYSICAL);

        PUSH_MTHD(push, NVA0B5, SET_REMAP_CONST_A, 0,
                                SET_REMAP_CONST_B, 0,

                                SET_REMAP_COMPONENTS,
                  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, DST_X, CONST_A) |
                  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, DST_Y, CONST_B) |
                  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, COMPONENT_SIZE, FOUR) |
                  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, NUM_DST_COMPONENTS, TWO));

        PUSH_MTHD(push, NVA0B5, OFFSET_OUT_UPPER,
                  NVVAL(NVA0B5, OFFSET_OUT_UPPER, UPPER, upper_32_bits(dst_addr)),

                                OFFSET_OUT_LOWER, lower_32_bits(dst_addr));

        PUSH_MTHD(push, NVA0B5, LINE_LENGTH_IN, length >> 3);

        PUSH_MTHD(push, NVA0B5, LAUNCH_DMA, launch_dma |
                  NVDEF(NVA0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED) |
                  NVDEF(NVA0B5, LAUNCH_DMA, FLUSH_ENABLE, TRUE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, SEMAPHORE_TYPE, NONE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, INTERRUPT_TYPE, NONE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, SRC_MEMORY_LAYOUT, PITCH) |
                  NVDEF(NVA0B5, LAUNCH_DMA, DST_MEMORY_LAYOUT, PITCH) |
                  NVDEF(NVA0B5, LAUNCH_DMA, MULTI_LINE_ENABLE, FALSE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, REMAP_ENABLE, TRUE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, BYPASS_L2, USE_PTE_SETTING));
        return 0;
}

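/*
 * Hook up the copy/clear functions for the copy-engine classes that support
 * the cla0b5 methods used above (Pascal and newer).
 */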
static int
nouveau_dmem_migrate_init(struct nouveau_drm *drm)
{
        switch (drm->ttm.copy.oclass) {
        case PASCAL_DMA_COPY_A:
        case PASCAL_DMA_COPY_B:
        case  VOLTA_DMA_COPY_A:
        case TURING_DMA_COPY_A:
                drm->dmem->migrate.copy_func = nvc0b5_migrate_copy;
                drm->dmem->migrate.clear_func = nvc0b5_migrate_clear;
                drm->dmem->migrate.chan = drm->ttm.chan;
                return 0;
        default:
                break;
        }
        return -ENODEV;
}

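/* Set up device-private memory management for a nouveau device. */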
void
nouveau_dmem_init(struct nouveau_drm *drm)
{
        int ret;

        /* This only makes sense on PASCAL or newer */
        if (drm->client.device.info.family < NV_DEVICE_INFO_V0_PASCAL)
                return;

        if (!(drm->dmem = kzalloc(sizeof(*drm->dmem), GFP_KERNEL)))
                return;

        drm->dmem->drm = drm;
        mutex_init(&drm->dmem->mutex);
        INIT_LIST_HEAD(&drm->dmem->chunks);
        spin_lock_init(&drm->dmem->lock);

        /* Initialize migration dma helpers before registering memory */
        ret = nouveau_dmem_migrate_init(drm);
        if (ret) {
                kfree(drm->dmem);
                drm->dmem = NULL;
        }
}

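/*
 * Migrate one page of system memory to VRAM: grab a device page, copy the
 * source page into it with the DMA engine (or zero-fill it if the source is
 * an unpopulated hole), and record the GPU pfn for nouveau_pfns_map() later.
 */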
static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
                struct nouveau_svmm *svmm, unsigned long src,
                dma_addr_t *dma_addr, u64 *pfn)
{
        struct device *dev = drm->dev->dev;
        struct page *dpage, *spage;
        unsigned long paddr;

        spage = migrate_pfn_to_page(src);
        if (!(src & MIGRATE_PFN_MIGRATE))
                goto out;

        dpage = nouveau_dmem_page_alloc_locked(drm);
        if (!dpage)
                goto out;

        paddr = nouveau_dmem_page_addr(dpage);
        if (spage) {
                *dma_addr = dma_map_page(dev, spage, 0, page_size(spage),
                                         DMA_BIDIRECTIONAL);
                if (dma_mapping_error(dev, *dma_addr))
                        goto out_free_page;
                if (drm->dmem->migrate.copy_func(drm, 1,
                        NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST, *dma_addr))
                        goto out_dma_unmap;
        } else {
                *dma_addr = DMA_MAPPING_ERROR;
                if (drm->dmem->migrate.clear_func(drm, page_size(dpage),
                        NOUVEAU_APER_VRAM, paddr))
                        goto out_free_page;
        }

        dpage->zone_device_data = svmm;
        *pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
                ((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
        if (src & MIGRATE_PFN_WRITE)
                *pfn |= NVIF_VMM_PFNMAP_V0_W;
        return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;

out_dma_unmap:
        dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
out_free_page:
        nouveau_dmem_page_free_locked(drm, dpage);
out:
        *pfn = NVIF_VMM_PFNMAP_V0_NONE;
        return 0;
}

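/*
 * Migrate one batch of pages prepared by migrate_vma_setup(): copy each page
 * to VRAM, commit the migration, then update the GPU page tables for the
 * whole range before unmapping the source pages.
 */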
static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
                struct nouveau_svmm *svmm, struct migrate_vma *args,
                dma_addr_t *dma_addrs, u64 *pfns)
{
        struct nouveau_fence *fence;
        unsigned long addr = args->start, nr_dma = 0, i;

        for (i = 0; addr < args->end; i++) {
                args->dst[i] = nouveau_dmem_migrate_copy_one(drm, svmm,
                                args->src[i], dma_addrs + nr_dma, pfns + i);
                if (!dma_mapping_error(drm->dev->dev, dma_addrs[nr_dma]))
                        nr_dma++;
                addr += PAGE_SIZE;
        }

        nouveau_fence_new(drm->dmem->migrate.chan, false, &fence);
        migrate_vma_pages(args);
        nouveau_dmem_fence_done(&fence);
        nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);

        while (nr_dma--) {
                dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE,
                                DMA_BIDIRECTIONAL);
        }
        migrate_vma_finalize(args);
}

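/*
 * Migrate a range of a process address space to device memory, in batches of
 * at most SG_MAX_SINGLE_ALLOC pages.
 */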
int
nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
                         struct nouveau_svmm *svmm,
                         struct vm_area_struct *vma,
                         unsigned long start,
                         unsigned long end)
{
        unsigned long npages = (end - start) >> PAGE_SHIFT;
        unsigned long max = min(SG_MAX_SINGLE_ALLOC, npages);
        dma_addr_t *dma_addrs;
        struct migrate_vma args = {
                .vma            = vma,
                .start          = start,
                .pgmap_owner    = drm->dev,
                .flags          = MIGRATE_VMA_SELECT_SYSTEM,
        };
        unsigned long i;
        u64 *pfns;
        int ret = -ENOMEM;

        if (drm->dmem == NULL)
                return -ENODEV;

        args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
        if (!args.src)
                goto out;
        args.dst = kcalloc(max, sizeof(*args.dst), GFP_KERNEL);
        if (!args.dst)
                goto out_free_src;

        dma_addrs = kmalloc_array(max, sizeof(*dma_addrs), GFP_KERNEL);
        if (!dma_addrs)
                goto out_free_dst;

        pfns = nouveau_pfns_alloc(max);
        if (!pfns)
                goto out_free_dma;

        for (i = 0; i < npages; i += max) {
                /* Clamp each batch to the end of the requested range. */
                args.end = min(end, args.start + (max << PAGE_SHIFT));
                ret = migrate_vma_setup(&args);
                if (ret)
                        goto out_free_pfns;

                if (args.cpages)
                        nouveau_dmem_migrate_chunk(drm, svmm, &args, dma_addrs,
                                                   pfns);
                args.start = args.end;
        }

        ret = 0;
out_free_pfns:
        nouveau_pfns_free(pfns);
out_free_dma:
        kfree(dma_addrs);
out_free_dst:
        kfree(args.dst);
out_free_src:
        kfree(args.src);
out:
        return ret;
}