drm/nouveau/svm: convert migrate_clear to new push macros
drivers/gpu/drm/nouveau/nouveau_dmem.c
/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_dmem.h"
#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_dma.h"
#include "nouveau_mem.h"
#include "nouveau_bo.h"
#include "nouveau_svm.h"

#include <nvif/class.h>
#include <nvif/object.h>
#include <nvif/push906f.h>
#include <nvif/if000c.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>

#include <linux/sched/mm.h>
#include <linux/hmm.h>

/*
 * FIXME: this is ugly; right now we are using TTM to allocate vram and we pin
 * it in vram while in use. We likely want to overhaul memory management for
 * nouveau to be more page-like (not necessarily with the system page size but
 * a bigger page size) at the lowest level and have some shim layer on top
 * that would provide the same functionality as TTM.
 */
#define DMEM_CHUNK_SIZE (2UL << 20)
#define DMEM_CHUNK_NPAGES (DMEM_CHUNK_SIZE >> PAGE_SHIFT)

enum nouveau_aper {
        NOUVEAU_APER_VIRT,
        NOUVEAU_APER_VRAM,
        NOUVEAU_APER_HOST,
};

typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
                                      enum nouveau_aper, u64 dst_addr,
                                      enum nouveau_aper, u64 src_addr);
typedef int (*nouveau_clear_page_t)(struct nouveau_drm *drm, u32 length,
                                      enum nouveau_aper, u64 dst_addr);

struct nouveau_dmem_chunk {
        struct list_head list;
        struct nouveau_bo *bo;
        struct nouveau_drm *drm;
        unsigned long callocated;
        struct dev_pagemap pagemap;
};

struct nouveau_dmem_migrate {
        nouveau_migrate_copy_t copy_func;
        nouveau_clear_page_t clear_func;
        struct nouveau_channel *chan;
};

struct nouveau_dmem {
        struct nouveau_drm *drm;
        struct nouveau_dmem_migrate migrate;
        struct list_head chunks;
        struct mutex mutex;
        struct page *free_pages;
        spinlock_t lock;
};

static struct nouveau_dmem_chunk *nouveau_page_to_chunk(struct page *page)
{
        return container_of(page->pgmap, struct nouveau_dmem_chunk, pagemap);
}

static struct nouveau_drm *page_to_drm(struct page *page)
{
        struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);

        return chunk->drm;
}

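/*
 * Return the VRAM address backing a device-private page: the chunk BO's
 * offset in VRAM plus the page's offset within the chunk.
 */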
unsigned long nouveau_dmem_page_addr(struct page *page)
{
        struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
        unsigned long off = (page_to_pfn(page) << PAGE_SHIFT) -
                                chunk->pagemap.res.start;

        return chunk->bo->offset + off;
}

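/*
 * ZONE_DEVICE page_free() callback: put the page back on the per-device free
 * list and drop the owning chunk's allocation count.
 */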
static void nouveau_dmem_page_free(struct page *page)
{
        struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
        struct nouveau_dmem *dmem = chunk->drm->dmem;

        spin_lock(&dmem->lock);
        page->zone_device_data = dmem->free_pages;
        dmem->free_pages = page;

        WARN_ON(!chunk->callocated);
        chunk->callocated--;
        /*
         * FIXME when chunk->callocated reaches 0 we should add the chunk to
         * a reclaim list so that it can be freed in case of memory pressure.
         */
        spin_unlock(&dmem->lock);
}

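/* Wait for a migration fence to signal, then drop the reference to it. */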
static void nouveau_dmem_fence_done(struct nouveau_fence **fence)
{
        if (fence) {
                nouveau_fence_wait(*fence, true, false);
                nouveau_fence_unref(fence);
        } else {
                /*
                 * FIXME wait for the channel to be IDLE before finalizing
                 * the hmem object.
                 */
        }
}

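/*
 * Copy a single page from VRAM back to a freshly allocated system memory
 * page to service a CPU fault on a device-private page.
 */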
static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
                struct vm_fault *vmf, struct migrate_vma *args,
                dma_addr_t *dma_addr)
{
        struct device *dev = drm->dev->dev;
        struct page *dpage, *spage;

        spage = migrate_pfn_to_page(args->src[0]);
        if (!spage || !(args->src[0] & MIGRATE_PFN_MIGRATE))
                return 0;

        dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
        if (!dpage)
                return VM_FAULT_SIGBUS;
        lock_page(dpage);

        *dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, *dma_addr))
                goto error_free_page;

        if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr,
                        NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage)))
                goto error_dma_unmap;

        args->dst[0] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
        return 0;

error_dma_unmap:
        dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
error_free_page:
        __free_page(dpage);
        return VM_FAULT_SIGBUS;
}

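/*
 * migrate_to_ram() callback: a CPU fault on a device-private page migrates
 * that single page back to system memory.
 */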
static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
{
        struct nouveau_drm *drm = page_to_drm(vmf->page);
        struct nouveau_dmem *dmem = drm->dmem;
        struct nouveau_fence *fence;
        unsigned long src = 0, dst = 0;
        dma_addr_t dma_addr = 0;
        vm_fault_t ret;
        struct migrate_vma args = {
                .vma            = vmf->vma,
                .start          = vmf->address,
                .end            = vmf->address + PAGE_SIZE,
                .src            = &src,
                .dst            = &dst,
                .src_owner      = drm->dev,
        };

        /*
         * FIXME what we really want is to find some heuristic to migrate more
         * than just one page on CPU fault. When such a fault happens it is
         * very likely that more surrounding pages will CPU fault too.
         */
        if (migrate_vma_setup(&args) < 0)
                return VM_FAULT_SIGBUS;
        if (!args.cpages)
                return 0;

        ret = nouveau_dmem_fault_copy_one(drm, vmf, &args, &dma_addr);
        if (ret || dst == 0)
                goto done;

        nouveau_fence_new(dmem->migrate.chan, false, &fence);
        migrate_vma_pages(&args);
        nouveau_dmem_fence_done(&fence);
        dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
done:
        migrate_vma_finalize(&args);
        return ret;
}

static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
        .page_free              = nouveau_dmem_page_free,
        .migrate_to_ram         = nouveau_dmem_migrate_to_ram,
};

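/*
 * Allocate a DMEM_CHUNK_SIZE chunk of VRAM and register it as device-private
 * memory. All but the last page go onto the free list; the last page is
 * returned to the caller, already accounted in chunk->callocated.
 */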
static int
nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
{
        struct nouveau_dmem_chunk *chunk;
        struct resource *res;
        struct page *page;
        void *ptr;
        unsigned long i, pfn_first;
        int ret;

        chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
        if (chunk == NULL) {
                ret = -ENOMEM;
                goto out;
        }

        /* Allocate unused physical address space for device private pages. */
        res = request_free_mem_region(&iomem_resource, DMEM_CHUNK_SIZE,
                                      "nouveau_dmem");
        if (IS_ERR(res)) {
                ret = PTR_ERR(res);
                goto out_free;
        }

        chunk->drm = drm;
        chunk->pagemap.type = MEMORY_DEVICE_PRIVATE;
        chunk->pagemap.res = *res;
        chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
        chunk->pagemap.owner = drm->dev;

        ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
                             TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL,
                             &chunk->bo);
        if (ret)
                goto out_release;

        ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
        if (ret)
                goto out_bo_free;

        ptr = memremap_pages(&chunk->pagemap, numa_node_id());
        if (IS_ERR(ptr)) {
                ret = PTR_ERR(ptr);
                goto out_bo_unpin;
        }

        mutex_lock(&drm->dmem->mutex);
        list_add(&chunk->list, &drm->dmem->chunks);
        mutex_unlock(&drm->dmem->mutex);

        pfn_first = chunk->pagemap.res.start >> PAGE_SHIFT;
        page = pfn_to_page(pfn_first);
        spin_lock(&drm->dmem->lock);
        for (i = 0; i < DMEM_CHUNK_NPAGES - 1; ++i, ++page) {
                page->zone_device_data = drm->dmem->free_pages;
                drm->dmem->free_pages = page;
        }
        *ppage = page;
        chunk->callocated++;
        spin_unlock(&drm->dmem->lock);

        NV_INFO(drm, "DMEM: registered %ldMB of device memory\n",
                DMEM_CHUNK_SIZE >> 20);

        return 0;

out_bo_unpin:
        nouveau_bo_unpin(chunk->bo);
out_bo_free:
        nouveau_bo_ref(NULL, &chunk->bo);
out_release:
        release_mem_region(chunk->pagemap.res.start,
                           resource_size(&chunk->pagemap.res));
out_free:
        kfree(chunk);
out:
        return ret;
}

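/*
 * Take a device-private page off the free list, allocating a new chunk if
 * the list is empty. The returned page is referenced and locked.
 */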
static struct page *
nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk;
        struct page *page = NULL;
        int ret;

        spin_lock(&drm->dmem->lock);
        if (drm->dmem->free_pages) {
                page = drm->dmem->free_pages;
                drm->dmem->free_pages = page->zone_device_data;
                chunk = nouveau_page_to_chunk(page);
                chunk->callocated++;
                spin_unlock(&drm->dmem->lock);
        } else {
                spin_unlock(&drm->dmem->lock);
                ret = nouveau_dmem_chunk_alloc(drm, &page);
                if (ret)
                        return NULL;
        }

        get_page(page);
        lock_page(page);
        return page;
}

static void
nouveau_dmem_page_free_locked(struct nouveau_drm *drm, struct page *page)
{
        unlock_page(page);
        put_page(page);
}

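/* Re-pin each chunk's backing BO in VRAM on resume; suspend unpins them. */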
void
nouveau_dmem_resume(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk;
        int ret;

        if (drm->dmem == NULL)
                return;

        mutex_lock(&drm->dmem->mutex);
        list_for_each_entry(chunk, &drm->dmem->chunks, list) {
                ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
                /* FIXME handle pin failure */
                WARN_ON(ret);
        }
        mutex_unlock(&drm->dmem->mutex);
}

void
nouveau_dmem_suspend(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk;

        if (drm->dmem == NULL)
                return;

        mutex_lock(&drm->dmem->mutex);
        list_for_each_entry(chunk, &drm->dmem->chunks, list)
                nouveau_bo_unpin(chunk->bo);
        mutex_unlock(&drm->dmem->mutex);
}

void
nouveau_dmem_fini(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk, *tmp;

        if (drm->dmem == NULL)
                return;

        mutex_lock(&drm->dmem->mutex);

        list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
                nouveau_bo_unpin(chunk->bo);
                nouveau_bo_ref(NULL, &chunk->bo);
                list_del(&chunk->list);
                memunmap_pages(&chunk->pagemap);
                release_mem_region(chunk->pagemap.res.start,
                                   resource_size(&chunk->pagemap.res));
                kfree(chunk);
        }

        mutex_unlock(&drm->dmem->mutex);
}

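/*
 * Copy-engine implementation of the migrate copy callback: emits a
 * multi-line LAUNCH_DMA that copies npages PAGE_SIZE pages between the given
 * source and destination apertures.
 */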
static int
nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
                    enum nouveau_aper dst_aper, u64 dst_addr,
                    enum nouveau_aper src_aper, u64 src_addr)
{
        struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
        u32 launch_dma = (1 << 9) /* MULTI_LINE_ENABLE. */ |
                         (1 << 8) /* DST_MEMORY_LAYOUT_PITCH. */ |
                         (1 << 7) /* SRC_MEMORY_LAYOUT_PITCH. */ |
                         (1 << 2) /* FLUSH_ENABLE_TRUE. */ |
                         (2 << 0) /* DATA_TRANSFER_TYPE_NON_PIPELINED. */;
        int ret;

        ret = PUSH_WAIT(push, 13);
        if (ret)
                return ret;

        if (src_aper != NOUVEAU_APER_VIRT) {
                switch (src_aper) {
                case NOUVEAU_APER_VRAM:
                        PUSH_NVIM(push, NVA0B5, 0x0260, 0);
                        break;
                case NOUVEAU_APER_HOST:
                        PUSH_NVIM(push, NVA0B5, 0x0260, 1);
                        break;
                default:
                        return -EINVAL;
                }
                launch_dma |= 0x00001000; /* SRC_TYPE_PHYSICAL. */
        }

        if (dst_aper != NOUVEAU_APER_VIRT) {
                switch (dst_aper) {
                case NOUVEAU_APER_VRAM:
                        PUSH_NVIM(push, NVA0B5, 0x0264, 0);
                        break;
                case NOUVEAU_APER_HOST:
                        PUSH_NVIM(push, NVA0B5, 0x0264, 1);
                        break;
                default:
                        return -EINVAL;
                }
                launch_dma |= 0x00002000; /* DST_TYPE_PHYSICAL. */
        }

        PUSH_NVSQ(push, NVA0B5, 0x0400, upper_32_bits(src_addr),
                                0x0404, lower_32_bits(src_addr),
                                0x0408, upper_32_bits(dst_addr),
                                0x040c, lower_32_bits(dst_addr),
                                0x0410, PAGE_SIZE,
                                0x0414, PAGE_SIZE,
                                0x0418, PAGE_SIZE,
                                0x041c, npages);
        PUSH_NVSQ(push, NVA0B5, 0x0300, launch_dma);
        return 0;
}

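/*
 * Copy-engine implementation of the clear callback: uses the LAUNCH_DMA
 * remap path with constant destination components to fill length bytes at
 * dst_addr with zeroes.
 */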
static int
nvc0b5_migrate_clear(struct nouveau_drm *drm, u32 length,
                     enum nouveau_aper dst_aper, u64 dst_addr)
{
        struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
        u32 launch_dma = (1 << 10) /* REMAP_ENABLE_TRUE. */ |
                         (1 << 8) /* DST_MEMORY_LAYOUT_PITCH. */ |
                         (1 << 7) /* SRC_MEMORY_LAYOUT_PITCH. */ |
                         (1 << 2) /* FLUSH_ENABLE_TRUE. */ |
                         (2 << 0) /* DATA_TRANSFER_TYPE_NON_PIPELINED. */;
        u32 remap = (4 <<  0) /* DST_X_CONST_A */ |
                    (5 <<  4) /* DST_Y_CONST_B */ |
                    (3 << 16) /* COMPONENT_SIZE_FOUR */ |
                    (1 << 24) /* NUM_DST_COMPONENTS_TWO */;
        int ret;

        ret = PUSH_WAIT(push, 12);
        if (ret)
                return ret;

        switch (dst_aper) {
        case NOUVEAU_APER_VRAM:
                PUSH_NVIM(push, NVA0B5, 0x0264, 0);
                break;
        case NOUVEAU_APER_HOST:
                PUSH_NVIM(push, NVA0B5, 0x0264, 1);
                break;
        default:
                return -EINVAL;
        }
        launch_dma |= 0x00002000; /* DST_TYPE_PHYSICAL. */

        PUSH_NVSQ(push, NVA0B5, 0x0700, 0,
                                0x0704, 0,
                                0x0708, remap);
        PUSH_NVSQ(push, NVA0B5, 0x0408, upper_32_bits(dst_addr),
                                0x040c, lower_32_bits(dst_addr));
        PUSH_NVSQ(push, NVA0B5, 0x0418, length >> 3);
        PUSH_NVSQ(push, NVA0B5, 0x0300, launch_dma);
        return 0;
}

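/*
 * Hook up the copy/clear callbacks if the copy-engine class bound by TTM is
 * one we know how to drive; otherwise device memory migration is unavailable.
 */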
static int
nouveau_dmem_migrate_init(struct nouveau_drm *drm)
{
        switch (drm->ttm.copy.oclass) {
        case PASCAL_DMA_COPY_A:
        case PASCAL_DMA_COPY_B:
        case  VOLTA_DMA_COPY_A:
        case TURING_DMA_COPY_A:
                drm->dmem->migrate.copy_func = nvc0b5_migrate_copy;
                drm->dmem->migrate.clear_func = nvc0b5_migrate_clear;
                drm->dmem->migrate.chan = drm->ttm.chan;
                return 0;
        default:
                break;
        }
        return -ENODEV;
}

void
nouveau_dmem_init(struct nouveau_drm *drm)
{
        int ret;

        /* This only makes sense on PASCAL or newer. */
        if (drm->client.device.info.family < NV_DEVICE_INFO_V0_PASCAL)
                return;

        if (!(drm->dmem = kzalloc(sizeof(*drm->dmem), GFP_KERNEL)))
                return;

        drm->dmem->drm = drm;
        mutex_init(&drm->dmem->mutex);
        INIT_LIST_HEAD(&drm->dmem->chunks);
        spin_lock_init(&drm->dmem->lock);

        /* Initialize migration dma helpers before registering memory */
        ret = nouveau_dmem_migrate_init(drm);
        if (ret) {
                kfree(drm->dmem);
                drm->dmem = NULL;
        }
}

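/*
 * Migrate a single page to VRAM: allocate a device-private page, copy the
 * source page into its VRAM backing (or clear it when there is no source
 * page), and record the GPU pfn used later to update the GPU page tables.
 * Returns the encoded destination pfn, or 0 on failure.
 */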
static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
                unsigned long src, dma_addr_t *dma_addr, u64 *pfn)
{
        struct device *dev = drm->dev->dev;
        struct page *dpage, *spage;
        unsigned long paddr;

        spage = migrate_pfn_to_page(src);
        if (!(src & MIGRATE_PFN_MIGRATE))
                goto out;

        dpage = nouveau_dmem_page_alloc_locked(drm);
        if (!dpage)
                goto out;

        paddr = nouveau_dmem_page_addr(dpage);
        if (spage) {
                *dma_addr = dma_map_page(dev, spage, 0, page_size(spage),
                                         DMA_BIDIRECTIONAL);
                if (dma_mapping_error(dev, *dma_addr))
                        goto out_free_page;
                if (drm->dmem->migrate.copy_func(drm, 1,
                        NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST, *dma_addr))
                        goto out_dma_unmap;
        } else {
                *dma_addr = DMA_MAPPING_ERROR;
                if (drm->dmem->migrate.clear_func(drm, page_size(dpage),
                        NOUVEAU_APER_VRAM, paddr))
                        goto out_free_page;
        }

        *pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
                ((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
        if (src & MIGRATE_PFN_WRITE)
                *pfn |= NVIF_VMM_PFNMAP_V0_W;
        return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;

out_dma_unmap:
        dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
out_free_page:
        nouveau_dmem_page_free_locked(drm, dpage);
out:
        *pfn = NVIF_VMM_PFNMAP_V0_NONE;
        return 0;
}

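/*
 * Migrate one batch of pages prepared by migrate_vma_setup(): copy each page
 * to VRAM, fence the copies, and point the GPU page tables at the new
 * locations via nouveau_pfns_map().
 */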
static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
                struct nouveau_svmm *svmm, struct migrate_vma *args,
                dma_addr_t *dma_addrs, u64 *pfns)
{
        struct nouveau_fence *fence;
        unsigned long addr = args->start, nr_dma = 0, i;

        for (i = 0; addr < args->end; i++) {
                args->dst[i] = nouveau_dmem_migrate_copy_one(drm, args->src[i],
                                dma_addrs + nr_dma, pfns + i);
                if (!dma_mapping_error(drm->dev->dev, dma_addrs[nr_dma]))
                        nr_dma++;
                addr += PAGE_SIZE;
        }

        nouveau_fence_new(drm->dmem->migrate.chan, false, &fence);
        migrate_vma_pages(args);
        nouveau_dmem_fence_done(&fence);
        nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);

        while (nr_dma--) {
                dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE,
                                DMA_BIDIRECTIONAL);
        }
        migrate_vma_finalize(args);
}

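/*
 * Migrate the range [start, end) of a VMA to VRAM, in batches of at most
 * SG_MAX_SINGLE_ALLOC pages.
 */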
int
nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
                         struct nouveau_svmm *svmm,
                         struct vm_area_struct *vma,
                         unsigned long start,
                         unsigned long end)
{
        unsigned long npages = (end - start) >> PAGE_SHIFT;
        unsigned long max = min(SG_MAX_SINGLE_ALLOC, npages);
        dma_addr_t *dma_addrs;
        struct migrate_vma args = {
                .vma            = vma,
                .start          = start,
        };
        unsigned long i;
        u64 *pfns;
        int ret = -ENOMEM;

        if (drm->dmem == NULL)
                return -ENODEV;

        args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
        if (!args.src)
                goto out;
        args.dst = kcalloc(max, sizeof(*args.dst), GFP_KERNEL);
        if (!args.dst)
                goto out_free_src;

        dma_addrs = kmalloc_array(max, sizeof(*dma_addrs), GFP_KERNEL);
        if (!dma_addrs)
                goto out_free_dst;

        pfns = nouveau_pfns_alloc(max);
        if (!pfns)
                goto out_free_dma;

        for (i = 0; i < npages; i += max) {
                /* Advance in batches of at most "max" pages, capped at end. */
                args.end = min(end, args.start + (max << PAGE_SHIFT));
                ret = migrate_vma_setup(&args);
                if (ret)
                        goto out_free_pfns;

                if (args.cpages)
                        nouveau_dmem_migrate_chunk(drm, svmm, &args, dma_addrs,
                                                   pfns);
                args.start = args.end;
        }

        ret = 0;
out_free_pfns:
        nouveau_pfns_free(pfns);
out_free_dma:
        kfree(dma_addrs);
out_free_dst:
        kfree(args.dst);
out_free_src:
        kfree(args.src);
out:
        return ret;
}