/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_dmem.h"
#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_dma.h"
#include "nouveau_mem.h"
#include "nouveau_bo.h"
#include "nouveau_svm.h"

#include <nvif/class.h>
#include <nvif/object.h>
#include <nvif/push906f.h>
#include <nvif/if000c.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>

#include <linux/sched/mm.h>
#include <linux/hmm.h>

/*
 * FIXME: this is ugly. Right now we use TTM to allocate vram and we pin
 * it in vram while in use. We likely want to overhaul memory management for
 * nouveau to be more page-like (not necessarily with system page size but a
 * bigger page size) at the lowest level and have some shim layer on top that
 * would provide the same functionality as TTM.
 */
#define DMEM_CHUNK_SIZE (2UL << 20)
#define DMEM_CHUNK_NPAGES (DMEM_CHUNK_SIZE >> PAGE_SHIFT)

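/*
 * Device memory is carved up into 2MB chunks, each backed by a pinned TTM
 * buffer object in VRAM and exposed to the CPU as MEMORY_DEVICE_PRIVATE
 * struct pages. The aperture enum below names the address spaces the copy
 * engine can target: GPU virtual, VRAM physical, or host (system) physical.
 */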
enum nouveau_aper {
        NOUVEAU_APER_VIRT,
        NOUVEAU_APER_VRAM,
        NOUVEAU_APER_HOST,
};

typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
                                      enum nouveau_aper, u64 dst_addr,
                                      enum nouveau_aper, u64 src_addr);
typedef int (*nouveau_clear_page_t)(struct nouveau_drm *drm, u32 length,
                                    enum nouveau_aper, u64 dst_addr);

struct nouveau_dmem_chunk {
        struct list_head list;
        struct nouveau_bo *bo;
        struct nouveau_drm *drm;
        unsigned long callocated;
        struct dev_pagemap pagemap;
};

struct nouveau_dmem_migrate {
        nouveau_migrate_copy_t copy_func;
        nouveau_clear_page_t clear_func;
        struct nouveau_channel *chan;
};

struct nouveau_dmem {
        struct nouveau_drm *drm;
        struct nouveau_dmem_migrate migrate;
        struct list_head chunks;
        struct mutex mutex;
        struct page *free_pages;
        spinlock_t lock;
};

static struct nouveau_dmem_chunk *nouveau_page_to_chunk(struct page *page)
{
        return container_of(page->pgmap, struct nouveau_dmem_chunk, pagemap);
}

static struct nouveau_drm *page_to_drm(struct page *page)
{
        struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);

        return chunk->drm;
}

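/*
 * Translate a device-private struct page into the physical VRAM address the
 * copy engine uses: the page's offset within its chunk's resource range,
 * added to the chunk's buffer object offset in VRAM.
 */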
unsigned long nouveau_dmem_page_addr(struct page *page)
{
        struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
        unsigned long off = (page_to_pfn(page) << PAGE_SHIFT) -
                                chunk->pagemap.res.start;

        return chunk->bo->offset + off;
}

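/*
 * dev_pagemap_ops.page_free callback: the last reference to a device-private
 * page has been dropped, so put it back on the dmem free list.
 */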
static void nouveau_dmem_page_free(struct page *page)
{
        struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
        struct nouveau_dmem *dmem = chunk->drm->dmem;

        spin_lock(&dmem->lock);
        page->zone_device_data = dmem->free_pages;
        dmem->free_pages = page;

        WARN_ON(!chunk->callocated);
        chunk->callocated--;
        /*
         * FIXME when chunk->callocated reaches 0 we should add the chunk to
         * a reclaim list so that it can be freed in case of memory pressure.
         */
        spin_unlock(&dmem->lock);
}

static void nouveau_dmem_fence_done(struct nouveau_fence **fence)
{
        if (fence) {
                nouveau_fence_wait(*fence, true, false);
                nouveau_fence_unref(fence);
        } else {
                /*
                 * FIXME: wait for the channel to be IDLE before finalizing
                 * the hmem object.
                 */
        }
}

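/*
 * Copy a single faulting page out of VRAM: allocate a system page, DMA-map
 * it, and have the GPU copy engine write the VRAM contents into it. On
 * success the destination pfn is stored in args->dst[0] for migrate_vma.
 */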
static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
                struct vm_fault *vmf, struct migrate_vma *args,
                dma_addr_t *dma_addr)
{
        struct device *dev = drm->dev->dev;
        struct page *dpage, *spage;

        spage = migrate_pfn_to_page(args->src[0]);
        if (!spage || !(args->src[0] & MIGRATE_PFN_MIGRATE))
                return 0;

        dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
        if (!dpage)
                return VM_FAULT_SIGBUS;
        lock_page(dpage);

        *dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, *dma_addr))
                goto error_free_page;

        if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr,
                        NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage)))
                goto error_dma_unmap;

        args->dst[0] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
        return 0;

error_dma_unmap:
        dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
error_free_page:
        __free_page(dpage);
        return VM_FAULT_SIGBUS;
}

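/*
 * CPU fault handler for device-private pages: migrate the faulting page back
 * to system memory so the CPU can access it again.
 */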
static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
{
        struct nouveau_drm *drm = page_to_drm(vmf->page);
        struct nouveau_dmem *dmem = drm->dmem;
        struct nouveau_fence *fence;
        unsigned long src = 0, dst = 0;
        dma_addr_t dma_addr = 0;
        vm_fault_t ret;
        struct migrate_vma args = {
                .vma = vmf->vma,
                .start = vmf->address,
                .end = vmf->address + PAGE_SIZE,
                .src = &src,
                .dst = &dst,
                .src_owner = drm->dev,
        };

        /*
         * FIXME what we really want is to find some heuristic to migrate more
         * than just one page on CPU fault. When such a fault happens it is
         * very likely that more surrounding pages will CPU fault too.
         */
        if (migrate_vma_setup(&args) < 0)
                return VM_FAULT_SIGBUS;
        if (!args.cpages)
                return 0;

        ret = nouveau_dmem_fault_copy_one(drm, vmf, &args, &dma_addr);
        if (ret || dst == 0)
                goto done;

        nouveau_fence_new(dmem->migrate.chan, false, &fence);
        migrate_vma_pages(&args);
        nouveau_dmem_fence_done(&fence);
        dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
done:
        migrate_vma_finalize(&args);
        return ret;
}

static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
        .page_free = nouveau_dmem_page_free,
        .migrate_to_ram = nouveau_dmem_migrate_to_ram,
};

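/*
 * Allocate one DMEM chunk: reserve a free physical address range for
 * device-private pages, back it with a pinned VRAM buffer object, register
 * the pages with memremap_pages(), and put all but the last page on the
 * free list. The last page is returned to the caller via @ppage.
 */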
static int
nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
{
        struct nouveau_dmem_chunk *chunk;
        struct resource *res;
        struct page *page;
        void *ptr;
        unsigned long i, pfn_first;
        int ret;

        chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
        if (chunk == NULL) {
                ret = -ENOMEM;
                goto out;
        }

        /* Allocate unused physical address space for device private pages. */
        res = request_free_mem_region(&iomem_resource, DMEM_CHUNK_SIZE,
                                      "nouveau_dmem");
        if (IS_ERR(res)) {
                ret = PTR_ERR(res);
                goto out_free;
        }

        chunk->drm = drm;
        chunk->pagemap.type = MEMORY_DEVICE_PRIVATE;
        chunk->pagemap.res = *res;
        chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
        chunk->pagemap.owner = drm->dev;

        ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
                             TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL,
                             &chunk->bo);
        if (ret)
                goto out_release;

        ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
        if (ret)
                goto out_bo_free;

        ptr = memremap_pages(&chunk->pagemap, numa_node_id());
        if (IS_ERR(ptr)) {
                ret = PTR_ERR(ptr);
                goto out_bo_unpin;
        }

        mutex_lock(&drm->dmem->mutex);
        list_add(&chunk->list, &drm->dmem->chunks);
        mutex_unlock(&drm->dmem->mutex);

        pfn_first = chunk->pagemap.res.start >> PAGE_SHIFT;
        page = pfn_to_page(pfn_first);
        spin_lock(&drm->dmem->lock);
        for (i = 0; i < DMEM_CHUNK_NPAGES - 1; ++i, ++page) {
                page->zone_device_data = drm->dmem->free_pages;
                drm->dmem->free_pages = page;
        }
        *ppage = page;
        chunk->callocated++;
        spin_unlock(&drm->dmem->lock);

        NV_INFO(drm, "DMEM: registered %ldMB of device memory\n",
                DMEM_CHUNK_SIZE >> 20);

        return 0;

out_bo_unpin:
        nouveau_bo_unpin(chunk->bo);
out_bo_free:
        nouveau_bo_ref(NULL, &chunk->bo);
out_release:
        release_mem_region(chunk->pagemap.res.start,
                           resource_size(&chunk->pagemap.res));
out_free:
        kfree(chunk);
out:
        return ret;
}

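/*
 * Grab a page off the free list, allocating a fresh chunk if the list is
 * empty. The returned page is referenced and locked, ready to be handed to
 * migrate_vma as a destination.
 */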
static struct page *
nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk;
        struct page *page = NULL;
        int ret;

        spin_lock(&drm->dmem->lock);
        if (drm->dmem->free_pages) {
                page = drm->dmem->free_pages;
                drm->dmem->free_pages = page->zone_device_data;
                chunk = nouveau_page_to_chunk(page);
                chunk->callocated++;
                spin_unlock(&drm->dmem->lock);
        } else {
                spin_unlock(&drm->dmem->lock);
                ret = nouveau_dmem_chunk_alloc(drm, &page);
                if (ret)
                        return NULL;
        }

        get_page(page);
        lock_page(page);
        return page;
}

static void
nouveau_dmem_page_free_locked(struct nouveau_drm *drm, struct page *page)
{
        unlock_page(page);
        put_page(page);
}

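/*
 * Suspend/resume: chunk buffer objects must stay pinned in VRAM while their
 * pages are in use, so re-pin every chunk on resume and unpin on suspend.
 */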
void
nouveau_dmem_resume(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk;
        int ret;

        if (drm->dmem == NULL)
                return;

        mutex_lock(&drm->dmem->mutex);
        list_for_each_entry(chunk, &drm->dmem->chunks, list) {
                ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
                /* FIXME handle pin failure */
                WARN_ON(ret);
        }
        mutex_unlock(&drm->dmem->mutex);
}

void
nouveau_dmem_suspend(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk;

        if (drm->dmem == NULL)
                return;

        mutex_lock(&drm->dmem->mutex);
        list_for_each_entry(chunk, &drm->dmem->chunks, list)
                nouveau_bo_unpin(chunk->bo);
        mutex_unlock(&drm->dmem->mutex);
}

void
nouveau_dmem_fini(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk, *tmp;

        if (drm->dmem == NULL)
                return;

        mutex_lock(&drm->dmem->mutex);

        list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
                nouveau_bo_unpin(chunk->bo);
                nouveau_bo_ref(NULL, &chunk->bo);
                list_del(&chunk->list);
                memunmap_pages(&chunk->pagemap);
                release_mem_region(chunk->pagemap.res.start,
                                   resource_size(&chunk->pagemap.res));
                kfree(chunk);
        }

        mutex_unlock(&drm->dmem->mutex);
}

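/*
 * Submit a copy of @npages pages on the NVA0B5-class copy engine. Source and
 * destination are pitch-linear; when an aperture is not GPU-virtual, the
 * 0x0260/0x0264 methods select whether the physical address is in VRAM or
 * coherent system memory and the transfer uses physical addressing.
 */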
static int
nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
                    enum nouveau_aper dst_aper, u64 dst_addr,
                    enum nouveau_aper src_aper, u64 src_addr)
{
        struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
        u32 launch_dma = (1 << 9) /* MULTI_LINE_ENABLE. */ |
                         (1 << 8) /* DST_MEMORY_LAYOUT_PITCH. */ |
                         (1 << 7) /* SRC_MEMORY_LAYOUT_PITCH. */ |
                         (1 << 2) /* FLUSH_ENABLE_TRUE. */ |
                         (2 << 0) /* DATA_TRANSFER_TYPE_NON_PIPELINED. */;
        int ret;

        ret = PUSH_WAIT(push, 13);
        if (ret)
                return ret;

        if (src_aper != NOUVEAU_APER_VIRT) {
                switch (src_aper) {
                case NOUVEAU_APER_VRAM:
                        PUSH_NVIM(push, NVA0B5, 0x0260, 0);
                        break;
                case NOUVEAU_APER_HOST:
                        PUSH_NVIM(push, NVA0B5, 0x0260, 1);
                        break;
                default:
                        return -EINVAL;
                }
                launch_dma |= 0x00001000; /* SRC_TYPE_PHYSICAL. */
        }

        if (dst_aper != NOUVEAU_APER_VIRT) {
                switch (dst_aper) {
                case NOUVEAU_APER_VRAM:
                        PUSH_NVIM(push, NVA0B5, 0x0264, 0);
                        break;
                case NOUVEAU_APER_HOST:
                        PUSH_NVIM(push, NVA0B5, 0x0264, 1);
                        break;
                default:
                        return -EINVAL;
                }
                launch_dma |= 0x00002000; /* DST_TYPE_PHYSICAL. */
        }

        PUSH_NVSQ(push, NVA0B5, 0x0400, upper_32_bits(src_addr),
                                0x0404, lower_32_bits(src_addr),
                                0x0408, upper_32_bits(dst_addr),
                                0x040c, lower_32_bits(dst_addr),
                                0x0410, PAGE_SIZE,
                                0x0414, PAGE_SIZE,
                                0x0418, PAGE_SIZE,
                                0x041c, npages);
        PUSH_NVSQ(push, NVA0B5, 0x0300, launch_dma);
        return 0;
}

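/*
 * Zero-fill @length bytes at @dst_addr using the copy engine's REMAP path:
 * both destination components are fed from the constant registers, which are
 * set to zero below, so no source surface is read. With two four-byte
 * components per element, the line length is @length >> 3.
 */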
static int
nvc0b5_migrate_clear(struct nouveau_drm *drm, u32 length,
                     enum nouveau_aper dst_aper, u64 dst_addr)
{
        struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
        u32 launch_dma = (1 << 10) /* REMAP_ENABLE_TRUE */ |
                         (1 << 8) /* DST_MEMORY_LAYOUT_PITCH. */ |
                         (1 << 7) /* SRC_MEMORY_LAYOUT_PITCH. */ |
                         (1 << 2) /* FLUSH_ENABLE_TRUE. */ |
                         (2 << 0) /* DATA_TRANSFER_TYPE_NON_PIPELINED. */;
        u32 remap = (4 << 0) /* DST_X_CONST_A */ |
                    (5 << 4) /* DST_Y_CONST_B */ |
                    (3 << 16) /* COMPONENT_SIZE_FOUR */ |
                    (1 << 24) /* NUM_DST_COMPONENTS_TWO */;
        int ret;

        ret = PUSH_WAIT(push, 12);
        if (ret)
                return ret;

        switch (dst_aper) {
        case NOUVEAU_APER_VRAM:
                PUSH_NVIM(push, NVA0B5, 0x0264, 0);
                break;
        case NOUVEAU_APER_HOST:
                PUSH_NVIM(push, NVA0B5, 0x0264, 1);
                break;
        default:
                return -EINVAL;
        }
        launch_dma |= 0x00002000; /* DST_TYPE_PHYSICAL. */

        PUSH_NVSQ(push, NVA0B5, 0x0700, 0,
                                0x0704, 0,
                                0x0708, remap);
        PUSH_NVSQ(push, NVA0B5, 0x0408, upper_32_bits(dst_addr),
                                0x040c, lower_32_bits(dst_addr));
        PUSH_NVSQ(push, NVA0B5, 0x0418, length >> 3);
        PUSH_NVSQ(push, NVA0B5, 0x0300, launch_dma);
        return 0;
}

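/*
 * Hook up the copy/clear helpers for the copy-engine classes we know how to
 * drive; any other class falls through and disables device memory support.
 */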
static int
nouveau_dmem_migrate_init(struct nouveau_drm *drm)
{
        switch (drm->ttm.copy.oclass) {
        case PASCAL_DMA_COPY_A:
        case PASCAL_DMA_COPY_B:
        case VOLTA_DMA_COPY_A:
        case TURING_DMA_COPY_A:
                drm->dmem->migrate.copy_func = nvc0b5_migrate_copy;
                drm->dmem->migrate.clear_func = nvc0b5_migrate_clear;
                drm->dmem->migrate.chan = drm->ttm.chan;
                return 0;
        default:
                break;
        }
        return -ENODEV;
}

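/*
 * Called at driver load: set up the nouveau_dmem state and the migration
 * helpers. On any failure we simply run without device memory support.
 */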
void
nouveau_dmem_init(struct nouveau_drm *drm)
{
        int ret;

        /* This only makes sense on PASCAL or newer */
        if (drm->client.device.info.family < NV_DEVICE_INFO_V0_PASCAL)
                return;

        if (!(drm->dmem = kzalloc(sizeof(*drm->dmem), GFP_KERNEL)))
                return;

        drm->dmem->drm = drm;
        mutex_init(&drm->dmem->mutex);
        INIT_LIST_HEAD(&drm->dmem->chunks);
        spin_lock_init(&drm->dmem->lock);

        /* Initialize migration dma helpers before registering memory */
        ret = nouveau_dmem_migrate_init(drm);
        if (ret) {
                kfree(drm->dmem);
                drm->dmem = NULL;
        }
}

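/*
 * Migrate one page into VRAM: allocate a device page, then either copy the
 * source system page into it (DMA-mapping the source for the copy engine)
 * or, when there is no source page, clear the device page. Also cook the
 * NVIF pfn value used to update the GPU page tables, and return the migrate
 * pfn for args->dst.
 */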
static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
                unsigned long src, dma_addr_t *dma_addr, u64 *pfn)
{
        struct device *dev = drm->dev->dev;
        struct page *dpage, *spage;
        unsigned long paddr;

        spage = migrate_pfn_to_page(src);
        if (!(src & MIGRATE_PFN_MIGRATE))
                goto out;

        dpage = nouveau_dmem_page_alloc_locked(drm);
        if (!dpage)
                goto out;

        paddr = nouveau_dmem_page_addr(dpage);
        if (spage) {
                *dma_addr = dma_map_page(dev, spage, 0, page_size(spage),
                                         DMA_BIDIRECTIONAL);
                if (dma_mapping_error(dev, *dma_addr))
                        goto out_free_page;
                if (drm->dmem->migrate.copy_func(drm, 1,
                        NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST, *dma_addr))
                        goto out_dma_unmap;
        } else {
                *dma_addr = DMA_MAPPING_ERROR;
                if (drm->dmem->migrate.clear_func(drm, page_size(dpage),
                        NOUVEAU_APER_VRAM, paddr))
                        goto out_free_page;
        }

        *pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
                ((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
        if (src & MIGRATE_PFN_WRITE)
                *pfn |= NVIF_VMM_PFNMAP_V0_W;
        return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;

out_dma_unmap:
        dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
out_free_page:
        nouveau_dmem_page_free_locked(drm, dpage);
out:
        *pfn = NVIF_VMM_PFNMAP_V0_NONE;
        return 0;
}

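/*
 * Migrate one batch of pages set up by migrate_vma_setup(): copy every
 * migratable page to VRAM, fence the copies, commit the migration, and map
 * the new VRAM pages into the GPU's page tables before unmapping the DMA
 * addresses used for the copies.
 */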
static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
                struct nouveau_svmm *svmm, struct migrate_vma *args,
                dma_addr_t *dma_addrs, u64 *pfns)
{
        struct nouveau_fence *fence;
        unsigned long addr = args->start, nr_dma = 0, i;

        for (i = 0; addr < args->end; i++) {
                args->dst[i] = nouveau_dmem_migrate_copy_one(drm, args->src[i],
                                dma_addrs + nr_dma, pfns + i);
                if (!dma_mapping_error(drm->dev->dev, dma_addrs[nr_dma]))
                        nr_dma++;
                addr += PAGE_SIZE;
        }

        nouveau_fence_new(drm->dmem->migrate.chan, false, &fence);
        migrate_vma_pages(args);
        nouveau_dmem_fence_done(&fence);
        nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);

        while (nr_dma--) {
                dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE,
                                DMA_BIDIRECTIONAL);
        }
        migrate_vma_finalize(args);
}

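/*
 * Entry point for migrating a virtual address range to device memory. The
 * range is processed in batches of at most SG_MAX_SINGLE_ALLOC pages, with
 * scratch arrays for the migrate pfns, DMA addresses, and GPU pfn values.
 */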
int
nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
                         struct nouveau_svmm *svmm,
                         struct vm_area_struct *vma,
                         unsigned long start,
                         unsigned long end)
{
        unsigned long npages = (end - start) >> PAGE_SHIFT;
        unsigned long max = min(SG_MAX_SINGLE_ALLOC, npages);
        dma_addr_t *dma_addrs;
        struct migrate_vma args = {
                .vma = vma,
                .start = start,
        };
        unsigned long i;
        u64 *pfns;
        int ret = -ENOMEM;

        if (drm->dmem == NULL)
                return -ENODEV;

        args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
        if (!args.src)
                goto out;
        args.dst = kcalloc(max, sizeof(*args.dst), GFP_KERNEL);
        if (!args.dst)
                goto out_free_src;

        dma_addrs = kmalloc_array(max, sizeof(*dma_addrs), GFP_KERNEL);
        if (!dma_addrs)
                goto out_free_dst;

        pfns = nouveau_pfns_alloc(max);
        if (!pfns)
                goto out_free_dma;

        for (i = 0; i < npages; i += max) {
                args.end = min(end, args.start + (max << PAGE_SHIFT));
                ret = migrate_vma_setup(&args);
                if (ret)
                        goto out_free_pfns;

                if (args.cpages)
                        nouveau_dmem_migrate_chunk(drm, svmm, &args, dma_addrs,
                                                   pfns);
                args.start = args.end;
        }

        ret = 0;
out_free_pfns:
        nouveau_pfns_free(pfns);
out_free_dma:
        kfree(dma_addrs);
out_free_dst:
        kfree(args.dst);
out_free_src:
        kfree(args.src);
out:
        return ret;
}