/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_dmem.h"
#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_dma.h"
#include "nouveau_mem.h"
#include "nouveau_bo.h"

#include <nvif/class.h>
#include <nvif/object.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>

#include <linux/sched/mm.h>
#include <linux/hmm.h>

/*
 * FIXME: this is ugly. Right now we are using TTM to allocate vram and we
 * pin it in vram while in use. We likely want to overhaul memory management
 * for nouveau to be more page like (not necessarily with system page size but
 * a bigger page size) at the lowest level and have some shim layer on top
 * that would provide the same functionality as TTM.
 */
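
/*
 * Device memory is carved into 2MB chunks; with 4KiB system pages that is
 * 512 device pages per chunk.
 */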
#define DMEM_CHUNK_SIZE (2UL << 20)
#define DMEM_CHUNK_NPAGES (DMEM_CHUNK_SIZE >> PAGE_SHIFT)

struct nouveau_migrate;

enum nouveau_aper {
	NOUVEAU_APER_VIRT,
	NOUVEAU_APER_VRAM,
	NOUVEAU_APER_HOST,
};

typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
				      enum nouveau_aper, u64 dst_addr,
				      enum nouveau_aper, u64 src_addr);
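
/*
 * A chunk is a 2MB slice of VRAM backed by a pinned TTM buffer object;
 * device pages are handed out from it with a simple bitmap allocator.
 */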
struct nouveau_dmem_chunk {
	struct list_head list;
	struct nouveau_bo *bo;
	struct nouveau_drm *drm;
	unsigned long pfn_first;
	unsigned long callocated;
	unsigned long bitmap[BITS_TO_LONGS(DMEM_CHUNK_NPAGES)];
	spinlock_t lock;
};

struct nouveau_dmem_migrate {
	nouveau_migrate_copy_t copy_func;
	struct nouveau_channel *chan;
};

struct nouveau_dmem {
	struct nouveau_drm *drm;
	struct dev_pagemap pagemap;
	struct nouveau_dmem_migrate migrate;
	struct list_head chunk_free;
	struct list_head chunk_full;
	struct list_head chunk_empty;
	struct mutex mutex;
};

static inline struct nouveau_dmem *page_to_dmem(struct page *page)
{
	return container_of(page->pgmap, struct nouveau_dmem, pagemap);
}

struct nouveau_dmem_fault {
	struct nouveau_drm *drm;
	struct nouveau_fence *fence;
	dma_addr_t *dma;
	unsigned long npages;
};

struct nouveau_migrate {
	struct vm_area_struct *vma;
	struct nouveau_drm *drm;
	struct nouveau_fence *fence;
	unsigned long npages;
	dma_addr_t *dma;
	unsigned long dma_nr;
};
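
/*
 * ZONE_DEVICE page_free() callback: the last reference to a device private
 * page has been dropped, so mark it free again in its chunk's bitmap.
 */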
static void nouveau_dmem_page_free(struct page *page)
{
	struct nouveau_dmem_chunk *chunk = page->zone_device_data;
	unsigned long idx = page_to_pfn(page) - chunk->pfn_first;

	/*
	 * FIXME:
	 *
	 * This is really a bad example, we need to overhaul nouveau memory
	 * management to be more page focused and allow a lighter locking
	 * scheme to be used in the process.
	 */
	spin_lock(&chunk->lock);
	clear_bit(idx, chunk->bitmap);
	WARN_ON(!chunk->callocated);
	chunk->callocated--;
	/*
	 * FIXME when chunk->callocated reaches 0 we should add the chunk to
	 * a reclaim list so that it can be freed in case of memory pressure.
	 */
	spin_unlock(&chunk->lock);
}
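
/*
 * migrate_vma() alloc_and_copy() callback used on CPU fault: allocate
 * anonymous system pages for the faulting range and copy the data out of
 * VRAM with the GPU copy engine.
 */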
static void
nouveau_dmem_fault_alloc_and_copy(struct vm_area_struct *vma,
				  const unsigned long *src_pfns,
				  unsigned long *dst_pfns,
				  unsigned long start,
				  unsigned long end,
				  void *private)
{
	struct nouveau_dmem_fault *fault = private;
	struct nouveau_drm *drm = fault->drm;
	struct device *dev = drm->dev->dev;
	unsigned long addr, i, npages = 0;
	nouveau_migrate_copy_t copy;
	int ret;

	/* First allocate new memory */
	for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
		struct page *dpage, *spage;

		dst_pfns[i] = 0;
		spage = migrate_pfn_to_page(src_pfns[i]);
		if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE))
			continue;

		dpage = alloc_page_vma(GFP_HIGHUSER, vma, addr);
		if (!dpage) {
			dst_pfns[i] = MIGRATE_PFN_ERROR;
			continue;
		}
		lock_page(dpage);

		dst_pfns[i] = migrate_pfn(page_to_pfn(dpage)) |
			      MIGRATE_PFN_LOCKED;
		npages++;
	}

	/* Allocate storage for DMA addresses, so we can unmap later. */
	fault->dma = kmalloc(sizeof(*fault->dma) * npages, GFP_KERNEL);
	if (!fault->dma)
		goto error;

	/* Copy things over */
	copy = drm->dmem->migrate.copy_func;
	for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
		struct nouveau_dmem_chunk *chunk;
		struct page *spage, *dpage;
		u64 src_addr, dst_addr;

		dpage = migrate_pfn_to_page(dst_pfns[i]);
		if (!dpage || dst_pfns[i] == MIGRATE_PFN_ERROR)
			continue;

		spage = migrate_pfn_to_page(src_pfns[i]);
		if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE)) {
			dst_pfns[i] = MIGRATE_PFN_ERROR;
			__free_page(dpage);
			continue;
		}

		fault->dma[fault->npages] =
			dma_map_page_attrs(dev, dpage, 0, PAGE_SIZE,
					   PCI_DMA_BIDIRECTIONAL,
					   DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(dev, fault->dma[fault->npages])) {
			dst_pfns[i] = MIGRATE_PFN_ERROR;
			__free_page(dpage);
			continue;
		}

		dst_addr = fault->dma[fault->npages++];

		chunk = spage->zone_device_data;
		src_addr = page_to_pfn(spage) - chunk->pfn_first;
		src_addr = (src_addr << PAGE_SHIFT) + chunk->bo->bo.offset;

		ret = copy(drm, 1, NOUVEAU_APER_HOST, dst_addr,
			   NOUVEAU_APER_VRAM, src_addr);
		if (ret) {
			dst_pfns[i] = MIGRATE_PFN_ERROR;
			__free_page(dpage);
			continue;
		}
	}

	nouveau_fence_new(drm->dmem->migrate.chan, false, &fault->fence);

	return;

error:
	for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, ++i) {
		struct page *page;

		if (!dst_pfns[i] || dst_pfns[i] == MIGRATE_PFN_ERROR)
			continue;

		page = migrate_pfn_to_page(dst_pfns[i]);
		dst_pfns[i] = MIGRATE_PFN_ERROR;
		if (page == NULL)
			continue;

		__free_page(page);
	}
}

void nouveau_dmem_fault_finalize_and_map(struct vm_area_struct *vma,
					 const unsigned long *src_pfns,
					 const unsigned long *dst_pfns,
					 unsigned long start,
					 unsigned long end,
					 void *private)
{
	struct nouveau_dmem_fault *fault = private;
	struct nouveau_drm *drm = fault->drm;

	if (fault->fence) {
		nouveau_fence_wait(fault->fence, true, false);
		nouveau_fence_unref(&fault->fence);
	} else {
		/*
		 * FIXME wait for channel to be IDLE before finalizing
		 * the hmem object below (nouveau_migrate_hmem_fini()).
		 */
	}

	while (fault->npages--) {
		dma_unmap_page(drm->dev->dev, fault->dma[fault->npages],
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	}
	kfree(fault->dma);
}

static const struct migrate_vma_ops nouveau_dmem_fault_migrate_ops = {
	.alloc_and_copy		= nouveau_dmem_fault_alloc_and_copy,
	.finalize_and_map	= nouveau_dmem_fault_finalize_and_map,
};
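
/*
 * dev_pagemap_ops migrate_to_ram() callback: a CPU access to device private
 * memory lands here, and the faulting page is migrated back to system RAM.
 */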
static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
{
	struct nouveau_dmem *dmem = page_to_dmem(vmf->page);
	unsigned long src[1] = {0}, dst[1] = {0};
	struct nouveau_dmem_fault fault = { .drm = dmem->drm };
	int ret;

	/*
	 * FIXME what we really want is to find some heuristic to migrate more
	 * than just one page on CPU fault. When such a fault happens it is
	 * very likely that more surrounding pages will CPU fault too.
	 */
	ret = migrate_vma(&nouveau_dmem_fault_migrate_ops, vmf->vma,
			vmf->address, vmf->address + PAGE_SIZE,
			src, dst, &fault);
	if (ret)
		return VM_FAULT_SIGBUS;

	if (dst[0] == MIGRATE_PFN_ERROR)
		return VM_FAULT_SIGBUS;

	return 0;
}

static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
	.page_free		= nouveau_dmem_page_free,
	.migrate_to_ram		= nouveau_dmem_migrate_to_ram,
};
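
/*
 * Back an empty chunk with VRAM: allocate and pin a 2MB buffer object and
 * reset the chunk's page allocation bitmap.
 */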
static int
nouveau_dmem_chunk_alloc(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk;
	int ret;

	if (drm->dmem == NULL)
		return -EINVAL;

	mutex_lock(&drm->dmem->mutex);
	chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
					 struct nouveau_dmem_chunk,
					 list);
	if (chunk == NULL) {
		mutex_unlock(&drm->dmem->mutex);
		return -ENOMEM;
	}

	list_del(&chunk->list);
	mutex_unlock(&drm->dmem->mutex);

	ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
			     TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL,
			     &chunk->bo);
	if (ret)
		goto out;

	ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
	if (ret) {
		nouveau_bo_ref(NULL, &chunk->bo);
		goto out;
	}

	bitmap_zero(chunk->bitmap, DMEM_CHUNK_NPAGES);
	spin_lock_init(&chunk->lock);

out:
	mutex_lock(&drm->dmem->mutex);
	if (chunk->bo)
		list_add(&chunk->list, &drm->dmem->chunk_empty);
	else
		list_add_tail(&chunk->list, &drm->dmem->chunk_empty);
	mutex_unlock(&drm->dmem->mutex);

	return ret;
}

static struct nouveau_dmem_chunk *
nouveau_dmem_chunk_first_free_locked(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk;

	chunk = list_first_entry_or_null(&drm->dmem->chunk_free,
					 struct nouveau_dmem_chunk,
					 list);
	if (chunk)
		return chunk;

	chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
					 struct nouveau_dmem_chunk,
					 list);
	if (chunk && chunk->bo)
		return chunk;

	return NULL;
}
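
/*
 * Hand out @npages device PFNs from the chunk pool, allocating new chunks
 * on demand. dmem->mutex protects the chunk lists, chunk->lock the bitmap.
 */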
static int
nouveau_dmem_pages_alloc(struct nouveau_drm *drm,
			 unsigned long npages,
			 unsigned long *pages)
{
	struct nouveau_dmem_chunk *chunk;
	unsigned long c;
	int ret;

	memset(pages, 0xff, npages * sizeof(*pages));

	mutex_lock(&drm->dmem->mutex);
	for (c = 0; c < npages;) {
		unsigned long i;

		chunk = nouveau_dmem_chunk_first_free_locked(drm);
		if (chunk == NULL) {
			mutex_unlock(&drm->dmem->mutex);
			ret = nouveau_dmem_chunk_alloc(drm);
			if (ret) {
				if (c)
					return 0;
				return ret;
			}
			mutex_lock(&drm->dmem->mutex);
			continue;
		}

		spin_lock(&chunk->lock);
		i = find_first_zero_bit(chunk->bitmap, DMEM_CHUNK_NPAGES);
		while (i < DMEM_CHUNK_NPAGES && c < npages) {
			pages[c] = chunk->pfn_first + i;
			set_bit(i, chunk->bitmap);
			chunk->callocated++;
			c++;

			i = find_next_zero_bit(chunk->bitmap,
					       DMEM_CHUNK_NPAGES, i);
		}
		spin_unlock(&chunk->lock);
	}
	mutex_unlock(&drm->dmem->mutex);

	return 0;
}

static struct page *
nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
{
	unsigned long pfns[1];
	struct page *page;
	int ret;

	/* FIXME stop all the mismatched APIs ... */
	ret = nouveau_dmem_pages_alloc(drm, 1, pfns);
	if (ret)
		return NULL;

	page = pfn_to_page(pfns[0]);
	get_page(page);
	lock_page(page);
	return page;
}

static void
nouveau_dmem_page_free_locked(struct nouveau_drm *drm, struct page *page)
{
	unlock_page(page);
	put_page(page);
}
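
/* Re-pin all chunk buffer objects in VRAM on resume from suspend. */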
void
nouveau_dmem_resume(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk;
	int ret;

	if (drm->dmem == NULL)
		return;

	mutex_lock(&drm->dmem->mutex);
	list_for_each_entry (chunk, &drm->dmem->chunk_free, list) {
		ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
		/* FIXME handle pin failure */
		WARN_ON(ret);
	}
	list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
		ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
		/* FIXME handle pin failure */
		WARN_ON(ret);
	}
	mutex_unlock(&drm->dmem->mutex);
}

void
nouveau_dmem_suspend(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk;

	if (drm->dmem == NULL)
		return;

	mutex_lock(&drm->dmem->mutex);
	list_for_each_entry (chunk, &drm->dmem->chunk_free, list) {
		nouveau_bo_unpin(chunk->bo);
	}
	list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
		nouveau_bo_unpin(chunk->bo);
	}
	mutex_unlock(&drm->dmem->mutex);
}

void
nouveau_dmem_fini(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk, *tmp;

	if (drm->dmem == NULL)
		return;

	mutex_lock(&drm->dmem->mutex);

	WARN_ON(!list_empty(&drm->dmem->chunk_free));
	WARN_ON(!list_empty(&drm->dmem->chunk_full));

	list_for_each_entry_safe (chunk, tmp, &drm->dmem->chunk_empty, list) {
		if (chunk->bo) {
			nouveau_bo_unpin(chunk->bo);
			nouveau_bo_ref(NULL, &chunk->bo);
		}
		list_del(&chunk->list);
		kfree(chunk);
	}

	mutex_unlock(&drm->dmem->mutex);
}
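
/*
 * Issue a copy on the GPU's DMA copy engine. Source and destination are
 * linear (pitch) layouts in either VRAM or host memory, and @npages
 * contiguous PAGE_SIZE lines are copied. Used for the copy classes selected
 * in nouveau_dmem_migrate_init() (Pascal and newer).
 */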
static int
nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
		    enum nouveau_aper dst_aper, u64 dst_addr,
		    enum nouveau_aper src_aper, u64 src_addr)
{
	struct nouveau_channel *chan = drm->dmem->migrate.chan;
	u32 launch_dma = (1 << 9) /* MULTI_LINE_ENABLE. */ |
			 (1 << 8) /* DST_MEMORY_LAYOUT_PITCH. */ |
			 (1 << 7) /* SRC_MEMORY_LAYOUT_PITCH. */ |
			 (1 << 2) /* FLUSH_ENABLE_TRUE. */ |
			 (2 << 0) /* DATA_TRANSFER_TYPE_NON_PIPELINED. */;
	int ret;

	ret = RING_SPACE(chan, 13);
	if (ret)
		return ret;

	if (src_aper != NOUVEAU_APER_VIRT) {
		switch (src_aper) {
		case NOUVEAU_APER_VRAM:
			BEGIN_IMC0(chan, NvSubCopy, 0x0260, 0);
			break;
		case NOUVEAU_APER_HOST:
			BEGIN_IMC0(chan, NvSubCopy, 0x0260, 1);
			break;
		default:
			return -EINVAL;
		}
		launch_dma |= 0x00001000; /* SRC_TYPE_PHYSICAL. */
	}

	if (dst_aper != NOUVEAU_APER_VIRT) {
		switch (dst_aper) {
		case NOUVEAU_APER_VRAM:
			BEGIN_IMC0(chan, NvSubCopy, 0x0264, 0);
			break;
		case NOUVEAU_APER_HOST:
			BEGIN_IMC0(chan, NvSubCopy, 0x0264, 1);
			break;
		default:
			return -EINVAL;
		}
		launch_dma |= 0x00002000; /* DST_TYPE_PHYSICAL. */
	}

	BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
	OUT_RING  (chan, upper_32_bits(src_addr));
	OUT_RING  (chan, lower_32_bits(src_addr));
	OUT_RING  (chan, upper_32_bits(dst_addr));
	OUT_RING  (chan, lower_32_bits(dst_addr));
	OUT_RING  (chan, PAGE_SIZE);
	OUT_RING  (chan, PAGE_SIZE);
	OUT_RING  (chan, PAGE_SIZE);
	OUT_RING  (chan, npages);
	BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
	OUT_RING  (chan, launch_dma);
	return 0;
}

static int
nouveau_dmem_migrate_init(struct nouveau_drm *drm)
{
	switch (drm->ttm.copy.oclass) {
	case PASCAL_DMA_COPY_A:
	case PASCAL_DMA_COPY_B:
	case VOLTA_DMA_COPY_A:
	case TURING_DMA_COPY_A:
		drm->dmem->migrate.copy_func = nvc0b5_migrate_copy;
		drm->dmem->migrate.chan = drm->ttm.chan;
		return 0;
	default:
		break;
	}
	return -ENODEV;
}
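
/*
 * Register all of VRAM as MEMORY_DEVICE_PRIVATE with the kernel and carve it
 * into 2MB chunks, initially tracked on the chunk_empty list.
 */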
void
nouveau_dmem_init(struct nouveau_drm *drm)
{
	struct device *device = drm->dev->dev;
	struct resource *res;
	unsigned long i, size, pfn_first;
	int ret;

	/* This only makes sense on PASCAL or newer */
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_PASCAL)
		return;

	if (!(drm->dmem = kzalloc(sizeof(*drm->dmem), GFP_KERNEL)))
		return;

	drm->dmem->drm = drm;
	mutex_init(&drm->dmem->mutex);
	INIT_LIST_HEAD(&drm->dmem->chunk_free);
	INIT_LIST_HEAD(&drm->dmem->chunk_full);
	INIT_LIST_HEAD(&drm->dmem->chunk_empty);

	size = ALIGN(drm->client.device.info.ram_user, DMEM_CHUNK_SIZE);

	/* Initialize migration dma helpers before registering memory */
	ret = nouveau_dmem_migrate_init(drm);
	if (ret)
		goto out_free;

	/*
	 * FIXME we need some kind of policy to decide how much VRAM we
	 * want to register with HMM. For now just register everything
	 * and later if we want to do things like overcommit then we
	 * could revisit this.
	 */
	res = devm_request_free_mem_region(device, &iomem_resource, size);
	if (IS_ERR(res))
		goto out_free;
	drm->dmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
	drm->dmem->pagemap.res = *res;
	drm->dmem->pagemap.ops = &nouveau_dmem_pagemap_ops;
	if (IS_ERR(devm_memremap_pages(device, &drm->dmem->pagemap)))
		goto out_free;

	pfn_first = res->start >> PAGE_SHIFT;
	for (i = 0; i < (size / DMEM_CHUNK_SIZE); ++i) {
		struct nouveau_dmem_chunk *chunk;
		struct page *page;
		unsigned long j;

		chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
		if (chunk == NULL) {
			nouveau_dmem_fini(drm);
			return;
		}

		chunk->drm = drm;
		chunk->pfn_first = pfn_first + (i * DMEM_CHUNK_NPAGES);
		list_add_tail(&chunk->list, &drm->dmem->chunk_empty);

		page = pfn_to_page(chunk->pfn_first);
		for (j = 0; j < DMEM_CHUNK_NPAGES; ++j, ++page)
			page->zone_device_data = chunk;
	}

	NV_INFO(drm, "DMEM: registered %ldMB of device memory\n", size >> 20);
	return;
out_free:
	kfree(drm->dmem);
	drm->dmem = NULL;
}
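
/*
 * migrate_vma() alloc_and_copy() callback for migrating anonymous memory to
 * VRAM: allocate device pages and copy the source pages into them with the
 * GPU copy engine.
 */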
static void
nouveau_dmem_migrate_alloc_and_copy(struct vm_area_struct *vma,
				    const unsigned long *src_pfns,
				    unsigned long *dst_pfns,
				    unsigned long start,
				    unsigned long end,
				    void *private)
{
	struct nouveau_migrate *migrate = private;
	struct nouveau_drm *drm = migrate->drm;
	struct device *dev = drm->dev->dev;
	unsigned long addr, i, npages = 0;
	nouveau_migrate_copy_t copy;
	int ret;

	/* First allocate new memory */
	for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
		struct page *dpage, *spage;

		dst_pfns[i] = 0;
		spage = migrate_pfn_to_page(src_pfns[i]);
		if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE))
			continue;

		dpage = nouveau_dmem_page_alloc_locked(drm);
		if (!dpage)
			continue;

		dst_pfns[i] = migrate_pfn(page_to_pfn(dpage)) |
			      MIGRATE_PFN_LOCKED |
			      MIGRATE_PFN_DEVICE;
		npages++;
	}

	if (!npages)
		return;

	/* Allocate storage for DMA addresses, so we can unmap later. */
	migrate->dma = kmalloc(sizeof(*migrate->dma) * npages, GFP_KERNEL);
	if (!migrate->dma)
		goto error;

	/* Copy things over */
	copy = drm->dmem->migrate.copy_func;
	for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
		struct nouveau_dmem_chunk *chunk;
		struct page *spage, *dpage;
		u64 src_addr, dst_addr;

		dpage = migrate_pfn_to_page(dst_pfns[i]);
		if (!dpage || dst_pfns[i] == MIGRATE_PFN_ERROR)
			continue;

		chunk = dpage->zone_device_data;
		dst_addr = page_to_pfn(dpage) - chunk->pfn_first;
		dst_addr = (dst_addr << PAGE_SHIFT) + chunk->bo->bo.offset;

		spage = migrate_pfn_to_page(src_pfns[i]);
		if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE)) {
			nouveau_dmem_page_free_locked(drm, dpage);
			dst_pfns[i] = 0;
			continue;
		}

		migrate->dma[migrate->dma_nr] =
			dma_map_page_attrs(dev, spage, 0, PAGE_SIZE,
					   PCI_DMA_BIDIRECTIONAL,
					   DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(dev, migrate->dma[migrate->dma_nr])) {
			nouveau_dmem_page_free_locked(drm, dpage);
			dst_pfns[i] = 0;
			continue;
		}

		src_addr = migrate->dma[migrate->dma_nr++];

		ret = copy(drm, 1, NOUVEAU_APER_VRAM, dst_addr,
			   NOUVEAU_APER_HOST, src_addr);
		if (ret) {
			nouveau_dmem_page_free_locked(drm, dpage);
			dst_pfns[i] = 0;
			continue;
		}
	}

	nouveau_fence_new(drm->dmem->migrate.chan, false, &migrate->fence);

	return;

error:
	for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, ++i) {
		struct page *page;

		if (!dst_pfns[i] || dst_pfns[i] == MIGRATE_PFN_ERROR)
			continue;

		page = migrate_pfn_to_page(dst_pfns[i]);
		dst_pfns[i] = MIGRATE_PFN_ERROR;
		if (page == NULL)
			continue;

		__free_page(page);
	}
}

void nouveau_dmem_migrate_finalize_and_map(struct vm_area_struct *vma,
					   const unsigned long *src_pfns,
					   const unsigned long *dst_pfns,
					   unsigned long start,
					   unsigned long end,
					   void *private)
{
	struct nouveau_migrate *migrate = private;
	struct nouveau_drm *drm = migrate->drm;

	if (migrate->fence) {
		nouveau_fence_wait(migrate->fence, true, false);
		nouveau_fence_unref(&migrate->fence);
	} else {
		/*
		 * FIXME wait for channel to be IDLE before finalizing
		 * the hmem object below (nouveau_migrate_hmem_fini()) ?
		 */
	}

	while (migrate->dma_nr--) {
		dma_unmap_page(drm->dev->dev, migrate->dma[migrate->dma_nr],
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	}
	kfree(migrate->dma);

	/*
	 * FIXME optimization: update GPU page table to point to newly
	 * migrated memory.
	 */
}

static const struct migrate_vma_ops nouveau_dmem_migrate_ops = {
	.alloc_and_copy		= nouveau_dmem_migrate_alloc_and_copy,
	.finalize_and_map	= nouveau_dmem_migrate_finalize_and_map,
};
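
/*
 * Migrate the range [start, end) of @vma to device memory, batching the work
 * in chunks of at most SG_MAX_SINGLE_ALLOC pages per migrate_vma() call.
 */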
int
nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
			 struct vm_area_struct *vma,
			 unsigned long start,
			 unsigned long end)
{
	unsigned long *src_pfns, *dst_pfns, npages;
	struct nouveau_migrate migrate = {0};
	unsigned long i, c, max;
	int ret = 0;

	npages = (end - start) >> PAGE_SHIFT;
	max = min(SG_MAX_SINGLE_ALLOC, npages);
	src_pfns = kzalloc(sizeof(long) * max, GFP_KERNEL);
	if (src_pfns == NULL)
		return -ENOMEM;
	dst_pfns = kzalloc(sizeof(long) * max, GFP_KERNEL);
	if (dst_pfns == NULL) {
		kfree(src_pfns);
		return -ENOMEM;
	}

	migrate.drm = drm;
	migrate.vma = vma;
	migrate.npages = npages;
	for (i = 0; i < npages; i += c) {
		unsigned long next;

		c = min(SG_MAX_SINGLE_ALLOC, npages - i);
		next = start + (c << PAGE_SHIFT);
		ret = migrate_vma(&nouveau_dmem_migrate_ops, vma, start,
				  next, src_pfns, dst_pfns, &migrate);
		if (ret)
			goto out;
		start = next;
	}

out:
	kfree(dst_pfns);
	kfree(src_pfns);
	return ret;
}

static inline bool
nouveau_dmem_page(struct nouveau_drm *drm, struct page *page)
{
	return is_device_private_page(page) && drm->dmem == page_to_dmem(page);
}
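
/*
 * Convert device private PFNs in an hmm_range into VRAM addresses the GPU
 * page tables can use, based on the backing chunk's buffer object location.
 */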
void
nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
			 struct hmm_range *range)
{
	unsigned long i, npages;

	npages = (range->end - range->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; ++i) {
		struct nouveau_dmem_chunk *chunk;
		struct page *page;
		uint64_t addr;

		page = hmm_pfn_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		if (!(range->pfns[i] & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
			continue;
		}

		if (!nouveau_dmem_page(drm, page)) {
			WARN(1, "Some unknown device memory !\n");
			range->pfns[i] = 0;
			continue;
		}

		chunk = page->zone_device_data;
		addr = page_to_pfn(page) - chunk->pfn_first;
		addr = (addr + chunk->bo->bo.mem.start) << PAGE_SHIFT;

		range->pfns[i] &= ((1UL << range->pfn_shift) - 1);
		range->pfns[i] |= (addr >> PAGE_SHIFT) << range->pfn_shift;
	}
}